hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M)
---|---|---|---|
67002008c681993225737fbce0fd75ad373119c3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Recursive Gaussian filter
sgreen 8/1/08
This code sample implements a Gaussian blur using Deriche's recursive method:
http://citeseer.ist.psu.edu/deriche93recursively.html
This is similar to the box filter sample in the SDK, but it uses the previous
outputs of the filter as well as the previous inputs. This is also known as an
IIR (infinite impulse response) filter, since its response to an input impulse
can last forever.
The main advantage of this method is that the execution time is independent of
the filter width.
The GPU processes columns of the image in parallel. To avoid uncoalesced reads
for the row pass we transpose the image and then transpose it back again
afterwards.
The implementation is based on code from the CImg library:
http://cimg.sourceforge.net/
Thanks to David Tschumperlé and all the CImg contributors!
*/
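/*
For reference: in the standard Deriche formulation (which the kernels in
recursiveGaussian_kernel.cuh are expected to follow), the per-column passes are
forward:  y[n] = a0*x[n]   + a1*x[n-1] - b1*y[n-1] - b2*y[n-2]
backward: y[n] = a2*x[n+1] + a3*x[n+2] - b1*y[n+1] - b2*y[n+2]
and the two passes are summed; the coefficients a0..a3, b1, b2 and the boundary
terms coefp/coefn are computed from sigma in gaussianFilterRGBA() below.
*/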
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
#include "recursiveGaussian_kernel.cuh"
hipEvent_t start, stop;
float time_in_ms;
#define USE_SIMPLE_FILTER 0
//Round a / b to nearest higher integer value
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
/*
Transpose a 2D array (see SDK transpose example)
*/
extern "C"
void transpose(uint *d_src, uint *d_dest, uint width, int height)
{
dim3 grid(iDivUp(width, BLOCK_DIM), iDivUp(height, BLOCK_DIM), 1);
dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
hipLaunchKernelGGL(( d_transpose), dim3(grid), dim3(threads) , 0, 0, d_dest, d_src, width, height);
getLastCudaError("Kernel execution failed");
}
/*
Perform Gaussian filter on a 2D image using CUDA
Parameters:
d_src - pointer to input image in device memory
d_dest - pointer to destination image in device memory
d_temp - pointer to temporary storage in device memory
width - image width
height - image height
sigma - sigma of Gaussian
order - filter order (0, 1 or 2)
*/
// 8-bit RGBA version
extern "C"
void gaussianFilterRGBA(uint *d_src,thrust::block_2d<unsigned int> &block_d_input, uint *d_dest,thrust::block_2d<unsigned int> &block_d_output, uint *d_temp, int width, int height, float sigma, int order, int nthreads)
{
// compute filter coefficients
const float
nsigma = sigma < 0.1f ? 0.1f : sigma,
alpha = 1.695f / nsigma,
ema = (float)::exp(-alpha),
ema2 = (float)::exp(-2*alpha),
b1 = -2*ema,
b2 = ema2;
float a0 = 0, a1 = 0, a2 = 0, a3 = 0, coefp = 0, coefn = 0;
switch (order)
{
case 0:
{
const float k = (1-ema)*(1-ema)/(1+2*alpha*ema-ema2);
a0 = k;
a1 = k*(alpha-1)*ema;
a2 = k*(alpha+1)*ema;
a3 = -k*ema2;
}
break;
case 1:
{
const float k = (1-ema)*(1-ema)/ema;
a0 = k*ema;
a1 = a3 = 0;
a2 = -a0;
}
break;
case 2:
{
const float
ea = (float)::exp(-alpha),
k = -(ema2-1)/(2*alpha*ema),
kn = (-2*(-1+3*ea-3*ea*ea+ea*ea*ea)/(3*ea+1+3*ea*ea+ea*ea*ea));
a0 = kn;
a1 = -kn*(1+k*alpha)*ema;
a2 = kn*(1-k*alpha)*ema;
a3 = -kn*ema2;
}
break;
default:
fprintf(stderr, "gaussianFilter: invalid order parameter!\n");
return;
}
coefp = (a0+a1)/(1+b1+b2);
coefn = (a2+a3)/(1+b1+b2);
thrust::window_vector<unsigned int> input_window_vector(&block_d_input,1,height,1,1);
thrust::window_vector<unsigned int> output_window_vector(&block_d_output,1,height,1,1);
thrust::block_2d<unsigned int> null_vector(width,height);
// process columns
// #if USE_SIMPLE_FILTER
// d_simpleRecursive_rgba<<< iDivUp(width, nthreads), nthreads >>>(d_src, d_temp, width, height, ema);
// #else
thrust::transform(thrust::hip::global,input_window_vector.begin(),input_window_vector.end(),output_window_vector.begin(),d_recursiveGaussian_functor(width, height, a0, a1, a2, a3, b1, b2, coefp, coefn));
hipLaunchKernelGGL(( d_recursiveGaussian_rgba), dim3(iDivUp(width, nthreads)), dim3(nthreads) , 0, 0, d_src, d_temp, width, height, a0, a1, a2, a3, b1, b2, coefp, coefn);
// #endif
getLastCudaError("Kernel execution failed");
transpose(d_temp, d_dest, width, height);
getLastCudaError("transpose: Kernel execution failed");
// process rows
#if USE_SIMPLE_FILTER
// d_simpleRecursive_rgba<<< iDivUp(height, nthreads), nthreads >>>(d_dest, d_temp, height, width, ema);
#else
// d_recursiveGaussian_rgba<<< iDivUp(height, nthreads), nthreads >>>(d_dest, d_temp, height, width, a0, a1, a2, a3, b1, b2, coefp, coefn);
#endif
getLastCudaError("Kernel execution failed");
transpose(d_temp, d_dest, height, width);
}
| 67002008c681993225737fbce0fd75ad373119c3.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
Recursive Gaussian filter
sgreen 8/1/08
This code sample implements a Gaussian blur using Deriche's recursive method:
http://citeseer.ist.psu.edu/deriche93recursively.html
This is similar to the box filter sample in the SDK, but it uses the previous
outputs of the filter as well as the previous inputs. This is also known as an
IIR (infinite impulse response) filter, since its response to an input impulse
can last forever.
The main advantage of this method is that the execution time is independent of
the filter width.
The GPU processes columns of the image in parallel. To avoid uncoalesced reads
for the row pass we transpose the image and then transpose it back again
afterwards.
The implementation is based on code from the CImg library:
http://cimg.sourceforge.net/
Thanks to David Tschumperlé and all the CImg contributors!
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include <thrust/window_2d.h>
#include <thrust/window_transform.h>
#include "recursiveGaussian_kernel.cuh"
cudaEvent_t start, stop;
float time_in_ms;
#define USE_SIMPLE_FILTER 0
//Round a / b to nearest higher integer value
int iDivUp(int a, int b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
/*
Transpose a 2D array (see SDK transpose example)
*/
extern "C"
void transpose(uint *d_src, uint *d_dest, uint width, int height)
{
dim3 grid(iDivUp(width, BLOCK_DIM), iDivUp(height, BLOCK_DIM), 1);
dim3 threads(BLOCK_DIM, BLOCK_DIM, 1);
d_transpose<<< grid, threads >>>(d_dest, d_src, width, height);
getLastCudaError("Kernel execution failed");
}
/*
Perform Gaussian filter on a 2D image using CUDA
Parameters:
d_src - pointer to input image in device memory
d_dest - pointer to destination image in device memory
d_temp - pointer to temporary storage in device memory
width - image width
height - image height
sigma - sigma of Gaussian
order - filter order (0, 1 or 2)
*/
// 8-bit RGBA version
extern "C"
void gaussianFilterRGBA(uint *d_src,thrust::block_2d<unsigned int> &block_d_input, uint *d_dest,thrust::block_2d<unsigned int> &block_d_output, uint *d_temp, int width, int height, float sigma, int order, int nthreads)
{
// compute filter coefficients
const float
nsigma = sigma < 0.1f ? 0.1f : sigma,
alpha = 1.695f / nsigma,
ema = (float)std::exp(-alpha),
ema2 = (float)std::exp(-2*alpha),
b1 = -2*ema,
b2 = ema2;
float a0 = 0, a1 = 0, a2 = 0, a3 = 0, coefp = 0, coefn = 0;
switch (order)
{
case 0:
{
const float k = (1-ema)*(1-ema)/(1+2*alpha*ema-ema2);
a0 = k;
a1 = k*(alpha-1)*ema;
a2 = k*(alpha+1)*ema;
a3 = -k*ema2;
}
break;
case 1:
{
const float k = (1-ema)*(1-ema)/ema;
a0 = k*ema;
a1 = a3 = 0;
a2 = -a0;
}
break;
case 2:
{
const float
ea = (float)std::exp(-alpha),
k = -(ema2-1)/(2*alpha*ema),
kn = (-2*(-1+3*ea-3*ea*ea+ea*ea*ea)/(3*ea+1+3*ea*ea+ea*ea*ea));
a0 = kn;
a1 = -kn*(1+k*alpha)*ema;
a2 = kn*(1-k*alpha)*ema;
a3 = -kn*ema2;
}
break;
default:
fprintf(stderr, "gaussianFilter: invalid order parameter!\n");
return;
}
coefp = (a0+a1)/(1+b1+b2);
coefn = (a2+a3)/(1+b1+b2);
thrust::window_vector<unsigned int> input_window_vector(&block_d_input,1,height,1,1);
thrust::window_vector<unsigned int> output_window_vector(&block_d_output,1,height,1,1);
thrust::block_2d<unsigned int> null_vector(width,height);
// process columns
// #if USE_SIMPLE_FILTER
// d_simpleRecursive_rgba<<< iDivUp(width, nthreads), nthreads >>>(d_src, d_temp, width, height, ema);
// #else
thrust::transform(thrust::cuda::global,input_window_vector.begin(),input_window_vector.end(),output_window_vector.begin(),d_recursiveGaussian_functor(width, height, a0, a1, a2, a3, b1, b2, coefp, coefn));
d_recursiveGaussian_rgba<<< iDivUp(width, nthreads), nthreads >>>(d_src, d_temp, width, height, a0, a1, a2, a3, b1, b2, coefp, coefn);
// #endif
getLastCudaError("Kernel execution failed");
transpose(d_temp, d_dest, width, height);
getLastCudaError("transpose: Kernel execution failed");
// process rows
#if USE_SIMPLE_FILTER
// d_simpleRecursive_rgba<<< iDivUp(height, nthreads), nthreads >>>(d_dest, d_temp, height, width, ema);
#else
// d_recursiveGaussian_rgba<<< iDivUp(height, nthreads), nthreads >>>(d_dest, d_temp, height, width, a0, a1, a2, a3, b1, b2, coefp, coefn);
#endif
getLastCudaError("Kernel execution failed");
transpose(d_temp, d_dest, height, width);
}
|
06588851c0e403b3ad0f54308b7d29ba520e63c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "test_comm.h"
void host_norm(double* pIn, double *pOut, int sizeIn, int sizeOut)
{
int i, j;
double *data1, *data2;
float *data1f, *data2f;
float *data1f_gpu, *data2f_gpu;
int sizeBlock;
sizeBlock = VECTOR_BLOCK_SIZE;
data1 = pIn;
data2 = pOut;
// Find the dimensions of the data
// Create an mxArray for the output data
// Create an input and output data array on the GPU
hipMalloc( (void **) &data1f_gpu,sizeof(float)*sizeIn);
hipMalloc( (void **) &data2f_gpu,sizeof(float)*sizeOut);
// Retrieve the input data
// Check if the input array is single or double precision
// The input array is in double precision, it needs to be converted to floats before being sent to the card
data1f = (float *) malloc(sizeof(float)*sizeIn);
for (j = 0; j < sizeIn; j++)
{
data1f[j] = (float) data1[j];
}
for (i = 0; i < sizeIn; i++)
{
printf("data1f[%d] = %f, ", i, data1f[i]);
}
printf("\n");
hipMemcpy( data1f_gpu, data1f, sizeof(float)*sizeIn, hipMemcpyHostToDevice);
data2f = (float *) malloc(sizeof(float)*sizeOut);
//hipMemcpy( data2f_gpu, data2f, sizeof(float)*sizeOut, hipMemcpyHostToDevice);
// Compute execution configuration using 128 threads per block
dim3 dimBlock(sizeBlock);
dim3 dimGrid((sizeIn)/dimBlock.x);
if ( (sizeIn) % sizeBlock !=0 ) dimGrid.x+=1;
// Call function on GPU
hipLaunchKernelGGL(( norm_elements), dim3(dimGrid),dim3(dimBlock), 0, 0, data1f_gpu, data2f_gpu, sizeIn);
hipError_t e;
e = hipGetLastError();
if ( e != hipSuccess)
{
fprintf(stderr, "CUDA Error on square_elements: '%s' \n", hipGetErrorString(e));
exit(-1);
}
// Copy result back to host
hipMemcpy( data2f, data2f_gpu, sizeof(float)*sizeOut, hipMemcpyDeviceToHost);
for (i = 0; i < sizeOut; i++)
{
printf("data2f[%d] = %f, ", i, data2f[i]);
}
printf("\n");
// Create a pointer to the output data
// Convert from single to double before returning
for (j = 0; j < sizeOut; j++)
{
data2[j] = (double) data2f[j];
}
// Clean-up memory on device and host
free(data1f);
free(data2f);
hipFree(data1f_gpu);
hipFree(data2f_gpu);
}
int test_norm()
{
double *pIn, *pOut;
int sizeIn, sizeOut;
int i;
sizeIn = 10000;
sizeOut = sizeIn/VECTOR_BLOCK_SIZE;
pIn = (double*)malloc(sizeof(double)*sizeIn);
pOut = (double*)malloc(sizeof(double)*sizeOut);
/*
pIn[0] = 3;
pIn[1] = 4;
//pIn[2] = 3;
*/
for (i = 0; i < sizeIn; i++){
pIn[i] = 1;
}
host_norm(pIn, pOut, sizeIn, sizeOut);
printf("output square result");
for (i = 0; i < sizeOut; i++)
{
printf(" pOut[%d] = %lf, ", i, pOut[i]);
}
printf("\n");
printf("output norm result");
for (i = 0; i < sizeOut; i++)
{
//pOut[i] = sqrt(pOut[i]);
printf("squre of pOut[%d] = %lf, ", i, pOut[i]);
}
printf("\n");
free(pIn);
free(pOut);
return 0;
}
int mexTest_norm(double *pIn,double *pOut,int sizeIn)
{
//double *pOut;
int sizeOut;
int i;
//sizeOut =sizeIn/VECTOR_BLOCK_SIZE + 1;
sizeOut=1;
//pOut = (double*)malloc(sizeof(double)*sizeOut);
host_norm(pIn, pOut, sizeIn, sizeOut);
double expect=sizeIn;
printf("output square result");
//if(pOut[0] != expect){
for (i = 0; i < sizeOut; i++)
{
printf(" pOut[%d] = %lf, ", i, pOut[i]);
}
//}
//free(pOut);
return 0;
} | 06588851c0e403b3ad0f54308b7d29ba520e63c5.cu | #include "test_comm.h"
void host_norm(double* pIn, double *pOut, int sizeIn, int sizeOut)
{
int i, j;
double *data1, *data2;
float *data1f, *data2f;
float *data1f_gpu, *data2f_gpu;
int sizeBlock;
sizeBlock = VECTOR_BLOCK_SIZE;
data1 = pIn;
data2 = pOut;
// Find the dimensions of the data
// Create an mxArray for the output data
// Create an input and output data array on the GPU
cudaMalloc( (void **) &data1f_gpu,sizeof(float)*sizeIn);
cudaMalloc( (void **) &data2f_gpu,sizeof(float)*sizeOut);
// Retrieve the input data
// Check if the input array is single or double precision
// The input array is in double precision, it needs to be converted to floats before being sent to the card
data1f = (float *) malloc(sizeof(float)*sizeIn);
for (j = 0; j < sizeIn; j++)
{
data1f[j] = (float) data1[j];
}
for (i = 0; i < sizeIn; i++)
{
printf("data1f[%d] = %f, ", i, data1f[i]);
}
printf("\n");
cudaMemcpy( data1f_gpu, data1f, sizeof(float)*sizeIn, cudaMemcpyHostToDevice);
data2f = (float *) malloc(sizeof(float)*sizeOut);
//cudaMemcpy( data2f_gpu, data2f, sizeof(float)*sizeOut, cudaMemcpyHostToDevice);
// Compute execution configuration using 128 threads per block
dim3 dimBlock(sizeBlock);
dim3 dimGrid((sizeIn)/dimBlock.x);
if ( (sizeIn) % sizeBlock !=0 ) dimGrid.x+=1;
// Call function on GPU
norm_elements<<<dimGrid,dimBlock>>>(data1f_gpu, data2f_gpu, sizeIn);
cudaError_t e;
e = cudaGetLastError();
if ( e != cudaSuccess)
{
fprintf(stderr, "CUDA Error on square_elements: '%s' \n", cudaGetErrorString(e));
exit(-1);
}
// Copy result back to host
cudaMemcpy( data2f, data2f_gpu, sizeof(float)*sizeOut, cudaMemcpyDeviceToHost);
for (i = 0; i < sizeOut; i++)
{
printf("data2f[%d] = %f, ", i, data2f[i]);
}
printf("\n");
// Create a pointer to the output data
// Convert from single to double before returning
for (j = 0; j < sizeOut; j++)
{
data2[j] = (double) data2f[j];
}
// Clean-up memory on device and host
free(data1f);
free(data2f);
cudaFree(data1f_gpu);
cudaFree(data2f_gpu);
}
int test_norm()
{
double *pIn, *pOut;
int sizeIn, sizeOut;
int i;
sizeIn = 10000;
sizeOut = sizeIn/VECTOR_BLOCK_SIZE;
pIn = (double*)malloc(sizeof(double)*sizeIn);
pOut = (double*)malloc(sizeof(double)*sizeOut);
/*
pIn[0] = 3;
pIn[1] = 4;
//pIn[2] = 3;
*/
for (i = 0; i < sizeIn; i++){
pIn[i] = 1;
}
host_norm(pIn, pOut, sizeIn, sizeOut);
printf("output square result");
for (i = 0; i < sizeOut; i++)
{
printf(" pOut[%d] = %lf, ", i, pOut[i]);
}
printf("\n");
printf("output norm result");
for (i = 0; i < sizeOut; i++)
{
//pOut[i] = sqrt(pOut[i]);
printf("squre of pOut[%d] = %lf, ", i, pOut[i]);
}
printf("\n");
free(pIn);
free(pOut);
return 0;
}
int mexTest_norm(double *pIn,double *pOut,int sizeIn)
{
//double *pOut;
int sizeOut;
int i;
//sizeOut =sizeIn/VECTOR_BLOCK_SIZE + 1;
sizeOut=1;
//pOut = (double*)malloc(sizeof(double)*sizeOut);
host_norm(pIn, pOut, sizeIn, sizeOut);
double expect=sizeIn;
printf("output square result");
//if(pOut[0] != expect){
for (i = 0; i < sizeOut; i++)
{
printf(" pOut[%d] = %lf, ", i, pOut[i]);
}
//}
//free(pOut);
return 0;
} |
5e8ce6bd6a13debf473acd2528ed8d94482ef1e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
// includes, project
#include "2Dconvolution.h"
#include "2Dconvolution_gold.cpp"
using namespace std;
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void ConstCopyToDeviceMatrix(float* Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
bool ReadParams(int* params, int size, char* file_name);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
__constant__ float cM[25];
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P)
{
//Set up the appropriate indices
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int tidxy = tidx + tidy*blockDim.x;
int bidx = blockIdx.x;
int bidy = blockIdx.y;
int nx = tidx + bidx*blockDim.x;
int ny = tidy + bidy*blockDim.y;
//Hard coded to 16*16 + 2*16*4 + 16 to be large enough based on 16x16 blocks
__shared__ float ns[400];
//Set up the indices for the shared version of submatrix of N
int bsx0 = (bidx < 1)? 0 : (bidx*blockDim.x-2);
int xs0 = (bidx < 1)? 0 : 2;
int bsx1 = (bidx == (gridDim.x-1))? (N.width-1) : ((bidx+1)*blockDim.x+2);
int bsy0 = (bidy < 1)? 0 : (bidy*blockDim.y-2);
int ys0 = (bidy < 1)? 0 : 2;
int bsy1 = (bidy == (gridDim.y-1))? (N.height-1) : ((bidy+1)*blockDim.y+2);
int bxdim = bsx1-bsx0+1;
int bydim = bsy1-bsy0+1;
int bsize = bxdim*bydim;
//Create the shared memory array
//I cannot explain this indexing with words, would need visual aid
//It is some janky stuff I duct taped together
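//In short: each block stages its 16x16 output tile of N, plus a small apron for
//the 5x5 convolution kernel, into ns[] using up to two cooperative passes of
//blockDim.x*blockDim.y threads, clamping the tile bounds at the image borders.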
if ((tidxy)<bsize)
{
int row = (tidxy)/bxdim;
int ty = bsy0 + row;
int tx = tidxy - row*bxdim + bsx0;
ns[tidxy] = N.elements[tx + ty*N.width];
}
if (bsize>256 && (tidxy+256)<bsize) //16x16 hard coded here, could be changed
{
int row2 = (tidxy+256)/bxdim;
int ty2 = bsy0 + row2;
int tx2 = tidxy+256 - row2*bxdim + bsx0;
ns[tidxy+256] = N.elements[tx2 + ty2*N.width];
}
__syncthreads();
if (nx<(N.width) && ny<(N.height))
{
int x0 = (nx < 2)? (2-nx) : 0;
int x1 = (nx > (N.width-3))? (N.width-nx+2) : 5;
int y0 = (ny < 2)? (2-ny) : 0;
int y1 = (ny > (N.height-3))? (N.height-ny+2) : 5;
float sum = 0;
for(int i=x0; i<x1; i++)
{
for(int j=y0; j<y1; j++)
{
//sum = sum + M.elements[i+j*M.width]*N.elements[(i+nx-2) + (ny+j-2)*N.width];
sum = sum + M.elements[i+j*M.width]*ns[(tidx+i-xs0) + (tidy+j-ys0)*bxdim];
}
}
P.elements[nx + ny*N.width] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
srand(2013);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
//N = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
N = AllocateMatrix(N_X, N_Y, 1);
//N_X and N_Y specified in header. Can be any positive int >= 16
P = AllocateMatrix(N.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = (int*)malloc(2 * sizeof(int));
unsigned int data_read = 2;
if(ReadParams(params, data_read, argv[1])){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
N = AllocateMatrix(params[0], params[1], 0);
P = AllocateMatrix(params[0], params[1], 0);
(void)ReadFile(&M, argv[2]);
(void)ReadFile(&N, argv[3]);
}
// M * N on the device
ConvolutionOnDevice(M, N, P);
//Start cpu timing here
hipEvent_t startCPU, stopCPU;
hipEventCreate(&startCPU);
hipEventCreate(&stopCPU);
hipEventRecord(startCPU, 0);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
//Stop cpu timing here
hipEventRecord(stopCPU, 0);
hipEventSynchronize(stopCPU);
float cpuTime;
hipEventElapsedTime(&cpuTime, startCPU, stopCPU);
hipEventDestroy(startCPU);
hipEventDestroy(stopCPU);
//Output timing
printf("CPU time: %f ms. \n", cpuTime);
// in this case check if the result is equivalent to the expected solution
bool res = CompareResults(reference.elements, P.elements, P.width * P.height, 0.01f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Start inclusive timing here
hipEvent_t startIn, stopIn;
hipEventCreate(&startIn);
hipEventCreate(&stopIn);
hipEventRecord(startIn, 0);
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
//Start exclusive timing here
hipEvent_t startEx, stopEx;
hipEventCreate(&startEx);
hipEventCreate(&stopEx);
hipEventRecord(startEx, 0);
// Setup the execution configuration
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid((N.width+dimBlock.x-1)/dimBlock.x,(N.height+dimBlock.y-1)/dimBlock.y);
// Launch the device computation threads!
hipLaunchKernelGGL(( ConvolutionKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md,Nd,Pd);
//Stop exclusive timing here
hipEventRecord(stopEx, 0);
hipEventSynchronize(stopEx);
float exTime;
hipEventElapsedTime(&exTime, startEx, stopEx);
hipEventDestroy(startEx);
hipEventDestroy(stopEx);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
//Stop inclusive timing here
hipEventRecord(stopIn, 0);
hipEventSynchronize(stopIn);
float inTime;
hipEventElapsedTime(&inTime, startIn, stopIn);
hipEventDestroy(startIn);
hipEventDestroy(stopIn);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
//Output timing
printf("Inclusive time: %f ms. \n", inTime);
printf("Exclusive time: %f ms. \n", exTime);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
if(rand() % 2)
M.elements[i] = - M.elements[i];
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a host matrix to a constant device matrix
void ConstCopyToDeviceMatrix(float* Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
hipMemcpyToSymbol(Mdevice, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
//compare the data stored in two arrays on the host
bool CompareResults(float* A, float* B, int elements, float eps)
{
for(unsigned int i = 0; i < elements; i++){
float error = A[i]-B[i];
if(error>eps){
return false;
}
}
return true;
}
bool ReadParams(int* params, int size, char* file_name){
ifstream ifile(file_name);
int i=0;
for(int i=0; i<size; i++){
if(ifile.fail()==false){
ifile>>params[i];
}
}
return (i==size)? 1:0;
}
// Read a 16x16 floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height * M->width;
std::ifstream ifile(file_name);
for(unsigned int i = 0; i < data_read; i++){
ifile>>M->elements[i];
}
ifile.close();
return data_read;
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
std::ofstream ofile(file_name);
for(unsigned int i = 0; i < M.width*M.height; i++){
ofile<<M.elements[i];
}
ofile.close();
}
| 5e8ce6bd6a13debf473acd2528ed8d94482ef1e1.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
// includes, project
#include "2Dconvolution.h"
#include "2Dconvolution_gold.cpp"
using namespace std;
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void ConstCopyToDeviceMatrix(float* Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
bool ReadParams(int* params, int size, char* file_name);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P);
__constant__ float cM[25];
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
////////////////////////////////////////////////////////////////////////////////
__global__ void ConvolutionKernel(Matrix M, Matrix N, Matrix P)
{
//Set up the appropriate indices
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int tidxy = tidx + tidy*blockDim.x;
int bidx = blockIdx.x;
int bidy = blockIdx.y;
int nx = tidx + bidx*blockDim.x;
int ny = tidy + bidy*blockDim.y;
//Hard coded to 16*16 + 2*16*4 + 16 to be large enough based on 16x16 blocks
__shared__ float ns[400];
//Set up the indices for the shared version of submatrix of N
int bsx0 = (bidx < 1)? 0 : (bidx*blockDim.x-2);
int xs0 = (bidx < 1)? 0 : 2;
int bsx1 = (bidx == (gridDim.x-1))? (N.width-1) : ((bidx+1)*blockDim.x+2);
int bsy0 = (bidy < 1)? 0 : (bidy*blockDim.y-2);
int ys0 = (bidy < 1)? 0 : 2;
int bsy1 = (bidy == (gridDim.y-1))? (N.height-1) : ((bidy+1)*blockDim.y+2);
int bxdim = bsx1-bsx0+1;
int bydim = bsy1-bsy0+1;
int bsize = bxdim*bydim;
//Create the shared memory array
//I cannot explain this indexing with words, would need visual aid
//It is some janky stuff I duct taped together
if ((tidxy)<bsize)
{
int row = (tidxy)/bxdim;
int ty = bsy0 + row;
int tx = tidxy - row*bxdim + bsx0;
ns[tidxy] = N.elements[tx + ty*N.width];
}
if (bsize>256 && (tidxy+256)<bsize) //16x16 hard coded here, could be changed
{
int row2 = (tidxy+256)/bxdim;
int ty2 = bsy0 + row2;
int tx2 = tidxy+256 - row2*bxdim + bsx0;
ns[tidxy+256] = N.elements[tx2 + ty2*N.width];
}
__syncthreads();
if (nx<(N.width) && ny<(N.height))
{
int x0 = (nx < 2)? (2-nx) : 0;
int x1 = (nx > (N.width-3))? (N.width-nx+2) : 5;
int y0 = (ny < 2)? (2-ny) : 0;
int y1 = (ny > (N.height-3))? (N.height-ny+2) : 5;
float sum = 0;
for(int i=x0; i<x1; i++)
{
for(int j=y0; j<y1; j++)
{
//sum = sum + M.elements[i+j*M.width]*N.elements[(i+nx-2) + (ny+j-2)*N.width];
sum = sum + M.elements[i+j*M.width]*ns[(tidx+i-xs0) + (tidy+j-ys0)*bxdim];
}
}
P.elements[nx + ny*N.width] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
srand(2013);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1);
//N = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1);
N = AllocateMatrix(N_X, N_Y, 1);
//N_X and N_Y specified in header. Can be any positive int >= 16
P = AllocateMatrix(N.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = (int*)malloc(2 * sizeof(int));
unsigned int data_read = 2;
if(ReadParams(params, data_read, argv[1])){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0);
N = AllocateMatrix(params[0], params[1], 0);
P = AllocateMatrix(params[0], params[1], 0);
(void)ReadFile(&M, argv[2]);
(void)ReadFile(&N, argv[3]);
}
// M * N on the device
ConvolutionOnDevice(M, N, P);
//Start cpu timing here
cudaEvent_t startCPU, stopCPU;
cudaEventCreate(&startCPU);
cudaEventCreate(&stopCPU);
cudaEventRecord(startCPU, 0);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, N.height, N.width);
//Stop cpu timing here
cudaEventRecord(stopCPU, 0);
cudaEventSynchronize(stopCPU);
float cpuTime;
cudaEventElapsedTime(&cpuTime, startCPU, stopCPU);
cudaEventDestroy(startCPU);
cudaEventDestroy(stopCPU);
//Output timing
printf("CPU time: %f ms. \n", cpuTime);
// in this case check if the result is equivalent to the expected solution
bool res = CompareResults(reference.elements, P.elements, P.width * P.height, 0.01f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void ConvolutionOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Start inclusive timing here
cudaEvent_t startIn, stopIn;
cudaEventCreate(&startIn);
cudaEventCreate(&stopIn);
cudaEventRecord(startIn, 0);
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
//Start exclusive timing here
cudaEvent_t startEx, stopEx;
cudaEventCreate(&startEx);
cudaEventCreate(&stopEx);
cudaEventRecord(startEx, 0);
// Setup the execution configuration
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
dim3 dimGrid((N.width+dimBlock.x-1)/dimBlock.x,(N.height+dimBlock.y-1)/dimBlock.y);
// Launch the device computation threads!
ConvolutionKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd);
//Stop exclusive timing here
cudaEventRecord(stopEx, 0);
cudaEventSynchronize(stopEx);
float exTime;
cudaEventElapsedTime(&exTime, startEx, stopEx);
cudaEventDestroy(startEx);
cudaEventDestroy(stopEx);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
//Stop inclusive timing here
cudaEventRecord(stopIn, 0);
cudaEventSynchronize(stopIn);
float inTime;
cudaEventElapsedTime(&inTime, startIn, stopIn);
cudaEventDestroy(startIn);
cudaEventDestroy(stopIn);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
//Output timing
printf("Inclusive time: %f ms. \n", inTime);
printf("Exclusive time: %f ms. \n", exTime);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
if(rand() % 2)
M.elements[i] = - M.elements[i];
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a host matrix to a constant device matrix
void ConstCopyToDeviceMatrix(float* Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
cudaMemcpyToSymbol(Mdevice, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
//compare the data stored in two arrays on the host
bool CompareResults(float* A, float* B, int elements, float eps)
{
for(unsigned int i = 0; i < elements; i++){
float error = A[i]-B[i];
if(error>eps){
return false;
}
}
return true;
}
bool ReadParams(int* params, int size, char* file_name){
ifstream ifile(file_name);
int i=0;
for(int i=0; i<size; i++){
if(ifile.fail()==false){
ifile>>params[i];
}
}
return (i==size)? 1:0;
}
// Read a 16x16 floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->height * M->width;
std::ifstream ifile(file_name);
for(unsigned int i = 0; i < data_read; i++){
ifile>>M->elements[i];
}
ifile.close();
return data_read;
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
std::ofstream ofile(file_name);
for(unsigned int i = 0; i < M.width*M.height; i++){
ofile<<M.elements[i];
}
ofile.close();
}
|
12461d0e208558f31fab14efdd905970a919c0d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#include <demo_util.h>
#include <cuda_util.h>
#define imin(a,b) (a<b?a:b)
const int N = (1 << 14);
const int threadsPerBlock = 256;
__global__ void dot( double *a, double *b, double *c )
{
__shared__ double localDot[threadsPerBlock]; /* Statically defined */
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int localIndex = threadIdx.x;
double localSum = 0;
while (ix < N) {
localSum += a[ix] * b[ix];
ix += blockDim.x * gridDim.x;
}
/* Store sum computed by this thread */
localDot[localIndex] = localSum;
/* Wait for all threads to get to this point */
__syncthreads();
/* Every block should add up sum computed on
threads in the block */
int i = blockDim.x/2;
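/* Tree reduction: halve the number of active threads each iteration;
this assumes blockDim.x (threadsPerBlock = 256 here) is a power of two */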
while (i != 0)
{
if (localIndex < i)
localDot[localIndex] += localDot[localIndex + i];
__syncthreads();
i /= 2;
}
/* Each block stores local dot product */
if (localIndex == 0)
c[blockIdx.x] = localDot[0];
}
int main( void )
{
double *a, *b, c, *partial_c;
double *dev_a, *dev_b, *dev_partial_c;
int blocksPerGrid = (N+threadsPerBlock-1) / threadsPerBlock;
/* Allocate memory on the CPU */
a = (double*) malloc( N*sizeof(double) );
b = (double*) malloc( N*sizeof(double) );
partial_c = (double*) malloc( blocksPerGrid*sizeof(double) );
/* allocate the memory on the GPU */
CHECK(hipMalloc((void**) &dev_a, N*sizeof(double)));
CHECK(hipMalloc((void**) &dev_b, N*sizeof(double)));
CHECK(hipMalloc((void**) &dev_partial_c, blocksPerGrid*sizeof(double) ) );
/* Define vectors a and b */
for (int i = 0; i < N; i++)
{
a[i] = i;
b[i] = i;
}
/* copy the arrays 'a' and 'b' to the GPU */
CHECK(hipMemcpy(dev_a, a, N*sizeof(double),
hipMemcpyHostToDevice ) );
CHECK(hipMemcpy(dev_b, b, N*sizeof(double),
hipMemcpyHostToDevice ) );
dim3 block(threadsPerBlock); /* Values defined in macros */
dim3 grid(blocksPerGrid); /* defined in macros, above */
hipLaunchKernelGGL(( dot), dim3(grid),dim3(block), 0, 0, dev_a, dev_b,dev_partial_c );
/* copy the array 'c' back from the GPU to the CPU */
CHECK( hipMemcpy( partial_c, dev_partial_c,
blocksPerGrid*sizeof(double),
hipMemcpyDeviceToHost ) );
/* Sum of block sums */
c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
/* Check result */
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
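/* Since a[i] = b[i] = i, the exact dot product is
sum_{i=0}^{N-1} i^2 = (N-1)*N*(2*N-1)/6 */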
double s = sum_squares((double)(N-1));
// double s = N; /* Sum of 1s */
double diff = abs(c - s)/abs(s);
printf("%20s %10f\n","Computed dot product", c);
printf("%20s %10f\n","True dot product", s);
/* free memory on the gpu side */
CHECK(hipFree(dev_a));
CHECK(hipFree(dev_b));
CHECK(hipFree(dev_partial_c));
/* free memory on the cpu side */
free(a );
free(b );
free(partial_c );
}
| 12461d0e208558f31fab14efdd905970a919c0d3.cu | #include <stdio.h>
#include <sys/time.h>
#include <demo_util.h>
#include <cuda_util.h>
#define imin(a,b) (a<b?a:b)
const int N = (1 << 14);
const int threadsPerBlock = 256;
__global__ void dot( double *a, double *b, double *c )
{
__shared__ double localDot[threadsPerBlock]; /* Statically defined */
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int localIndex = threadIdx.x;
double localSum = 0;
while (ix < N) {
localSum += a[ix] * b[ix];
ix += blockDim.x * gridDim.x;
}
/* Store sum computed by this thread */
localDot[localIndex] = localSum;
/* Wait for all threads to get to this point */
__syncthreads();
/* Every block should add up sum computed on
threads in the block */
int i = blockDim.x/2;
while (i != 0)
{
if (localIndex < i)
localDot[localIndex] += localDot[localIndex + i];
__syncthreads();
i /= 2;
}
/* Each block stores local dot product */
if (localIndex == 0)
c[blockIdx.x] = localDot[0];
}
int main( void )
{
double *a, *b, c, *partial_c;
double *dev_a, *dev_b, *dev_partial_c;
int blocksPerGrid = (N+threadsPerBlock-1) / threadsPerBlock;
/* Allocate memory on the CPU */
a = (double*) malloc( N*sizeof(double) );
b = (double*) malloc( N*sizeof(double) );
partial_c = (double*) malloc( blocksPerGrid*sizeof(double) );
/* allocate the memory on the GPU */
CHECK(cudaMalloc((void**) &dev_a, N*sizeof(double)));
CHECK(cudaMalloc((void**) &dev_b, N*sizeof(double)));
CHECK(cudaMalloc((void**) &dev_partial_c, blocksPerGrid*sizeof(double) ) );
/* Define vectors a and b */
for (int i = 0; i < N; i++)
{
a[i] = i;
b[i] = i;
}
/* copy the arrays 'a' and 'b' to the GPU */
CHECK(cudaMemcpy(dev_a, a, N*sizeof(double),
cudaMemcpyHostToDevice ) );
CHECK(cudaMemcpy(dev_b, b, N*sizeof(double),
cudaMemcpyHostToDevice ) );
dim3 block(threadsPerBlock); /* Values defined in macros */
dim3 grid(blocksPerGrid); /* defined in macros, above */
dot<<<grid,block>>>( dev_a, dev_b,dev_partial_c );
/* copy the array 'c' back from the GPU to the CPU */
CHECK( cudaMemcpy( partial_c, dev_partial_c,
blocksPerGrid*sizeof(double),
cudaMemcpyDeviceToHost ) );
/* Sum of block sums */
c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += partial_c[i];
}
/* Check result */
#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
double s = sum_squares((double)(N-1));
// double s = N; /* Sum of 1s */
double diff = abs(c - s)/abs(s);
printf("%20s %10f\n","Computed dot product", c);
printf("%20s %10f\n","True dot product", s);
/* free memory on the gpu side */
CHECK(cudaFree(dev_a));
CHECK(cudaFree(dev_b));
CHECK(cudaFree(dev_partial_c));
/* free memory on the cpu side */
free(a );
free(b );
free(partial_c );
}
|
77c1b8243fe13c54636b5521f42de95715c4b882.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#define cudaCheck(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"cudaAssert: %s at %s:%d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void saxpy(int n, float a, float *x, float *y)
{
// setup
int total_thread_num = gridDim.x * blockDim.x;
int num_per_thread = n / total_thread_num;
int leftover = n % total_thread_num;
int thread_index = blockIdx.x*blockDim.x + threadIdx.x;
int start_index = num_per_thread * thread_index;
if (thread_index < leftover) {
start_index += thread_index;
num_per_thread++;
}
else {
start_index += leftover;
}
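// Threads with index < leftover each take one extra element, so the n
// elements are distributed as evenly as possible across all threads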
// saxpy
for (int i = start_index; i < start_index + num_per_thread; i++) {
y[i] = a*x[i] + y[i];
}
}
int main(void)
{
int nDevices;
// print GPU info
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
int N = 1<<10;
printf("N: %d\n", N);
float *x, *y;
// start profiling
cudaCheck(hipProfilerStart());
// allocate memory with UM
printf("Allocating %d bytes each...\n", (size_t)N*sizeof(float));
cudaCheck(hipMallocManaged(&x, N*sizeof(float)));
cudaCheck(hipMallocManaged(&y, N*sizeof(float)));
// initialization on host -> page fault (CPU)
printf("Initializing...\n");
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// kernel launch -> page fault (GPU)
printf("Launching kernel...\n");
hipLaunchKernelGGL(( saxpy), dim3(min(1024, (N+255)/256)), dim3(256), 0, 0, N, 2.0f, x, y); // watch out for grid size limit
// check for kernel launch error
cudaCheck(hipPeekAtLastError());
// wait for kernel to finish
printf("Synchronizing...\n");
cudaCheck(hipDeviceSynchronize());
// check error -> page fault (CPU)
printf("Checking error...\n");
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
// free memory
printf("Freeing memory...\n");
cudaCheck(hipFree(x));
cudaCheck(hipFree(y));
// end profiling
printf("All done!\n");
cudaCheck(hipProfilerStop());
}
| 77c1b8243fe13c54636b5521f42de95715c4b882.cu | #include <stdio.h>
#include <cuda_profiler_api.h>
#define cudaCheck(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"cudaAssert: %s at %s:%d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void saxpy(int n, float a, float *x, float *y)
{
// setup
int total_thread_num = gridDim.x * blockDim.x;
int num_per_thread = n / total_thread_num;
int leftover = n % total_thread_num;
int thread_index = blockIdx.x*blockDim.x + threadIdx.x;
int start_index = num_per_thread * thread_index;
if (thread_index < leftover) {
start_index += thread_index;
num_per_thread++;
}
else {
start_index += leftover;
}
// saxpy
for (int i = start_index; i < start_index + num_per_thread; i++) {
y[i] = a*x[i] + y[i];
}
}
int main(void)
{
int nDevices;
// print GPU info
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
int N = 1<<10;
printf("N: %d\n", N);
float *x, *y;
// start profiling
cudaCheck(cudaProfilerStart());
// allocate memory with UM
printf("Allocating %d bytes each...\n", (size_t)N*sizeof(float));
cudaCheck(cudaMallocManaged(&x, N*sizeof(float)));
cudaCheck(cudaMallocManaged(&y, N*sizeof(float)));
// initialization on host -> page fault (CPU)
printf("Initializing...\n");
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// kernel launch -> page fault (GPU)
printf("Launching kernel...\n");
saxpy<<<min(1024, (N+255)/256), 256>>>(N, 2.0f, x, y); // watch out for grid size limit
// check for kernel launch error
cudaCheck(cudaPeekAtLastError());
// wait for kernel to finish
printf("Synchronizing...\n");
cudaCheck(cudaDeviceSynchronize());
// check error -> page fault (CPU)
printf("Checking error...\n");
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
// free memory
printf("Freeing memory...\n");
cudaCheck(cudaFree(x));
cudaCheck(cudaFree(y));
// end profiling
printf("All done!\n");
cudaCheck(cudaProfilerStop());
}
|
7cba90029ca7c16ea7e885ea825214adc082c2a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "defs_gpu.cuh"
__global__ void find_cart(float * pc_res_d, float *pc_a_d, float *pc_r_d, int cart_len){
int start = blockIdx.x*cart_len;
for(int i=start;i<(start+cart_len);i++){
//pc_res_d[i]= tex2D(texPC,pc_a_d[i],pc_r_d[i]);
pc_res_d[i]= tex2D(texPC,pc_a_d[i]+0.5f,pc_r_d[i]+0.5f);
}
}
__global__ void find_fdk2(float* vecv_d, float *m_x_d, float *m_y_d, int *i_a_d, int *i_r_d, int im, int na_old, int nrn, int aimin, int w_a, int w_r, size_t pitch_pc){
int rr1, aa1;
rr1 = (i_r_d[blockIdx.x]*blockDim.y+threadIdx.y)*w_r;
aa1 = (i_a_d[blockIdx.x]*blockDim.x+threadIdx.x)*w_a;
int rr, aa, a2, m1, m2;
int p4 = pitch_pc/4;
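// pitch_pc is in bytes; dividing by sizeof(float) gives the row stride in elements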
for(int j=0;j<w_a;j++){
aa = aa1+j;
a2 = (aa+im+aimin)%(na_old);
for(int i=0;i<w_r;i++){
rr = rr1+i;
m1 = rr*p4+aa;
m2 = rr*na_old+a2;
//vecv_d[m1]+= tex2D(texI,m_x_d[m2],m_y_d[m2]);
vecv_d[m1]+= tex2D(texI, m_x_d[m2]+0.5f, m_y_d[m2]+0.5f);
//vecv_d[m1] = m2;
//vecv_d[m1]= m1;//aa1;
//vecv_d[m1]= rr1;//1.2+blockDim.y;//i_r_d[blockIdx.x];//tex2D(texI,m_x_d[m2],m_y_d[m2]);
}
}
}
__global__ void find_fdk3(float* vecv_d, float *m_x_d, float *m_y_d, int *i_a_d, int *i_r_d, int im, int na_old, int nrn, int aimin, int w_a, int w_r, size_t pitch_pc, float vx, float vy){
int rr1, aa1;
rr1 = (i_r_d[blockIdx.x]*blockDim.y+threadIdx.y)*w_r;
aa1 = (i_a_d[blockIdx.x]*blockDim.x+threadIdx.x)*w_a;
int rr, aa, a2, m1, m2;
int p4 = pitch_pc/4;
for(int j=0;j<w_a;j++){
aa = aa1+j;
a2 = (aa+im+aimin)%(na_old);
for(int i=0;i<w_r;i++){
rr = rr1+i;
m1 = rr*p4+aa;
m2 = rr*na_old+a2;
//vecv_d[m1]+= tex2D(texI,m_x_d[m2],m_y_d[m2]);
vecv_d[m1]+= tex2D(texI, m_x_d[m2]+0.5f-vx, m_y_d[m2]+0.5f-vy);
//vecv_d[m1] = m2;
//vecv_d[m1]= m1;//aa1;
//vecv_d[m1]= rr1;//1.2+blockDim.y;//i_r_d[blockIdx.x];//tex2D(texI,m_x_d[m2],m_y_d[m2]);
}
}
}
bool cudaReconstructFDK(MatPar *matp, PolarToCart *PC, allData *aD, OtherParam *other){
int nx, ny;
nx = aD->nx;
ny = matp->y_maxi - matp->y_mini+1;
int deviceCount;
hipGetDeviceCount(&deviceCount);
hipSetDevice(0);
int ustep3 = 32;
dim3 grid_cr(nx/ustep3,ny);
dim3 grid_cr2(1,ny);
int volMatrix;
volMatrix = matp->nr*matp->na;
printf("volMatrix: %i\n",volMatrix);
dim3 gfdk2(PC->ntb,1,1);
dim3 tfdk2(b_a,b_r);
float *veci_d;
float *m_x_d, *m_y_d;
int mem_allm = volMatrix*sizeof(float);
HM_SAFE_CALL(hipMalloc((void**)&m_x_d,mem_allm));
HM_SAFE_CALL(hipMalloc((void**)&m_y_d,mem_allm));
HM_SAFE_CALL(hipMemcpy(m_x_d,matp->m_x,mem_allm,hipMemcpyHostToDevice));
HM_SAFE_CALL(hipMemcpy(m_y_d,matp->m_y,mem_allm,hipMemcpyHostToDevice));
size_t pitch_pc;
float *vecv_d;
HM_SAFE_CALL(hipMallocPitch((void**)&vecv_d, &pitch_pc, PC->nan * sizeof(float), PC->nrn+1));
HM_SAFE_CALL(hipMemset2D(vecv_d, pitch_pc, 0, PC->nan*sizeof(float), PC->nrn+1));
texPC.normalized = false;
texPC.filterMode = hipFilterModeLinear;
texPC.addressMode[0] = hipAddressModeClamp;
texPC.addressMode[1] = hipAddressModeClamp;
hipChannelFormatDesc channelDesc_pc = hipCreateChannelDesc<float>();
hipBindTexture2D(NULL, &texPC, vecv_d, &channelDesc_pc, PC->nan, PC->nrn+1, pitch_pc);
printf("PC_nan: %i\n",PC->nan);
printf("PC_nrn: %i\n",PC->nrn);
printf("pitch_pc: %i\n",pitch_pc);
int mem_size_ntb = PC->ntb*sizeof(Ipp32s);
int *i_a_d, *i_r_d;
HM_SAFE_CALL(hipMalloc((void**)&i_a_d,mem_size_ntb));
HM_SAFE_CALL(hipMalloc((void**)&i_r_d,mem_size_ntb));
HM_SAFE_CALL(hipMemcpy(i_a_d,PC->i_a,mem_size_ntb,hipMemcpyHostToDevice));
HM_SAFE_CALL(hipMemcpy(i_r_d,PC->i_r,mem_size_ntb,hipMemcpyHostToDevice));
size_t pitch;
HM_SAFE_CALL(hipMallocPitch((void**)&veci_d, &pitch, nx * sizeof(float), ny));
texI.normalized = false;
texI.filterMode = hipFilterModeLinear;
texI.addressMode[0] = hipAddressModeClamp;
texI.addressMode[1] = hipAddressModeClamp;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipBindTexture2D(NULL, &texI, veci_d, &channelDesc, nx, ny, pitch);
for(int i=0;i<other->images_to_process;i++){
HM_SAFE_CALL(hipMemcpy2D(veci_d, pitch, matp->ivol+i*nx*ny,nx*sizeof(float),nx*sizeof(float),ny,hipMemcpyHostToDevice));
hipLaunchKernelGGL(( find_fdk3), dim3(gfdk2),dim3(tfdk2), 0, 0, vecv_d, m_x_d, m_y_d, i_a_d, i_r_d, i, matp->na, PC->nrn, PC->aimin, PC->w_a, PC->w_r, pitch_pc, aD->data->shift_x[i], aD->data->shift_y[i]);
//find_fdk2<<<gfdk2,tfdk2>>>(vecv_d, m_x_d, m_y_d, i_a_d, i_r_d, i, matp->na, PC->nrn, PC->aimin, PC->w_a, PC->w_r, pitch_pc);
HM_SAFE_CALL( hipDeviceSynchronize() );
}
int mem_cart = PC->pc_len*sizeof(float);
hipUnbindTexture(&texI);
HM_SAFE_CALL(hipFree(veci_d)); veci_d = NULL;
HM_SAFE_CALL(hipFree(m_x_d)); m_x_d = NULL;
HM_SAFE_CALL(hipFree(m_y_d)); m_y_d = NULL;
HM_SAFE_CALL(hipFree(i_a_d)); i_a_d = NULL;
HM_SAFE_CALL(hipFree(i_r_d)); i_r_d = NULL;
int cart_len = 256;
dim3 tcart(1,1,1);
dim3 gcart(PC->pc_len/cart_len,1,1);
float *pc_r_d, *pc_a_d, *pc_res_d;
HM_SAFE_CALL(hipMalloc((void**)&pc_r_d,mem_cart));
HM_SAFE_CALL(hipMalloc((void**)&pc_a_d,mem_cart));
HM_SAFE_CALL(hipMalloc((void**)&pc_res_d,mem_cart));
HM_SAFE_CALL(hipMemcpy(pc_r_d,PC->pc_r,mem_cart,hipMemcpyHostToDevice));
HM_SAFE_CALL(hipMemcpy(pc_a_d,PC->pc_a,mem_cart,hipMemcpyHostToDevice));
HM_SAFE_CALL(hipMemcpy(pc_res_d,PC->pc_res,mem_cart,hipMemcpyHostToDevice));
hipLaunchKernelGGL(( find_cart), dim3(gcart),dim3(tcart), 0, 0, pc_res_d,pc_a_d,pc_r_d,cart_len);
HM_SAFE_CALL( hipDeviceSynchronize() );
HM_SAFE_CALL(hipMemcpy(PC->pc_res,pc_res_d,mem_cart,hipMemcpyDeviceToHost));
HM_SAFE_CALL( hipDeviceSynchronize() );
hipUnbindTexture(&texPC);
ippsZero_32f(matp->rec,PC->OutputHeight * PC->OutputWidth);
int sp;
sp = 0;
for(int j=0;j<PC->pc_nc;j++){
ippsCopy_32f(PC->pc_res+sp,matp->rec+PC->pc_pos[j],PC->pc_size[j]);
sp+=(PC->pc_size[j]);
}
HM_SAFE_CALL(hipFree(vecv_d)); vecv_d = NULL;
HM_SAFE_CALL(hipFree(pc_r_d)); pc_r_d = NULL;
HM_SAFE_CALL(hipFree(pc_a_d)); pc_a_d = NULL;
HM_SAFE_CALL(hipFree(pc_res_d)); pc_res_d = NULL;
return true;
}
| 7cba90029ca7c16ea7e885ea825214adc082c2a3.cu | #include "defs_gpu.cuh"
__global__ void find_cart(float * pc_res_d, float *pc_a_d, float *pc_r_d, int cart_len){
int start = blockIdx.x*cart_len;
for(int i=start;i<(start+cart_len);i++){
//pc_res_d[i]= tex2D(texPC,pc_a_d[i],pc_r_d[i]);
pc_res_d[i]= tex2D(texPC,pc_a_d[i]+0.5f,pc_r_d[i]+0.5f);
}
}
__global__ void find_fdk2(float* vecv_d, float *m_x_d, float *m_y_d, int *i_a_d, int *i_r_d, int im, int na_old, int nrn, int aimin, int w_a, int w_r, size_t pitch_pc){
int rr1, aa1;
rr1 = (i_r_d[blockIdx.x]*blockDim.y+threadIdx.y)*w_r;
aa1 = (i_a_d[blockIdx.x]*blockDim.x+threadIdx.x)*w_a;
int rr, aa, a2, m1, m2;
int p4 = pitch_pc/4;
for(int j=0;j<w_a;j++){
aa = aa1+j;
a2 = (aa+im+aimin)%(na_old);
for(int i=0;i<w_r;i++){
rr = rr1+i;
m1 = rr*p4+aa;
m2 = rr*na_old+a2;
//vecv_d[m1]+= tex2D(texI,m_x_d[m2],m_y_d[m2]);
vecv_d[m1]+= tex2D(texI, m_x_d[m2]+0.5f, m_y_d[m2]+0.5f);
//vecv_d[m1] = m2;
//vecv_d[m1]= m1;//aa1;
//vecv_d[m1]= rr1;//1.2+blockDim.y;//i_r_d[blockIdx.x];//tex2D(texI,m_x_d[m2],m_y_d[m2]);
}
}
}
__global__ void find_fdk3(float* vecv_d, float *m_x_d, float *m_y_d, int *i_a_d, int *i_r_d, int im, int na_old, int nrn, int aimin, int w_a, int w_r, size_t pitch_pc, float vx, float vy){
int rr1, aa1;
rr1 = (i_r_d[blockIdx.x]*blockDim.y+threadIdx.y)*w_r;
aa1 = (i_a_d[blockIdx.x]*blockDim.x+threadIdx.x)*w_a;
int rr, aa, a2, m1, m2;
int p4 = pitch_pc/4;
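// pitch_pc is a byte pitch returned by cudaMallocPitch; dividing by sizeof(float) (4 bytes) gives the row stride in elements.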
for(int j=0;j<w_a;j++){
aa = aa1+j;
a2 = (aa+im+aimin)%(na_old);
for(int i=0;i<w_r;i++){
rr = rr1+i;
m1 = rr*p4+aa;
m2 = rr*na_old+a2;
//vecv_d[m1]+= tex2D(texI,m_x_d[m2],m_y_d[m2]);
vecv_d[m1]+= tex2D(texI, m_x_d[m2]+0.5f-vx, m_y_d[m2]+0.5f-vy);
//vecv_d[m1] = m2;
//vecv_d[m1]= m1;//aa1;
//vecv_d[m1]= rr1;//1.2+blockDim.y;//i_r_d[blockIdx.x];//tex2D(texI,m_x_d[m2],m_y_d[m2]);
}
}
}
bool cudaReconstructFDK(MatPar *matp, PolarToCart *PC, allData *aD, OtherParam *other){
int nx, ny;
nx = aD->nx;
ny = matp->y_maxi - matp->y_mini+1;
int deviceCount;
cudaGetDeviceCount(&deviceCount);
cudaSetDevice(0);
int ustep3 = 32;
dim3 grid_cr(nx/ustep3,ny);
dim3 grid_cr2(1,ny);
int volMatrix;
volMatrix = matp->nr*matp->na;
printf("volMatrix: %i\n",volMatrix);
dim3 gfdk2(PC->ntb,1,1);
dim3 tfdk2(b_a,b_r);
float *veci_d;
float *m_x_d, *m_y_d;
int mem_allm = volMatrix*sizeof(float);
HM_SAFE_CALL(cudaMalloc((void**)&m_x_d,mem_allm));
HM_SAFE_CALL(cudaMalloc((void**)&m_y_d,mem_allm));
HM_SAFE_CALL(cudaMemcpy(m_x_d,matp->m_x,mem_allm,cudaMemcpyHostToDevice));
HM_SAFE_CALL(cudaMemcpy(m_y_d,matp->m_y,mem_allm,cudaMemcpyHostToDevice));
size_t pitch_pc;
float *vecv_d;
HM_SAFE_CALL(cudaMallocPitch((void**)&vecv_d, &pitch_pc, PC->nan * sizeof(float), PC->nrn+1));
HM_SAFE_CALL(cudaMemset2D(vecv_d, pitch_pc, 0, PC->nan*sizeof(float), PC->nrn+1));
texPC.normalized = false;
texPC.filterMode = cudaFilterModeLinear;
texPC.addressMode[0] = cudaAddressModeClamp;
texPC.addressMode[1] = cudaAddressModeClamp;
cudaChannelFormatDesc channelDesc_pc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(NULL, &texPC, vecv_d, &channelDesc_pc, PC->nan, PC->nrn+1, pitch_pc);
printf("PC_nan: %i\n",PC->nan);
printf("PC_nrn: %i\n",PC->nrn);
printf("pitch_pc: %i\n",pitch_pc);
int mem_size_ntb = PC->ntb*sizeof(Ipp32s);
int *i_a_d, *i_r_d;
HM_SAFE_CALL(cudaMalloc((void**)&i_a_d,mem_size_ntb));
HM_SAFE_CALL(cudaMalloc((void**)&i_r_d,mem_size_ntb));
HM_SAFE_CALL(cudaMemcpy(i_a_d,PC->i_a,mem_size_ntb,cudaMemcpyHostToDevice));
HM_SAFE_CALL(cudaMemcpy(i_r_d,PC->i_r,mem_size_ntb,cudaMemcpyHostToDevice));
size_t pitch;
HM_SAFE_CALL(cudaMallocPitch((void**)&veci_d, &pitch, nx * sizeof(float), ny));
texI.normalized = false;
texI.filterMode = cudaFilterModeLinear;
texI.addressMode[0] = cudaAddressModeClamp;
texI.addressMode[1] = cudaAddressModeClamp;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaBindTexture2D(NULL, &texI, veci_d, &channelDesc, nx, ny, pitch);
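// Each input slice is copied into this pitched buffer inside the loop below and sampled through
// texI with hardware bilinear filtering (filterMode linear, unnormalized coordinates); the +0.5f
// offsets used in the kernels address texel centers.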
for(int i=0;i<other->images_to_process;i++){
HM_SAFE_CALL(cudaMemcpy2D(veci_d, pitch, matp->ivol+i*nx*ny,nx*sizeof(float),nx*sizeof(float),ny,cudaMemcpyHostToDevice));
find_fdk3<<<gfdk2,tfdk2>>>(vecv_d, m_x_d, m_y_d, i_a_d, i_r_d, i, matp->na, PC->nrn, PC->aimin, PC->w_a, PC->w_r, pitch_pc, aD->data->shift_x[i], aD->data->shift_y[i]);
//find_fdk2<<<gfdk2,tfdk2>>>(vecv_d, m_x_d, m_y_d, i_a_d, i_r_d, i, matp->na, PC->nrn, PC->aimin, PC->w_a, PC->w_r, pitch_pc);
HM_SAFE_CALL( cudaThreadSynchronize() );
}
int mem_cart = PC->pc_len*sizeof(float);
cudaUnbindTexture(&texI);
HM_SAFE_CALL(cudaFree(veci_d)); veci_d = NULL;
HM_SAFE_CALL(cudaFree(m_x_d)); m_x_d = NULL;
HM_SAFE_CALL(cudaFree(m_y_d)); m_y_d = NULL;
HM_SAFE_CALL(cudaFree(i_a_d)); i_a_d = NULL;
HM_SAFE_CALL(cudaFree(i_r_d)); i_r_d = NULL;
int cart_len = 256;
dim3 tcart(1,1,1);
dim3 gcart(PC->pc_len/cart_len,1,1);
float *pc_r_d, *pc_a_d, *pc_res_d;
HM_SAFE_CALL(cudaMalloc((void**)&pc_r_d,mem_cart));
HM_SAFE_CALL(cudaMalloc((void**)&pc_a_d,mem_cart));
HM_SAFE_CALL(cudaMalloc((void**)&pc_res_d,mem_cart));
HM_SAFE_CALL(cudaMemcpy(pc_r_d,PC->pc_r,mem_cart,cudaMemcpyHostToDevice));
HM_SAFE_CALL(cudaMemcpy(pc_a_d,PC->pc_a,mem_cart,cudaMemcpyHostToDevice));
HM_SAFE_CALL(cudaMemcpy(pc_res_d,PC->pc_res,mem_cart,cudaMemcpyHostToDevice));
find_cart<<<gcart,tcart>>>(pc_res_d,pc_a_d,pc_r_d,cart_len);
HM_SAFE_CALL( cudaThreadSynchronize() );
HM_SAFE_CALL(cudaMemcpy(PC->pc_res,pc_res_d,mem_cart,cudaMemcpyDeviceToHost));
HM_SAFE_CALL( cudaThreadSynchronize() );
cudaUnbindTexture(&texPC);
ippsZero_32f(matp->rec,PC->OutputHeight * PC->OutputWidth);
int sp;
sp = 0;
for(int j=0;j<PC->pc_nc;j++){
ippsCopy_32f(PC->pc_res+sp,matp->rec+PC->pc_pos[j],PC->pc_size[j]);
sp+=(PC->pc_size[j]);
}
HM_SAFE_CALL(cudaFree(vecv_d)); vecv_d = NULL;
HM_SAFE_CALL(cudaFree(pc_r_d)); pc_r_d = NULL;
HM_SAFE_CALL(cudaFree(pc_a_d)); pc_a_d = NULL;
HM_SAFE_CALL(cudaFree(pc_res_d)); pc_res_d = NULL;
return true;
}
|
f56a53417ebb92f2ef50f4a204558d3a632e4a3f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "uplo_ramp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((uplo_ramp), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((uplo_ramp), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
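// The 10 launches above are warm-up; the loop below times 1000 launches for this (block, matrix) configuration.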
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((uplo_ramp), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f56a53417ebb92f2ef50f4a204558d3a632e4a3f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "uplo_ramp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
uplo_ramp<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
uplo_ramp<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
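// The 10 launches above are warm-up; the loop below times 1000 launches for this (block, matrix) configuration.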
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
uplo_ramp<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c9c0c26588d1e7599cb7068debf8cb8a7ebf9061.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
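// Grid-stride loop: each thread processes elements tid, tid+stride, tid+2*stride, ... so any grid size covers the whole array.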
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in1[tid] + in2[tid];
} | c9c0c26588d1e7599cb7068debf8cb8a7ebf9061.cu | #include "includes.h"
__global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
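// Grid-stride loop: each thread processes elements tid, tid+stride, tid+2*stride, ... so any grid size covers the whole array.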
for (; tid < size; tid += stride)
if (tid < size) out[tid] = in1[tid] + in2[tid];
} |
aa29e63302dcf4b3cfaa5a4f64c003d57ed89aa4.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using matched weights for CBCT
*
*
* CODE by Ander Biguri
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection2.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, hipTextureType3D , hipReadModeElementType> tex;
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
unsigned long long idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
// Using matched weights
__global__ void kernelPixelBackprojection(const Geometry geo,
float* image,
const int indAlpha,
const Point3D deltaX ,
const Point3D deltaY,
const Point3D deltaZ,
const Point3D xyzOrigin,
const Point3D xyzOffset, // this is a direct copy, it has not been scaled
const Point3D uv0Offset, // This is a direct copy, it has not been scaled
const float sinalpha,
const float cosalpha){
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long indZ = blockIdx.z * blockDim.z + threadIdx.z;
//Make sure we dont go out of bounds
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ)
return;
    // Geometric transformations:
//Source, scaled XYZ coordinates
Point3D S;
    S.x=geo.DSO; // we don't scale the x direction, because the detector is only in YZ (and the image is rotated)
S.y=-uv0Offset.x/geo.dDetecU;
S.z=-uv0Offset.y/geo.dDetecV;
// "XYZ" in the scaled coordinate system of the current point. The iamge is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-geo.COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(geo.DSO-geo.DSD /*-DDO*/ - S.x)/vectX;
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+geo.nDetecU/2-0.5;
v=z+geo.nDetecV/2-0.5;
// TODO: put this in a separate kernel?
    // Compute the weight of the matched backprojection, as in doi: 10.1088/0031-9155/56/13/004, eq. (3)
float weigth;
    //Real coordinates of the voxel. Instead of reverting the transformation, it's less math (faster) to compute it from the indexes.
Point3D realvoxel;
realvoxel.x=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realvoxel.y=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y;
realvoxel.z=-geo.sVoxelZ/2+geo.dVoxelZ/2 +indZ*geo.dVoxelZ +xyzOffset.z;
//Real coords of Source
// We already have S.x, and S.y and S.z are always zero. we just need to rotate
S.x= geo.DSO*cosalpha;
S.y=-geo.DSO*sinalpha;
// Real XYZ coordinates of Detector.
Point3D realD, realDaux;
// We know the index of the detector (u,v). Start from there.
realDaux.x=-(geo.DSD-geo.DSO);
realDaux.y=-geo.sDetecU/2+geo.dDetecU/2 + u*geo.dDetecU +uv0Offset.x;
realD.z =-geo.sDetecV/2+geo.dDetecV/2 + v*geo.dDetecV +uv0Offset.y;
//rotate the detector
realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
float L,l;
L = sqrt( (S.x-realD.x)*(S.x-realD.x)+ (S.y-realD.y)*(S.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
l = sqrt( (S.x-realvoxel.x)*(S.x-realvoxel.x)+ (S.y-realvoxel.y)*(S.y-realvoxel.y)+ (S.z-realvoxel.z)*(S.z-realvoxel.z));
weigth=L*L*L/(geo.DSD*l*l);
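    // i.e. weight = |S-D|^3 / (DSD * |S-P|^2), where L is the source-to-detector-pixel distance and l the source-to-voxel distance.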
    // Get the value at the computed (U,V) and multiply by the corresponding weight.
image[idx]+=tex3D(tex, u +0.5 ,
v +0.5 ,
indAlpha+0.5)
*weigth;
}
int voxel_backprojection2(float const * const projections, Geometry geo, float* result,float const * const alphas,int nalpha){
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
hipArray *d_projectiondata = 0;
const hipExtent extent = make_hipExtent(geo.nDetecU,geo.nDetecV,nalpha);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
    hipMemcpy3D(&copyParams);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = hipFilterModeLinear;
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
hipMalloc((void**)&dimage, num_bytes);
hipMemset(dimage,0,num_bytes);
cudaCheckErrors("hipMalloc fail");
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec;
// time the kernel?
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
int divx,divy,divz;
divx=10;
divy=10;
divz=10;
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,divz);
// Main loop
float sinalpha, cosalpha;
for (unsigned int i=0;i<nalpha;i++){
geo.alpha=-alphas[i];
sinalpha=sin(geo.alpha);
cosalpha=cos(geo.alpha);
computeDeltasCube(geo,geo.alpha,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ);
offOrig.x=geo.offOrigX[i];
offOrig.y=geo.offOrigY[i];
offOrig.z=geo.offOrigZ[i];
offDetec.x=geo.offDetecU[i];
offDetec.y=geo.offDetecV[i];
hipLaunchKernelGGL(( kernelPixelBackprojection), dim3(grid),dim3(block), 0, 0,
geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
cudaCheckErrors("Kernel fail");
}
// If we are timing this
if (timekernel){
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
}
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM=60
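    // Apply the constant scaling factor (voxel volume / detector pixel area) to the whole volume in one grid-stride pass.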
hipLaunchKernelGGL(( matrixConstantMultiply), dim3(60),dim3(MAXTREADS), 0, 0, geo,dimage,geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy fail");
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dimage);
hipFreeArray(d_projectiondata);
cudaCheckErrors("hipFree d_imagedata fail");
//hipDeviceReset();
return 0;
}
#ifndef BACKPROJECTION_HPP
void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ){
Point3D P0, Px0,Py0,Pz0;
// Get coords of Img(0,0,0)
P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
    // Get coords of the next voxel in each direction
Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x;
Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y;
Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ;
// Rotate image in the opposite direction of what the detector would rotate. We will keep detector still while the image
    // changes to accommodate any needed geometric transformation.
    Point3D P, Px,Py,Pz; // We need other auxiliary variables to be able to perform the rotation, or we would overwrite values!
P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z;
Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z;
Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z;
Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z;
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
// Compute unit vector of change between voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
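    // deltaX/deltaY/deltaZ are the per-index steps of the rotated, detector-scaled voxel grid; the kernel evaluates
    // xyzOrigin + indX*deltaX + indY*deltaY + indZ*deltaZ instead of rotating every voxel individually.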
// Detector offset is encoded in the image.
P.z =P.z-geo.offDetecV[i]/geo.dDetecV;
P.y =P.y-geo.offDetecU[i]/geo.dDetecU;
*xyzorigin=P;
}
#endif | aa29e63302dcf4b3cfaa5a4f64c003d57ed89aa4.cu | /*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using matched weights for CBCT
*
*
* CODE by Ander Biguri
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection.hpp"
#include "voxel_backprojection2.hpp"
#include "mex.h"
#include <math.h>
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
unsigned long long idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
// Using matched weights
__global__ void kernelPixelBackprojection(const Geometry geo,
float* image,
const int indAlpha,
const Point3D deltaX ,
const Point3D deltaY,
const Point3D deltaZ,
const Point3D xyzOrigin,
const Point3D xyzOffset, // this is a direct copy, it has not been scaled
const Point3D uv0Offset, // This is a direct copy, it has not been scaled
const float sinalpha,
const float cosalpha){
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long indZ = blockIdx.z * blockDim.z + threadIdx.z;
//Make sure we dont go out of bounds
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |indZ>=geo.nVoxelZ)
return;
    // Geometric transformations:
//Source, scaled XYZ coordinates
Point3D S;
    S.x=geo.DSO; // we don't scale the x direction, because the detector is only in YZ (and the image is rotated)
S.y=-uv0Offset.x/geo.dDetecU;
S.z=-uv0Offset.y/geo.dDetecV;
// "XYZ" in the scaled coordinate system of the current point. The iamge is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-geo.COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=(geo.DSO-geo.DSD /*-DDO*/ - S.x)/vectX;
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+geo.nDetecU/2-0.5;
v=z+geo.nDetecV/2-0.5;
// TODO: put this in a separate kernel?
    // Compute the weight of the matched backprojection, as in doi: 10.1088/0031-9155/56/13/004, eq. (3)
float weigth;
    //Real coordinates of the voxel. Instead of reverting the transformation, it's less math (faster) to compute it from the indexes.
Point3D realvoxel;
realvoxel.x=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x;
realvoxel.y=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y;
realvoxel.z=-geo.sVoxelZ/2+geo.dVoxelZ/2 +indZ*geo.dVoxelZ +xyzOffset.z;
//Real coords of Source
// We already have S.x, and S.y and S.z are always zero. we just need to rotate
S.x= geo.DSO*cosalpha;
S.y=-geo.DSO*sinalpha;
// Real XYZ coordinates of Detector.
Point3D realD, realDaux;
// We know the index of the detector (u,v). Start from there.
realDaux.x=-(geo.DSD-geo.DSO);
realDaux.y=-geo.sDetecU/2+geo.dDetecU/2 + u*geo.dDetecU +uv0Offset.x;
realD.z =-geo.sDetecV/2+geo.dDetecV/2 + v*geo.dDetecV +uv0Offset.y;
//rotate the detector
realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
float L,l;
L = sqrt( (S.x-realD.x)*(S.x-realD.x)+ (S.y-realD.y)*(S.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
l = sqrt( (S.x-realvoxel.x)*(S.x-realvoxel.x)+ (S.y-realvoxel.y)*(S.y-realvoxel.y)+ (S.z-realvoxel.z)*(S.z-realvoxel.z));
weigth=L*L*L/(geo.DSD*l*l);
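    // i.e. weight = |S-D|^3 / (DSD * |S-P|^2), where L is the source-to-detector-pixel distance and l the source-to-voxel distance.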
    // Get the value at the computed (U,V) and multiply by the corresponding weight.
image[idx]+=tex3D(tex, u +0.5 ,
v +0.5 ,
indAlpha+0.5)
*weigth;
}
int voxel_backprojection2(float const * const projections, Geometry geo, float* result,float const * const alphas,int nalpha){
/*
* Allocate texture memory on the device
*/
// copy data to CUDA memory
cudaArray *d_projectiondata = 0;
const cudaExtent extent = make_cudaExtent(geo.nDetecU,geo.nDetecV,nalpha);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_projectiondata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
    cudaMemcpy3D(&copyParams);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = cudaFilterModeLinear;
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_projectiondata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
// Allocate result image memory
size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float);
float* dimage;
cudaMalloc((void**)&dimage, num_bytes);
cudaMemset(dimage,0,num_bytes);
cudaCheckErrors("cudaMalloc fail");
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec;
// time the kernel?
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
int divx,divy,divz;
divx=10;
divy=10;
divz=10;
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geo.nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,divz);
// Main loop
float sinalpha, cosalpha;
for (unsigned int i=0;i<nalpha;i++){
geo.alpha=-alphas[i];
sinalpha=sin(geo.alpha);
cosalpha=cos(geo.alpha);
computeDeltasCube(geo,geo.alpha,i,&xyzOrigin,&deltaX,&deltaY,&deltaZ);
offOrig.x=geo.offOrigX[i];
offOrig.y=geo.offOrigY[i];
offOrig.z=geo.offOrigZ[i];
offDetec.x=geo.offDetecU[i];
offDetec.y=geo.offDetecV[i];
kernelPixelBackprojection<<<grid,block>>>
(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
cudaCheckErrors("Kernel fail");
}
// If we are timing this
if (timekernel){
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
mexPrintf("%f\n" ,elapsedTime);
}
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM=60
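    // Apply the constant scaling factor (voxel volume / detector pixel area) to the whole volume in one grid-stride pass.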
matrixConstantMultiply<<<60,MAXTREADS>>>( geo,dimage,geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy fail");
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dimage);
cudaFreeArray(d_projectiondata);
cudaCheckErrors("cudaFree d_imagedata fail");
//cudaDeviceReset();
return 0;
}
#ifndef BACKPROJECTION_HPP
void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ){
Point3D P0, Px0,Py0,Pz0;
// Get coords of Img(0,0,0)
P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
    // Get coords of the next voxel in each direction
Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x;
Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y;
Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ;
// Rotate image in the opposite direction of what the detector would rotate. We will keep detector still while the image
    // changes to accommodate any needed geometric transformation.
    Point3D P, Px,Py,Pz; // We need other auxiliary variables to be able to perform the rotation, or we would overwrite values!
P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z;
Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z;
Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z;
Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z;
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
// Compute unit vector of change between voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
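    // deltaX/deltaY/deltaZ are the per-index steps of the rotated, detector-scaled voxel grid; the kernel evaluates
    // xyzOrigin + indX*deltaX + indY*deltaY + indZ*deltaZ instead of rotating every voxel individually.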
// Detector offset is encoded in the image.
P.z =P.z-geo.offDetecV[i]/geo.dDetecV;
P.y =P.y-geo.offDetecU[i]/geo.dDetecU;
*xyzorigin=P;
}
#endif |
a5e09fd13b1c2d4b62d5b08446220c1ae4c35c62.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <hip/hip_runtime.h>
#include "reference.h"
#define GPU_THREADS 256
#define KERNEL_LOOP(index, range) \
for (int index = blockIdx.x * blockDim.x + threadIdx.x; \
index < (range); index += blockDim.x * gridDim.x)
template <typename T>
__global__
void SwishKernel(const int N, const T* X, T* Y)
{
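  // swish(x) = x * sigmoid(x) = x / (1 + exp(-x)); __ldg reads X through the read-only data cache.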
KERNEL_LOOP(i, N) {
Y[i] = __ldg(X + i) / (T(1) + exp(-__ldg(X + i)));
}
}
template <typename T>
__global__
void SwishGradientKernel(
const int N,
const T* X,
const T* Y,
const T* dY,
T* dX)
{
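  // d/dx swish(x) = sigmoid(x) + swish(x)*(1 - sigmoid(x)) = swish(x) + sigmoid(x)*(1 - swish(x));
  // with Y = swish(X) this gives dX = dY * (Y + (1 - Y) * sigmoid(X)).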
KERNEL_LOOP(i, N) {
dX[i] = __ldg(dY + i) *
(__ldg(Y + i) + (T(1) - __ldg(Y + i)) / (T(1) + exp(-__ldg(X + i))));
}
}
template<typename T>
void eval_swish (const int N, const int repeat) {
size_t size_bytes = N * sizeof(T);
T *h_X = (T*) malloc (size_bytes);
T *h_Y = (T*) malloc (size_bytes);
T *h_dY = (T*) malloc (size_bytes);
T *h_dX = (T*) malloc (size_bytes);
T *r_Y = (T*) malloc (size_bytes);
T *r_dX = (T*) malloc (size_bytes);
std::default_random_engine gen (123);
std::uniform_real_distribution<float> distr (-2.f, 2.f);
for (int i = 0; i < N; i++) {
h_X[i] = distr(gen);
h_dY[i] = distr(gen);
}
T *d_X, *d_Y, *d_dX, *d_dY;
hipMalloc((void**)&d_X, size_bytes);
hipMemcpy(d_X, h_X, size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_Y, size_bytes);
hipMalloc((void**)&d_dY, size_bytes);
hipMemcpy(d_dY, h_dY, size_bytes, hipMemcpyHostToDevice);
hipMalloc((void**)&d_dX, size_bytes);
dim3 grid ((N + GPU_THREADS - 1) / GPU_THREADS);
dim3 block (GPU_THREADS);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( SwishKernel) , dim3(grid), dim3(block), 0, 0, N, d_X, d_Y);
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of Swish kernel: %f (us)\n", (time * 1e-3f) / repeat);
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
hipLaunchKernelGGL(( SwishGradientKernel) , dim3(grid), dim3(block), 0, 0, N, d_X, d_Y, d_dY, d_dX);
hipDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of SwishGradient kernel: %f (us)\n", (time * 1e-3f) / repeat);
// verify
hipMemcpy(h_dX, d_dX, size_bytes, hipMemcpyDeviceToHost);
hipMemcpy(h_Y, d_Y, size_bytes, hipMemcpyDeviceToHost);
reference (N, h_X, r_Y, r_dX, h_dY);
bool ok = true;
for (int i = 0; i < N; i++) {
if (fabs(h_dX[i] - r_dX[i]) > 1e-3 || fabs(h_Y[i] - r_Y[i]) > 1e-3) {
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
hipFree(d_X);
hipFree(d_Y);
hipFree(d_dX);
hipFree(d_dY);
free(h_X);
free(h_Y);
free(h_dX);
free(h_dY);
free(r_dX);
free(r_Y);
}
int main(int argc, char* argv[])
{
if (argc != 3) {
printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
return 1;
}
const int N = atoi(argv[1]);
const int repeat = atoi(argv[2]);
eval_swish<float>(N, repeat);
return 0;
}
| a5e09fd13b1c2d4b62d5b08446220c1ae4c35c62.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <random>
#include <cuda.h>
#include "reference.h"
#define GPU_THREADS 256
#define KERNEL_LOOP(index, range) \
for (int index = blockIdx.x * blockDim.x + threadIdx.x; \
index < (range); index += blockDim.x * gridDim.x)
template <typename T>
__global__
void SwishKernel(const int N, const T* X, T* Y)
{
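  // swish(x) = x * sigmoid(x) = x / (1 + exp(-x)); __ldg reads X through the read-only data cache.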
KERNEL_LOOP(i, N) {
Y[i] = __ldg(X + i) / (T(1) + exp(-__ldg(X + i)));
}
}
template <typename T>
__global__
void SwishGradientKernel(
const int N,
const T* X,
const T* Y,
const T* dY,
T* dX)
{
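  // d/dx swish(x) = sigmoid(x) + swish(x)*(1 - sigmoid(x)) = swish(x) + sigmoid(x)*(1 - swish(x));
  // with Y = swish(X) this gives dX = dY * (Y + (1 - Y) * sigmoid(X)).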
KERNEL_LOOP(i, N) {
dX[i] = __ldg(dY + i) *
(__ldg(Y + i) + (T(1) - __ldg(Y + i)) / (T(1) + exp(-__ldg(X + i))));
}
}
template<typename T>
void eval_swish (const int N, const int repeat) {
size_t size_bytes = N * sizeof(T);
T *h_X = (T*) malloc (size_bytes);
T *h_Y = (T*) malloc (size_bytes);
T *h_dY = (T*) malloc (size_bytes);
T *h_dX = (T*) malloc (size_bytes);
T *r_Y = (T*) malloc (size_bytes);
T *r_dX = (T*) malloc (size_bytes);
std::default_random_engine gen (123);
std::uniform_real_distribution<float> distr (-2.f, 2.f);
for (int i = 0; i < N; i++) {
h_X[i] = distr(gen);
h_dY[i] = distr(gen);
}
T *d_X, *d_Y, *d_dX, *d_dY;
cudaMalloc((void**)&d_X, size_bytes);
cudaMemcpy(d_X, h_X, size_bytes, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_Y, size_bytes);
cudaMalloc((void**)&d_dY, size_bytes);
cudaMemcpy(d_dY, h_dY, size_bytes, cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_dX, size_bytes);
dim3 grid ((N + GPU_THREADS - 1) / GPU_THREADS);
dim3 block (GPU_THREADS);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
SwishKernel <<<grid, block>>> (N, d_X, d_Y);
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of Swish kernel: %f (us)\n", (time * 1e-3f) / repeat);
start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++)
SwishGradientKernel <<<grid, block>>> (N, d_X, d_Y, d_dY, d_dX);
cudaDeviceSynchronize();
end = std::chrono::steady_clock::now();
time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of SwishGradient kernel: %f (us)\n", (time * 1e-3f) / repeat);
// verify
cudaMemcpy(h_dX, d_dX, size_bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(h_Y, d_Y, size_bytes, cudaMemcpyDeviceToHost);
reference (N, h_X, r_Y, r_dX, h_dY);
bool ok = true;
for (int i = 0; i < N; i++) {
if (fabs(h_dX[i] - r_dX[i]) > 1e-3 || fabs(h_Y[i] - r_Y[i]) > 1e-3) {
ok = false;
break;
}
}
printf("%s\n", ok ? "PASS" : "FAIL");
cudaFree(d_X);
cudaFree(d_Y);
cudaFree(d_dX);
cudaFree(d_dY);
free(h_X);
free(h_Y);
free(h_dX);
free(h_dY);
free(r_dX);
free(r_Y);
}
int main(int argc, char* argv[])
{
if (argc != 3) {
printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
return 1;
}
const int N = atoi(argv[1]);
const int repeat = atoi(argv[2]);
eval_swish<float>(N, repeat);
return 0;
}
|
73debecf0c5a59b7f10c06ce0b0a49ce50d3163b.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/weight_only_linear_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/datatype_traits.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/weight_only_gemv.h"
#if defined(PADDLE_WITH_CUTLASS)
#include "paddle/phi/kernels/fusion/cutlass/cutlass_kernels/fpA_intB_gemm/fpA_intB_gemm_template.h"
#endif
namespace phi {
template <typename T, typename Context>
void WeightOnlyLinearKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& weight,
const paddle::optional<DenseTensor>& bias,
const DenseTensor& weight_scale,
const std::string& weight_dtype,
DenseTensor* out) {
dev_ctx.template Alloc<T>(out);
const T* x_data = x.data<T>();
const int8_t* weight_data = weight.data<int8_t>();
const T* bias_data = bias ? bias.get().data<T>() : nullptr;
const float* weight_scale_data = weight_scale.data<float>();
T* out_data = out->data<T>();
const auto x_dims = x.dims();
const auto w_dims = weight.dims();
int n = weight_scale.dims()[0];
int k = w_dims[1];
int m = x.numel() / k;
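  // As used below: n (output channels) is taken from weight_scale, k = w_dims[1] is the reduction
  // dimension, and m is the number of activation rows in x.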
// m > 1: run gemm
if (m > 1 || weight_dtype == "int4") {
#if defined(PADDLE_WITH_CUTLASS)
if (weight_dtype == "int8") {
auto mixed_gemm_runner =
CutlassFpAIntBGemmRunner<typename PDDataTypeTraits<T>::DataType,
uint8_t>();
      int mixgemm_max_size = std::max(m, k);
DenseTensor mixgemm_workspace;
int64_t mixgemm_workspace_size_bytes = mixed_gemm_runner.getWorkspaceSize(
m, mixgemm_max_size, mixgemm_max_size);
mixgemm_workspace.Resize({mixgemm_workspace_size_bytes});
dev_ctx.template Alloc<uint8_t>(&mixgemm_workspace);
char* mixgemm_workspace_data =
reinterpret_cast<char*>(mixgemm_workspace.data<uint8_t>());
if (bias_data) {
mixed_gemm_runner.gemm_bias_act(
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
x_data),
reinterpret_cast<const uint8_t*>(weight_data),
weight_scale_data,
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
bias_data),
reinterpret_cast<typename PDDataTypeTraits<T>::DataType*>(out_data),
m,
n,
k,
"none",
mixgemm_workspace_data,
mixgemm_workspace_size_bytes,
dev_ctx.stream());
} else {
mixed_gemm_runner.gemm(
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
x_data),
reinterpret_cast<const uint8_t*>(weight_data),
weight_scale_data,
reinterpret_cast<typename PDDataTypeTraits<T>::DataType*>(out_data),
m,
n,
k,
mixgemm_workspace_data,
mixgemm_workspace_size_bytes,
dev_ctx.stream());
}
} else {
auto mixed_gemm_runner =
CutlassFpAIntBGemmRunner<typename PDDataTypeTraits<T>::DataType,
cutlass::uint4b_t>();
      int mixgemm_max_size = std::max(m, k);
DenseTensor mixgemm_workspace;
int64_t mixgemm_workspace_size_bytes = mixed_gemm_runner.getWorkspaceSize(
m, mixgemm_max_size, mixgemm_max_size);
mixgemm_workspace.Resize({mixgemm_workspace_size_bytes});
dev_ctx.template Alloc<uint8_t>(&mixgemm_workspace);
char* mixgemm_workspace_data =
reinterpret_cast<char*>(mixgemm_workspace.data<uint8_t>());
if (bias_data) {
mixed_gemm_runner.gemm_bias_act(
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
x_data),
reinterpret_cast<const cutlass::uint4b_t*>(weight_data),
weight_scale_data,
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
bias_data),
reinterpret_cast<typename PDDataTypeTraits<T>::DataType*>(out_data),
m,
n,
k,
"none",
mixgemm_workspace_data,
mixgemm_workspace_size_bytes,
dev_ctx.stream());
} else {
mixed_gemm_runner.gemm(
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
x_data),
reinterpret_cast<const cutlass::uint4b_t*>(weight_data),
weight_scale_data,
reinterpret_cast<typename PDDataTypeTraits<T>::DataType*>(out_data),
m,
n,
k,
mixgemm_workspace_data,
mixgemm_workspace_size_bytes,
dev_ctx.stream());
}
}
#else
PADDLE_THROW(phi::errors::Unimplemented(
"Please compile with cutlass to make cutlass available"));
#endif
} else { // m == 1: gemv
if (weight_dtype == "int8") {
GemvWeightonlyInt8Wrapper<T, Context>(dev_ctx,
x_data,
weight_data,
bias_data,
weight_scale_data,
n,
k,
"None",
out->data<T>());
} // TODO(lizhenyun) support weight_only_gemv for int4.
}
}
} // namespace phi
PD_REGISTER_KERNEL(weight_only_linear,
GPU,
ALL_LAYOUT,
phi::WeightOnlyLinearKernel,
phi::dtype::float16,
phi::dtype::bfloat16) {}
| 73debecf0c5a59b7f10c06ce0b0a49ce50d3163b.cu | // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/weight_only_linear_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/datatype_traits.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/weight_only_gemv.h"
#if defined(PADDLE_WITH_CUTLASS)
#include "paddle/phi/kernels/fusion/cutlass/cutlass_kernels/fpA_intB_gemm/fpA_intB_gemm_template.h"
#endif
namespace phi {
template <typename T, typename Context>
void WeightOnlyLinearKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& weight,
const paddle::optional<DenseTensor>& bias,
const DenseTensor& weight_scale,
const std::string& weight_dtype,
DenseTensor* out) {
dev_ctx.template Alloc<T>(out);
const T* x_data = x.data<T>();
const int8_t* weight_data = weight.data<int8_t>();
const T* bias_data = bias ? bias.get().data<T>() : nullptr;
const float* weight_scale_data = weight_scale.data<float>();
T* out_data = out->data<T>();
const auto x_dims = x.dims();
const auto w_dims = weight.dims();
int n = weight_scale.dims()[0];
int k = w_dims[1];
int m = x.numel() / k;
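  // As used below: n (output channels) is taken from weight_scale, k = w_dims[1] is the reduction
  // dimension, and m is the number of activation rows in x.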
// m > 1: run gemm
if (m > 1 || weight_dtype == "int4") {
#if defined(PADDLE_WITH_CUTLASS)
if (weight_dtype == "int8") {
auto mixed_gemm_runner =
CutlassFpAIntBGemmRunner<typename PDDataTypeTraits<T>::DataType,
uint8_t>();
int mixgemm_max_size = std::max(m, k);
DenseTensor mixgemm_workspace;
int64_t mixgemm_workspace_size_bytes = mixed_gemm_runner.getWorkspaceSize(
m, mixgemm_max_size, mixgemm_max_size);
mixgemm_workspace.Resize({mixgemm_workspace_size_bytes});
dev_ctx.template Alloc<uint8_t>(&mixgemm_workspace);
char* mixgemm_workspace_data =
reinterpret_cast<char*>(mixgemm_workspace.data<uint8_t>());
if (bias_data) {
mixed_gemm_runner.gemm_bias_act(
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
x_data),
reinterpret_cast<const uint8_t*>(weight_data),
weight_scale_data,
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
bias_data),
reinterpret_cast<typename PDDataTypeTraits<T>::DataType*>(out_data),
m,
n,
k,
"none",
mixgemm_workspace_data,
mixgemm_workspace_size_bytes,
dev_ctx.stream());
} else {
mixed_gemm_runner.gemm(
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
x_data),
reinterpret_cast<const uint8_t*>(weight_data),
weight_scale_data,
reinterpret_cast<typename PDDataTypeTraits<T>::DataType*>(out_data),
m,
n,
k,
mixgemm_workspace_data,
mixgemm_workspace_size_bytes,
dev_ctx.stream());
}
} else {
auto mixed_gemm_runner =
CutlassFpAIntBGemmRunner<typename PDDataTypeTraits<T>::DataType,
cutlass::uint4b_t>();
int mixgemm_max_size = std::max(m, k);
DenseTensor mixgemm_workspace;
int64_t mixgemm_workspace_size_bytes = mixed_gemm_runner.getWorkspaceSize(
m, mixgemm_max_size, mixgemm_max_size);
mixgemm_workspace.Resize({mixgemm_workspace_size_bytes});
dev_ctx.template Alloc<uint8_t>(&mixgemm_workspace);
char* mixgemm_workspace_data =
reinterpret_cast<char*>(mixgemm_workspace.data<uint8_t>());
if (bias_data) {
mixed_gemm_runner.gemm_bias_act(
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
x_data),
reinterpret_cast<const cutlass::uint4b_t*>(weight_data),
weight_scale_data,
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
bias_data),
reinterpret_cast<typename PDDataTypeTraits<T>::DataType*>(out_data),
m,
n,
k,
"none",
mixgemm_workspace_data,
mixgemm_workspace_size_bytes,
dev_ctx.stream());
} else {
mixed_gemm_runner.gemm(
reinterpret_cast<const typename PDDataTypeTraits<T>::DataType*>(
x_data),
reinterpret_cast<const cutlass::uint4b_t*>(weight_data),
weight_scale_data,
reinterpret_cast<typename PDDataTypeTraits<T>::DataType*>(out_data),
m,
n,
k,
mixgemm_workspace_data,
mixgemm_workspace_size_bytes,
dev_ctx.stream());
}
}
#else
PADDLE_THROW(phi::errors::Unimplemented(
"Please compile with cutlass to make cutlass available"));
#endif
} else { // m == 1: gemv
if (weight_dtype == "int8") {
GemvWeightonlyInt8Wrapper<T, Context>(dev_ctx,
x_data,
weight_data,
bias_data,
weight_scale_data,
n,
k,
"None",
out->data<T>());
} // TODO(lizhenyun) support weight_only_gemv for int4.
}
}
} // namespace phi
PD_REGISTER_KERNEL(weight_only_linear,
GPU,
ALL_LAYOUT,
phi::WeightOnlyLinearKernel,
phi::dtype::float16,
phi::dtype::bfloat16) {}
|
a04d77411846d3f6d70ae86a1ba68270c124e499.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
 * CUDA function for backprojection using FDK weights for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "voxel_backprojection2.hpp"
#include "TIGRE_common.hpp"
#include <math.h>
#include "GpuIds.hpp"
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
// this definitionmust go here.
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream,int nStreamDevice,bool allocate);
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by a single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 7 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, xyzOffset, uv0Offset, S
// So we need to keep PROJ_PER_KERNEL*7 values in our deltas array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// We also need to store sinalpha, cosalpha, COR, DSD and DSO for each projection (five floats per projection)
__constant__ float projSinCosArray2Dev[5*PROJ_PER_KERNEL];
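// Per-projection layout: projParamsArray2Dev stores {deltaX, deltaY, deltaZ, xyzOrigin, xyzOffset, uv0Offset, S}
// and projSinCosArray2Dev stores {sinalpha, cosalpha, COR, DSD, DSO}, matching the reads in kernelPixelBackprojection.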
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojectionFDK
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, hipTextureObject_t tex)
{
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we don't go out of bounds
if (indX>=geo.nVoxelX || indY>=geo.nVoxelY || startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the curent 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our voxelColumn values will be updated by however many projections are left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArray2Dev[7*projNumber]; // 7*projNumber because we have 7 Point3D values per projection
Point3D deltaY = projParamsArray2Dev[7*projNumber+1];
Point3D deltaZ = projParamsArray2Dev[7*projNumber+2];
Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3];
Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4];
Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5];
Point3D S = projParamsArray2Dev[7*projNumber+6];
float sinalpha = projSinCosArray2Dev[5*projNumber]; // 5*projNumber because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
float cosalpha = projSinCosArray2Dev[5*projNumber+1];
float COR = projSinCosArray2Dev[5*projNumber+2];
float DSD = projSinCosArray2Dev[5*projNumber+3];
float DSO = projSinCosArray2Dev[5*projNumber+4];
// Precomputations for the weights:
//Real coords of Source
// We already have S.x (geo.DSO), and S.y and S.z are always zero. we just need to rotate
Point3D realS;
realS.x= DSO*cosalpha;
realS.y=-DSO*sinalpha;
realS.z=0;
Point3D realvoxel_init;
realvoxel_init.x=-geo.sVoxelX/2+geo.dVoxelX/2+xyzOffset.x;
realvoxel_init.y=-geo.sVoxelY/2+geo.dVoxelY/2+xyzOffset.y;
realvoxel_init.z=-geo.sVoxelZ/2+geo.dVoxelZ/2+xyzOffset.z;
// Real XYZ coordinates of Detector.
Point3D realD, realDaux;
// We know the index of the detector (u,v). Start from there.
realDaux.x=-(DSD-DSO);
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
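// (t is chosen so that the ray S + t*(P-S) reaches x = DSO-DSD, i.e. the detector plane in this rotated,
// detector-scaled frame; the resulting (y,z) are then shifted to detector indices (u,v) below.)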
float t=__fdividef(DSO-DSD-S.x,vectX);
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+(float)geo.nDetecU*0.5f;
v=z+(float)geo.nDetecV*0.5f;
#if IS_FOR_MATLAB_TIGRE
float sample=tex3D<float>(tex, v, u ,indAlpha+0.5f);
#else
float sample=tex3D<float>(tex, u, v ,indAlpha+0.5f);
#endif
float weigth=0;
//
//
//
// IMPORTANT: The weights are almost 50% of the computational time. Is there a way of speeding this up??
//
//Real coordinates of the voxel. Instead of reversing the transformation, it is less math (faster) to compute them from the indexes.
Point3D realvoxel;
realvoxel.x=realvoxel_init.x+indX*geo.dVoxelX;
realvoxel.y=realvoxel_init.y+indY*geo.dVoxelY;
realvoxel.z=realvoxel_init.z+indZ*geo.dVoxelZ;
realDaux.y=(-geo.sDetecU+geo.dDetecU)*0.5f + u*geo.dDetecU +uv0Offset.x;
realD.z =(-geo.sDetecV+geo.dDetecV)*0.5f + v*geo.dDetecV +uv0Offset.y;
//rotate the detector
realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
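// L is the source-to-detector-pixel distance and lsq the squared source-to-voxel distance used in the distance weighting below.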
float L,lsq;
L = __fsqrt_rd( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
lsq = (realS.x-realvoxel.x)*(realS.x-realvoxel.x)
+ (realS.y-realvoxel.y)*(realS.y-realvoxel.y)
+ (realS.z-realvoxel.z)*(realS.z-realvoxel.z);
weigth=__fdividef(L*L*L,(DSD*lsq));
// weigth=1;
// Get Value in the computed (U,V) and multiply by the corresponding weigth.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=sample* weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the updated value (accumulated from MULTIPLE projections) back to the 3D volume in main memory.
// The accumulation itself was done in the local (register) array, avoiding reads/writes from the slow main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojection
//______________________________________________________________________________
//
// Function: voxel_backprojection2
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection2(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha, const GpuIds& gpuids){
// Prepare for MultiGPU
int deviceCount = gpuids.GetLength();
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
}
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
// Check the available devices, and if they are the same
if (!gpuids.AreEqualDevices()) {
mexWarnMsgIdAndTxt("Atb:Voxel_backprojection2:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed.");
}
int dev;
// Split the CT problem
unsigned int split_image;
unsigned int split_projections;
splitCTbackprojection(gpuids,geo,nalpha,&split_image,&split_projections);
// Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the
// image slices. The rest of the Geometry is the same
Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);
// Now let's allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
// in the previous section this should leave enough space for the textures.
size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
float** dimage=(float**)malloc(deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMalloc((void**)&dimage[dev], num_bytes_img);
cudaCheckErrors("hipMalloc fail");
}
//Pagelock memory for synchronous copy.
// Lets try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,gpuids[0]);
#endif
// Empirical testing shows that when the image does not need to be split (split_image==1, which also implies the image is not very big), the time to
// pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported & split_image>1){
hipHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable);
}
if (isHostRegisterSupported ){
hipHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
//If it is the first time, lets make sure our image is zeroed.
int nStreamDevice=2;
int nStreams=deviceCount*nStreamDevice;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
for (int i = 0; i < nStreamDevice; ++i){
hipStreamCreate(&stream[i+dev*nStreamDevice]);
}
}
// Kernel auxiliary variables
Point3D* projParamsArray2Host;
hipHostMalloc((void**)&projParamsArray2Host,7*PROJ_PER_KERNEL*sizeof(Point3D));
float* projSinCosArray2Host;
hipHostMalloc((void**)&projSinCosArray2Host,5*PROJ_PER_KERNEL*sizeof(float));
// Texture object variables
hipTextureObject_t *texProj;
hipArray **d_cuArrTex;
texProj =(hipTextureObject_t*)malloc(deviceCount*2*sizeof(hipTextureObject_t));
d_cuArrTex =(hipArray**)malloc(deviceCount*2*sizeof(hipArray*));
unsigned int proj_split_overlap_number;
// Start with the main loop. The projection data needs to be allocated and deallocated inside the main loop
// because, due to the nature of cudaArrays, we can not reuse them. This should not be a problem for the fast execution
// of the code, as repeated allocation and deallocation only happens when the projection data is very big,
// and therefore allocation time should be negligible; fluctuations of the other computations should mask it.
unsigned long long proj_linear_idx_start;
unsigned int current_proj_split_size,current_proj_overlap_split_size;
size_t num_bytes_img_curr;
size_t img_linear_idx_start;
float** partial_projection;
size_t* proj_split_size;
for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
//
// Initialize the memory if its the first time.
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMemset(dimage[dev],0,num_bytes_img);
cudaCheckErrors("memset fail");
}
for( unsigned int proj=0;proj<split_projections;proj++){
// What is the size of the current chunk of projections we need?
current_proj_split_size=(nalpha+split_projections-1)/split_projections;
// if it's the last one it's probably smaller
current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj;
// We are going to split it in the same amount of kernels we need to execute.
proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
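// Illustrative example (numbers not taken from the code): nalpha=360 and split_projections=2 give chunks of
// 180 projections each; with PROJ_PER_KERNEL=32 that is proj_split_overlap_number=6 sub-chunks, sized 32,32,32,32,32 and 20.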
// Create pointer to pointers of projections and precompute their location and size.
if(!proj && !img_slice){
partial_projection=(float**)malloc(current_proj_split_size*sizeof(float*));
proj_split_size=(size_t*)malloc(current_proj_split_size*sizeof(size_t));
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Crop the last one, as it is likely not evenly divisible.
// Now let's split this for simultaneous memcopy and compute.
// We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
// current_proj_overlap_split_size units = angles
current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
//Get the linear index where the current memory chunk starts.
proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
//Store result
proj_split_size[proj_block_split]=current_proj_overlap_split_size;
partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Now get the projections on memory
CreateTexture2(gpuids,
partial_projection[proj_block_split],geo,
&d_cuArrTex[(proj_block_split%2)*deviceCount],
proj_split_size[proj_block_split],
&texProj [(proj_block_split%2)*deviceCount],
stream, nStreamDevice,
(proj_block_split<2)&!proj&!img_slice);// Only allocate if its the first 2 calls
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipStreamSynchronize(stream[dev*nStreamDevice+1]);
}
for (dev = 0; dev < deviceCount; dev++){
//Safety:
// Depending on the number of GPUs, an image slice can end up with zero height.
// Just break the loop if we reach that point
if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
break;
hipSetDevice(gpuids[dev]);
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++){
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
unsigned int j;
for(j=0; j<PROJ_PER_KERNEL; j++){
unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel
+proj*(nalpha+split_projections-1)/split_projections // index of the global projection split
+proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // index of the current overlap split
if(currProjNumber_slice>=proj_split_size[proj_block_split])
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
if(currProjNumber_global>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
float sinalpha,cosalpha;
geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now.
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2];
sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);
projSinCosArray2Host[5*j]=sinalpha; // 5*j because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
projSinCosArray2Host[5*j+1]=cosalpha;
projSinCosArray2Host[5*j+2]=geo.COR[currProjNumber_global];
projSinCosArray2Host[5*j+3]=geo.DSD[currProjNumber_global];
projSinCosArray2Host[5*j+4]=geo.DSO[currProjNumber_global];
computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber_global];
offOrig.y=geo.offOrigY[currProjNumber_global];
offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];
offDetec.x=geo.offDetecU[currProjNumber_global];
offDetec.y=geo.offDetecV[currProjNumber_global];
offDetec.z=0;//unused
projParamsArray2Host[7*j] =deltaX; // 7*j because we have 7 Point3D values per projection
projParamsArray2Host[7*j+1]=deltaY;
projParamsArray2Host[7*j+2]=deltaZ;
projParamsArray2Host[7*j+3]=xyzOrigin;
projParamsArray2Host[7*j+4]=offOrig;
projParamsArray2Host[7*j+5]=offDetec;
projParamsArray2Host[7*j+6]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
hipMemcpyToSymbolAsync(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*5*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]);
hipMemcpyToSymbolAsync(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]);
hipStreamSynchronize(stream[dev*nStreamDevice]);
hipLaunchKernelGGL(( kernelPixelBackprojection), dim3(grid),dim3(block),0,stream[dev*nStreamDevice], geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
}
} // END sub-split of current projection chunk
} // END projection splits
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipLaunchKernelGGL(( matrixConstantMultiply), dim3(60),dim3(MAXTREADS),0,stream[dev*nStreamDevice], geoArray[img_slice*deviceCount+dev],dimage[dev],geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
}
// Now we need to take the image out of the GPU
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipStreamSynchronize(stream[dev*nStreamDevice]);
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
hipMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, hipMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
}
} // end image splits
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDeviceSynchronize();
}
// Clean the GPU
bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
if (!two_buffers_used && i==1)
break;
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipDestroyTextureObject(texProj[i*deviceCount+dev]);
hipFreeArray(d_cuArrTex[i*deviceCount+dev]);
}
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipFree(dimage[dev]);
}
hipHostFree(projSinCosArray2Host);
hipHostFree(projParamsArray2Host);
free(partial_projection);
free(proj_split_size);
freeGeoArray(split_image*deviceCount,geoArray);
if (isHostRegisterSupported & split_image>1){
hipHostUnregister(result);
}
if (isHostRegisterSupported){
hipHostUnregister(projections);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]);
cudaCheckErrors("hipFree fail");
// hipDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection2
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream,int nStreamDevice,bool allocate){
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
int num_devices = gpuids.GetLength();
#if IS_FOR_MATLAB_TIGRE
const hipExtent extent =make_hipExtent(geo.nDetecV, geo.nDetecU, nangles);
#else
const hipExtent extent =make_hipExtent(geo.nDetecU, geo.nDetecV, nangles);
#endif
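// Note: when building for MATLAB/TIGRE the detector dimensions are swapped in the extent above (and u/v are swapped
// in the kernel's tex3D fetch), presumably to match the column-major layout of the projection data coming from MATLAB.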
if (allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(gpuids[dev]);
//hipArray Descriptor
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
//cuda Array
hipMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
}
}
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(gpuids[dev]);
hipMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_hipPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3DAsync(©Params,stream[dev*nStreamDevice+1]);
}
//Array creation End
for (unsigned int dev = 0; dev < num_devices; dev++){
hipSetDevice(gpuids[dev]);
hipResourceDesc texRes;
memset(&texRes, 0, sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = d_cuArrTex[dev];
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = hipFilterModeLinear;
texDescr.addressMode[0] = hipAddressModeBorder;
texDescr.addressMode[1] = hipAddressModeBorder;
texDescr.addressMode[2] = hipAddressModeBorder;
texDescr.readMode = hipReadModeElementType;
hipCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
}
}
#ifndef BACKPROJECTION_HPP
void splitCTbackprojection(const GpuIds& gpuids, Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
// We don't know if the devices are being used by something else, so let's check and only use the amount of memory we actually need.
size_t mem_GPU_global;
checkFreeMemory(gpuids, &mem_GPU_global);
const int deviceCount = gpuids.GetLength();
// Compute how much memory each of the relevant memory pieces need
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPU?
if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
// We only need to split if we have extra GPUs
*split_image=1;
*split_projections=1;
}
// We know we need to split, but:
// Does all the image fit in the GPU, with some slack for a stack of projections??
else
{
// As we can overlap memcpys from H2D of the projections, we should then minimize the amount of image splits.
// Lets assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
size_t mem_free=mem_GPU_global-2*mem_proj*PROJ_PER_KERNEL;
*split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
// Now knowing how many splits we have for images, we can recompute how many slices of projections actually
// fit on the GPU. Must be more than 0 obviously.
mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it is on the order of bytes, and we keep 5% of the GPU free just in case. We are safe.
*split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
}
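// Illustrative example (assumed numbers, not from the code): a 512^3 float image is 512 MiB and a 512x512 float
// projection is 1 MiB, so with PROJ_PER_KERNEL=32, one GPU and ~3.8 GiB of usable memory the first branch needs
// 512 MiB + 2*32 MiB = 576 MiB, which fits, and no splitting is required (split_image = split_projections = 1).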
}
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate image around X axis (this is equivalent of rotating the source and detector) RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
//Done for P, now source
Point3D source;
source.x=geo.DSD[i]; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
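// The kernel reconstructs each voxel position incrementally as P = xyzOrigin + indX*deltaX + indY*deltaY + indZ*deltaZ,
// which is why only the origin, the three per-index increments and the source need to be stored per projection.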
} // END computeDeltasCube
void rollPitchYawT(Geometry geo,int i, Point3D* point){
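// Applies the transpose of R = Rz(dRoll)*Ry(dPitch)*Rx(dYaw) to the point, i.e. point <- R^T * point
// (which is presumably what the trailing T in the function name refers to).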
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
void checkFreeMemory(const GpuIds& gpuids,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
const int deviceCount = gpuids.GetLength();
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(gpuids[dev]);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
#endif | a04d77411846d3f6d70ae86a1ba68270c124e499.cu | /*-------------------------------------------------------------------------
*
* CUDA function for backrpojection using FDK weigts for CBCT
*
*
* CODE by Ander Biguri
* Optimized and modified by RB
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define PI_2 1.57079632679489661923
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "voxel_backprojection2.hpp"
#include "TIGRE_common.hpp"
#include <math.h>
#include "GpuIds.hpp"
// https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\
} \
} while (0)
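// Usage sketch: call cudaCheckErrors("some context") right after CUDA API calls or kernel launches, e.g.
//   kernel<<<grid,block>>>(...); cudaCheckErrors("kernel launch fail");
// The do { ... } while (0) wrapper lets the macro be used as a single statement, e.g. inside an if/else without braces.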
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* *--->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
// This definition must go here.
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate);
__global__ void matrixConstantMultiply(const Geometry geo,float* image,float constant){
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx<geo.nVoxelX* geo.nVoxelY *geo.nVoxelZ; idx+=gridDim.x*blockDim.x) {
image[idx]*=constant;
}
}
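// Note: the grid-stride loop above lets a fixed-size launch (60 blocks of MAXTREADS threads, as used further below)
// cover the whole volume regardless of its size: each thread strides by gridDim.x*blockDim.x until the end of the image.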
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were:
// PROJ_PER_KERNEL = 32 or 16 (very similar times)
// VOXELS_PER_THREAD = 8
// Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code.
// (e.g. 16.2 s vs. ~62 s).
const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck.
const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by a single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck.
// We have PROJ_PER_KERNEL projections and we need 7 parameters for each projection:
// deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec, source
// So we need to keep PROJ_PER_KERNEL*7 Point3D values in our parameter array FOR EACH CALL to our main kernel
// (they will be updated in the main loop before each kernel call).
__constant__ Point3D projParamsArray2Dev[7*PROJ_PER_KERNEL]; // Dev means it is on device
// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Now we also need to store sinAlpha, cosAlpha, COR, DSD and DSO for each projection (five floats per projection)
__constant__ float projSinCosArray2Dev[5*PROJ_PER_KERNEL];
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//______________________________________________________________________________
//
// Function: kernelPixelBackprojection
//
// Description: Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojection(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex)
{
unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x;
// unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle
//Make sure we don't go out of bounds
if (indX>=geo.nVoxelX || indY>=geo.nVoxelY || startIndZ>=geo.nVoxelZ)
return;
// We'll keep a local auxiliary array of values of a column of voxels that this thread will update
float voxelColumn[VOXELS_PER_THREAD];
// First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
// work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes
int colIdx;
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
// We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
} // END copy 3D volume voxels to local array
// Now iterate through projections
#pragma unroll
for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
{
// Get the current parameters from parameter arrays in constant memory.
int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array
// Our voxelColumn values will be updated by however many projections are left in the "remainder" - that's OK.
if(indAlpha>=totalNoOfProjections)
break;
Point3D deltaX = projParamsArray2Dev[7*projNumber]; // 7*projNumber because we have 7 Point3D values per projection
Point3D deltaY = projParamsArray2Dev[7*projNumber+1];
Point3D deltaZ = projParamsArray2Dev[7*projNumber+2];
Point3D xyzOrigin = projParamsArray2Dev[7*projNumber+3];
Point3D xyzOffset = projParamsArray2Dev[7*projNumber+4];
Point3D uv0Offset = projParamsArray2Dev[7*projNumber+5];
Point3D S = projParamsArray2Dev[7*projNumber+6];
float sinalpha = projSinCosArray2Dev[5*projNumber]; // 5*projNumber because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
float cosalpha = projSinCosArray2Dev[5*projNumber+1];
float COR = projSinCosArray2Dev[5*projNumber+2];
float DSD = projSinCosArray2Dev[5*projNumber+3];
float DSO = projSinCosArray2Dev[5*projNumber+4];
// Precomputations for the weights:
//Real coords of Source
// We already have S.x (geo.DSO), and S.y and S.z are always zero. we just need to rotate
Point3D realS;
realS.x= DSO*cosalpha;
realS.y=-DSO*sinalpha;
realS.z=0;
Point3D realvoxel_init;
realvoxel_init.x=-geo.sVoxelX/2+geo.dVoxelX/2+xyzOffset.x;
realvoxel_init.y=-geo.sVoxelY/2+geo.dVoxelY/2+xyzOffset.y;
realvoxel_init.z=-geo.sVoxelZ/2+geo.dVoxelZ/2+xyzOffset.z;
// Real XYZ coordinates of Detector.
Point3D realD, realDaux;
// We know the index of the detector (u,v). Start from there.
realDaux.x=-(DSD-DSO);
// Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
// "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
Point3D P;
P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU;
P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);
// This is the vector defining the line from the source to the Voxel
float vectX,vectY,vectZ;
vectX=(P.x -S.x);
vectY=(P.y -S.y);
vectZ=(P.z -S.z);
// Get the coordinates in the detector UV where the mid point of the voxel is projected.
float t=__fdividef(DSO-DSD-S.x,vectX);
float y,z;
y=vectY*t+S.y;
z=vectZ*t+S.z;
float u,v;
u=y+(float)geo.nDetecU*0.5f;
v=z+(float)geo.nDetecV*0.5f;
#if IS_FOR_MATLAB_TIGRE
float sample=tex3D<float>(tex, v, u ,indAlpha+0.5f);
#else
float sample=tex3D<float>(tex, u, v ,indAlpha+0.5f);
#endif
float weigth=0;
//
//
//
// IMPORTANT: The weights are almost 50% of the computational time. Is there a way of speeding this up??
//
//Real coordinates of the voxel. Instead of reversing the transformation, it is less math (faster) to compute them from the indexes.
Point3D realvoxel;
realvoxel.x=realvoxel_init.x+indX*geo.dVoxelX;
realvoxel.y=realvoxel_init.y+indY*geo.dVoxelY;
realvoxel.z=realvoxel_init.z+indZ*geo.dVoxelZ;
realDaux.y=(-geo.sDetecU+geo.dDetecU)*0.5f + u*geo.dDetecU +uv0Offset.x;
realD.z =(-geo.sDetecV+geo.dDetecV)*0.5f + v*geo.dDetecV +uv0Offset.y;
//rotate the detector
realD.x= realDaux.x*cosalpha + realDaux.y*sinalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
realD.y=-realDaux.x*sinalpha + realDaux.y*cosalpha; //sin(-x)=-sin(x) , cos(-x)=cos(x)
float L,lsq;
L = __fsqrt_rd( (realS.x-realD.x)*(realS.x-realD.x)+ (realS.y-realD.y)*(realS.y-realD.y)+ (realD.z)*(realD.z)); // Sz=0 always.
lsq = (realS.x-realvoxel.x)*(realS.x-realvoxel.x)
+ (realS.y-realvoxel.y)*(realS.y-realvoxel.y)
+ (realS.z-realvoxel.z)*(realS.z-realvoxel.z);
weigth=__fdividef(L*L*L,(DSD*lsq));
// weigth=1;
// Get Value in the computed (U,V) and multiply by the corresponding weigth.
// indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
voxelColumn[colIdx]+=sample* weigth;
} // END iterating through column of voxels
} // END iterating through multiple projections
// And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
{
unsigned long indZ = startIndZ + colIdx;
// If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
// be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
if(indZ>=geo.nVoxelZ)
break; // break the loop.
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX;
image[idx] = voxelColumn[colIdx]; // Write the updated value (accumulated from MULTIPLE projections) back to the 3D volume in main memory.
// The accumulation itself was done in the local (register) array, avoiding reads/writes from the slow main memory.
// According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write.
// We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
// better for avoiding memory congestion.
} // END copy updated voxels from local array to our 3D volume
} // END kernelPixelBackprojection
//______________________________________________________________________________
//
// Function: voxel_backprojection2
//
// Description: Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection2(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha, const GpuIds& gpuids){
// Prepare for MultiGPU
int deviceCount = gpuids.GetLength();
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
}
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
// Check the available devices, and if they are the same
if (!gpuids.AreEqualDevices()) {
mexWarnMsgIdAndTxt("Atb:Voxel_backprojection2:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed.");
}
int dev;
// Split the CT problem
unsigned int split_image;
unsigned int split_projections;
splitCTbackprojection(gpuids,geo,nalpha,&split_image,&split_projections);
// Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the
// image slices. The rest of the Geometry is the same
Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);
// Now let's allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
// in the previous section this should leave enough space for the textures.
size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
float** dimage=(float**)malloc(deviceCount*sizeof(float*));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMalloc((void**)&dimage[dev], num_bytes_img);
cudaCheckErrors("cudaMalloc fail");
}
//Pagelock memory for synchronous copy.
// Lets try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,gpuids[0]);
#endif
// Empirical testing shows that when the image does not need to be split (split_image==1, which also implies the image is not very big), the time to
// pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
if (isHostRegisterSupported & split_image>1){
cudaHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable);
}
if (isHostRegisterSupported ){
cudaHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
//If it is the first time, lets make sure our image is zeroed.
int nStreamDevice=2;
int nStreams=deviceCount*nStreamDevice;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
for (int i = 0; i < nStreamDevice; ++i){
cudaStreamCreate(&stream[i+dev*nStreamDevice]);
}
}
// Kernel auxiliary variables
Point3D* projParamsArray2Host;
cudaMallocHost((void**)&projParamsArray2Host,7*PROJ_PER_KERNEL*sizeof(Point3D));
float* projSinCosArray2Host;
cudaMallocHost((void**)&projSinCosArray2Host,5*PROJ_PER_KERNEL*sizeof(float));
// Texture object variables
cudaTextureObject_t *texProj;
cudaArray **d_cuArrTex;
texProj =(cudaTextureObject_t*)malloc(deviceCount*2*sizeof(cudaTextureObject_t));
d_cuArrTex =(cudaArray**)malloc(deviceCount*2*sizeof(cudaArray*));
unsigned int proj_split_overlap_number;
// Start with the main loop. The projection data needs to be allocated and deallocated inside the main loop
// because, due to the nature of cudaArrays, we can not reuse them. This should not be a problem for the fast execution
// of the code, as repeated allocation and deallocation only happens when the projection data is very big,
// and therefore allocation time should be negligible; fluctuations of the other computations should mask it.
unsigned long long proj_linear_idx_start;
unsigned int current_proj_split_size,current_proj_overlap_split_size;
size_t num_bytes_img_curr;
size_t img_linear_idx_start;
float** partial_projection;
size_t* proj_split_size;
for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
//
// Initialize the memory if its the first time.
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemset(dimage[dev],0,num_bytes_img);
cudaCheckErrors("memset fail");
}
for( unsigned int proj=0;proj<split_projections;proj++){
// What is the size of the current chunk of projections we need?
current_proj_split_size=(nalpha+split_projections-1)/split_projections;
// if it's the last one it's probably smaller
current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj;
// We are going to split it in the same amount of kernels we need to execute.
proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;
// Create pointer to pointers of projections and precompute their location and size.
if(!proj && !img_slice){
partial_projection=(float**)malloc(current_proj_split_size*sizeof(float*));
proj_split_size=(size_t*)malloc(current_proj_split_size*sizeof(size_t));
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Crop the last one, as it is likely not evenly divisible.
// Now let's split this for simultaneous memcopy and compute.
// We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
// current_proj_overlap_split_size units = angles
current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;
//Get the linear index where the current memory chunk starts.
proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
//Store result
proj_split_size[proj_block_split]=current_proj_overlap_split_size;
partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
}
for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
// Now get the projections on memory
CreateTexture2(gpuids,
partial_projection[proj_block_split],geo,
&d_cuArrTex[(proj_block_split%2)*deviceCount],
proj_split_size[proj_block_split],
&texProj [(proj_block_split%2)*deviceCount],
stream, nStreamDevice,
(proj_block_split<2)&!proj&!img_slice);// Only allocate if its the first 2 calls
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaStreamSynchronize(stream[dev*nStreamDevice+1]);
}
for (dev = 0; dev < deviceCount; dev++){
//Safety:
// Depending on the number of GPUs, an image slice can end up with zero height.
// Just break the loop if we reach that point
if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
break;
cudaSetDevice(gpuids[dev]);
int divx,divy,divz;
// RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y).
// I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
// let's stick with the values from Zinsser and Keck.
divx=16;
divy=32;
divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!
dim3 grid((geo.nVoxelX+divx-1)/divx,
(geo.nVoxelY+divy-1)/divy,
(geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);
dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)
//////////////////////////////////////////////////////////////////////////////////////
// Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
// Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
// kernel calls we'll need altogether.
unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL
for (unsigned int i=0; i<noOfKernelCalls; i++){
// Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
unsigned int j;
for(j=0; j<PROJ_PER_KERNEL; j++){
unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel
+proj*(nalpha+split_projections-1)/split_projections // index of the global projection split
+proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // index of the current overlap split
if(currProjNumber_slice>=proj_split_size[proj_block_split])
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
if(currProjNumber_global>=nalpha)
break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, offDetec,source;
float sinalpha,cosalpha;
geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now.
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2];
sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);
projSinCosArray2Host[5*j]=sinalpha; // 5*j because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
projSinCosArray2Host[5*j+1]=cosalpha;
projSinCosArray2Host[5*j+2]=geo.COR[currProjNumber_global];
projSinCosArray2Host[5*j+3]=geo.DSD[currProjNumber_global];
projSinCosArray2Host[5*j+4]=geo.DSO[currProjNumber_global];
computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);
offOrig.x=geo.offOrigX[currProjNumber_global];
offOrig.y=geo.offOrigY[currProjNumber_global];
offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];
offDetec.x=geo.offDetecU[currProjNumber_global];
offDetec.y=geo.offDetecV[currProjNumber_global];
offDetec.z=0;//unused
projParamsArray2Host[7*j] =deltaX; // 7*j because we have 7 Point3D values per projection
projParamsArray2Host[7*j+1]=deltaY;
projParamsArray2Host[7*j+2]=deltaZ;
projParamsArray2Host[7*j+3]=xyzOrigin;
projParamsArray2Host[7*j+4]=offOrig;
projParamsArray2Host[7*j+5]=offDetec;
projParamsArray2Host[7*j+6]=source;
} // END for (preparing params for kernel call)
// Copy the prepared parameter arrays to constant memory to make it available for the kernel
cudaMemcpyToSymbolAsync(projSinCosArray2Dev, projSinCosArray2Host, sizeof(float)*5*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaMemcpyToSymbolAsync(projParamsArray2Dev, projParamsArray2Host, sizeof(Point3D)*7*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
cudaStreamSynchronize(stream[dev*nStreamDevice]);
kernelPixelBackprojection<<<grid,block,0,stream[dev*nStreamDevice]>>>(geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
} // END for
//////////////////////////////////////////////////////////////////////////////////////
// END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
//////////////////////////////////////////////////////////////////////////////////////
}
} // END sub-split of current projection chunk
} // END projection splits
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
matrixConstantMultiply<<<60,MAXTREADS,0,stream[dev*nStreamDevice]>>>( geoArray[img_slice*deviceCount+dev],dimage[dev],geo.dVoxelX*geo.dVoxelY*geo.dVoxelZ/(geo.dDetecU*geo.dDetecV));
}
// Now we need to take the image out of the GPU
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaStreamSynchronize(stream[dev*nStreamDevice]);
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
cudaMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, cudaMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
}
} // end image splits
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDeviceSynchronize();
}
// Clean the GPU
bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
if (!two_buffers_used && i==1)
break;
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaDestroyTextureObject(texProj[i*deviceCount+dev]);
cudaFreeArray(d_cuArrTex[i*deviceCount+dev]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaFree(dimage[dev]);
}
cudaFreeHost(projSinCosArray2Host);
cudaFreeHost(projParamsArray2Host);
free(partial_projection);
free(proj_split_size);
freeGeoArray(split_image*deviceCount,geoArray);
if (isHostRegisterSupported & split_image>1){
cudaHostUnregister(result);
}
if (isHostRegisterSupported){
cudaHostUnregister(projections);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]);
cudaCheckErrors("cudaFree fail");
// cudaDeviceReset(); // For the Nvidia Visual Profiler
return 0;
} // END voxel_backprojection2
void CreateTexture2(const GpuIds& gpuids, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate){
//size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
int num_devices = gpuids.GetLength();
#if IS_FOR_MATLAB_TIGRE
const cudaExtent extent =make_cudaExtent(geo.nDetecV, geo.nDetecU, nangles);
#else
const cudaExtent extent =make_cudaExtent(geo.nDetecU, geo.nDetecV, nangles);
#endif
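// Note: in the MATLAB/TIGRE build the detector U/V extents are swapped, presumably because MATLAB arrays are column-major.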
if (allocate){
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
}
}
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[dev];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3DAsync(©Params,stream[dev*nStreamDevice+1]);
}
//Array creation End
for (unsigned int dev = 0; dev < num_devices; dev++){
cudaSetDevice(gpuids[dev]);
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArrTex[dev];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
}
}
#ifndef BACKPROJECTION_HPP
void splitCTbackprojection(const GpuIds& gpuids, Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
// We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(gpuids, &mem_GPU_global);
const int deviceCount = gpuids.GetLength();
// Compute how much memory each of the relevant memory pieces need
size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);
// Does everything fit in the GPU?
if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
// We only need to split if we have extra GPUs
*split_image=1;
*split_projections=1;
}
// We know we need to split, but:
// Does the whole image fit in the GPU, with some slack for a stack of projections?
else
{
// As we can overlap the H2D memcpys of the projections, we should minimize the number of image splits.
// Let's assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
size_t mem_free=mem_GPU_global-2*mem_proj*PROJ_PER_KERNEL;
*split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
// Now knowing how many splits we have for images, we can recompute how many slices of projections actually
// fit on the GPU. Must be more than 0 obviously.
mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it is on the order of bytes, and we keep 5% of the GPU free just in case. We are safe
*split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
}
}
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
Point3D P, Px,Py,Pz;
// Get coords of Img(0,0,0)
P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];
// Get coords of the next voxel in each direction
Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x;
Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y;
Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ;
// Rotate image around X axis (this is equivalent to rotating the source and detector) RZ RY RZ
eulerZYZT(geo,&P);
eulerZYZT(geo,&Px);
eulerZYZT(geo,&Py);
eulerZYZT(geo,&Pz);
//detector offset
P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i];
Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i];
Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i];
Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i];
//Detector Roll pitch Yaw
//
//
// first, we need to offset everything so (0,0,0) is the center of the detector
// Only X is required for that
P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);
rollPitchYawT(geo,i,&P);
rollPitchYawT(geo,i,&Px);
rollPitchYawT(geo,i,&Py);
rollPitchYawT(geo,i,&Pz);
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
//Done for P, now source
Point3D source;
source.x=geo.DSD[i]; //already offset for rotation
source.y=-geo.offDetecU[i];
source.z=-geo.offDetecV[i];
rollPitchYawT(geo,i,&source);
source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z;
// mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
// Scale coords so detector pixels are 1x1
P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU;
Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU;
Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU;
Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU;
source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU;
// get deltas of the changes in voxels
deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z;
deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z;
deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z;
*xyzorigin=P;
*S=source;
} // END computeDeltasCube
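// rollPitchYawT appears to apply the transpose of the detector roll-pitch-yaw rotation to a point (hence the trailing T).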
void rollPitchYawT(Geometry geo,int i, Point3D* point){
Point3D auxPoint;
auxPoint.x=point->x;
auxPoint.y=point->y;
auxPoint.z=point->z;
point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
+sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
-sin(geo.dPitch[i])*auxPoint.z;
point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;
point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
+(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
+cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}
void checkFreeMemory(const GpuIds& gpuids,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
const int deviceCount = gpuids.GetLength();
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(gpuids[dev]);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
#endif |
807cdc2ed4a58de5ccc336c0fa106e50addbaea6.hip | // !!! This is a file automatically generated by hipify!!!
/*
TODO:
- only load parts of the file, in accordance with a prototxt param "max_mem"
*/
#include <stdint.h>
#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"
#include "caffe/layers/flow_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void FlowDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
FlowBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
// Transform data.
Dtype* data = batch->data_.mutable_cpu_data();
int elements = batch->data_.shape(0) * batch->data_.shape(1) * batch->data_.shape(2) * batch->data_.shape(3);
TransformData(data, dfactor_, meanval_, elements);
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data());
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data());
if (phase_ == 1) { // Test time.
// Reshape to loaded videoIds.
top[2]->ReshapeLike(batch->videoId_);
// Copy the videoIds.
caffe_copy(batch->videoId_.count(), batch->videoId_.gpu_data(), top[2]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in the meantime.
CUDA_CHECK(hipStreamSynchronize(hipStreamDefault));
prefetch_free_.push(batch);
iter_++;
forw_files_ = forw_files_ + batch->data_.shape(0);
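// A full pass over the files ends an epoch: the loader resets, and when balancing is enabled a new balanced file partition is drawn below.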
if (forw_files_ == num_files_) {
epoch_++;
forw_files_ = 0;
do_balance_ = true;
Reset();
}
if (epoch_ % balancingEpoch_ == 0 && balance_ && do_balance_ && iter_ > init_balance_iteration_ && phase_ == 0) {
LOG(INFO) << "New balanced partition";
mtx_.lock();
getBalancedPartition(full_hdf_filenames_, subclasses_, full_labels_, selected_files_, hdf_filenames_, labels_);
num_files_ = hdf_filenames_.size();
mtx_.unlock();
do_balance_ = false;
}
}
INSTANTIATE_LAYER_GPU_FORWARD(FlowDataLayer);
} // namespace caffe
| 807cdc2ed4a58de5ccc336c0fa106e50addbaea6.cu | /*
TODO:
- only load parts of the file, in accordance with a prototxt param "max_mem"
*/
#include <stdint.h>
#include <vector>
#include "hdf5.h"
#include "hdf5_hl.h"
#include "caffe/layers/flow_data_layer.hpp"
namespace caffe {
template <typename Dtype>
void FlowDataLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
FlowBatch<Dtype>* batch = prefetch_full_.pop("Data layer prefetch queue empty");
// Transform data.
Dtype* data = batch->data_.mutable_cpu_data();
int elements = batch->data_.shape(0) * batch->data_.shape(1) * batch->data_.shape(2) * batch->data_.shape(3);
TransformData(data, dfactor_, meanval_, elements);
// Reshape to loaded data.
top[0]->ReshapeLike(batch->data_);
// Copy the data
caffe_copy(batch->data_.count(), batch->data_.gpu_data(), top[0]->mutable_gpu_data());
// Reshape to loaded labels.
top[1]->ReshapeLike(batch->label_);
// Copy the labels.
caffe_copy(batch->label_.count(), batch->label_.gpu_data(), top[1]->mutable_gpu_data());
if (phase_ == 1) { // Test time.
// Reshape to loaded videoIds.
top[2]->ReshapeLike(batch->videoId_);
// Copy the videoIds.
caffe_copy(batch->videoId_.count(), batch->videoId_.gpu_data(), top[2]->mutable_gpu_data());
}
// Ensure the copy is synchronous wrt the host, so that the next batch isn't
// copied in the meantime.
CUDA_CHECK(cudaStreamSynchronize(cudaStreamDefault));
prefetch_free_.push(batch);
iter_++;
forw_files_ = forw_files_ + batch->data_.shape(0);
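// A full pass over the files ends an epoch: the loader resets, and when balancing is enabled a new balanced file partition is drawn below.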
if (forw_files_ == num_files_) {
epoch_++;
forw_files_ = 0;
do_balance_ = true;
Reset();
}
if (epoch_ % balancingEpoch_ == 0 && balance_ && do_balance_ && iter_ > init_balance_iteration_ && phase_ == 0) {
LOG(INFO) << "New balanced partition";
mtx_.lock();
getBalancedPartition(full_hdf_filenames_, subclasses_, full_labels_, selected_files_, hdf_filenames_, labels_);
num_files_ = hdf_filenames_.size();
mtx_.unlock();
do_balance_ = false;
}
}
INSTANTIATE_LAYER_GPU_FORWARD(FlowDataLayer);
} // namespace caffe
|
377c4835dd658695e7f385361034d5c2d0fb8405.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/limits.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "utils/copygen.hpp"
#include "utils/boxutils.hpp"
#include "utils/bitonic_sort.hpp"
#include "octree_iterator.hpp"
namespace pcl { namespace device { namespace knn_search
{
typedef OctreeImpl::PointType PointType;
struct Batch
{
const PointType* queries;
//int k == 1;
const int *indices;
// X1 X2 X3 X4 ..
// Y1 Y2 Y3 Y4 ..
// Z1 Z2 Z3 Z4 ..
const float* points;
int points_step; // elem step
OctreeGlobalWithBox octree;
int queries_num;
mutable int* output;
mutable int* sizes;
};
struct KernelPolicy
{
enum
{
CTA_SIZE = 512,
WARP_SIZE = 32,
WARPS_COUNT = CTA_SIZE/WARP_SIZE,
};
};
struct Warp_knnSearch
{
public:
typedef OctreeIteratorDeviceNS OctreeIterator;
const Batch& batch;
int query_index;
float3 query;
float min_distance;
int min_idx;
OctreeIterator iterator;
__device__ __forceinline__ Warp_knnSearch(const Batch& batch_arg, int query_index_arg)
: batch(batch_arg), query_index(query_index_arg), min_distance(numeric_limits<float>::max()), min_idx(0), iterator(batch.octree) { }
__device__ __forceinline__ void launch(bool active)
{
if (active)
{
PointType q = batch.queries[query_index];
query = make_float3(q.x, q.y, q.z);
}
else
query_index = -1;
while(__any_sync(0xFFFFFFFF, active))
{
int leaf = -1;
if (active)
leaf = examineNode(iterator);
processLeaf(leaf);
active = active && iterator.level >= 0;
}
if (query_index != -1)
{
batch.output[query_index] = batch.indices[min_idx];
if (batch.sizes)
batch.sizes[query_index] = 1;
}
}
private:
__device__ __forceinline__ int examineNode(OctreeIterator& iterator)
{
int node_idx = *iterator;
int code = batch.octree.codes[node_idx];
float3 node_minp = batch.octree.minp;
float3 node_maxp = batch.octree.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, query, min_distance))
{
++iterator;
return -1;
}
//need to go to next level
int node = batch.octree.nodes[node_idx];
int children_mask = node & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
++iterator;
return node_idx;
}
//goto next level
int first = node >> 8;
int len = __popc(children_mask);
iterator.gotoNextLevel(first, len);
return -1;
};
__device__ __forceinline__ void processLeaf(int node_idx)
{
int mask = __ballot_sync(0xFFFFFFFF, node_idx != -1);
unsigned int laneId = Warp::laneId();
unsigned int warpId = Warp::id();
__shared__ volatile int per_warp_buffer[KernelPolicy::WARPS_COUNT];
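// Leaves are handled one warp lane at a time: the active lane's leaf range and query are
// broadcast through the per-warp shared buffer so the whole warp can scan that leaf's points.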
while(mask)
{
int active_lane = __ffs(mask) - 1; //[0..31]
mask &= ~(1 << active_lane);
volatile int* warp_buffer = &per_warp_buffer[warpId];
//broadcast beg
if (active_lane == laneId)
*warp_buffer = batch.octree.begs[node_idx];
int beg = *warp_buffer;
//broadcast end
if (active_lane == laneId)
*warp_buffer = batch.octree.ends[node_idx];
int end = *warp_buffer;
float3 active_query;
volatile float* warp_buffer_float = (float*)&per_warp_buffer[warpId];
//broadcast warp_query
if (active_lane == laneId)
*warp_buffer_float = query.x;
active_query.x = *warp_buffer_float;
if (active_lane == laneId)
*warp_buffer_float = query.y;
active_query.y = *warp_buffer_float;
if (active_lane == laneId)
*warp_buffer_float = query.z;
active_query.z = *warp_buffer_float;
//broadcast query_index
if (active_lane == laneId)
*warp_buffer = query_index;
float active_query_index = *warp_buffer;
float dist;
int offset = NearestWarpKernel<KernelPolicy::CTA_SIZE>(batch.points + beg, batch.points_step, end - beg, active_query, dist);
if (active_lane == laneId)
if (min_distance > dist)
{
min_distance = dist;
min_idx = beg + offset;
}
}
}
template<int CTA_SIZE>
__device__ __forceinline__ int NearestWarpKernel(const float* points, int points_step, int length, const float3& active_query, float& dist)
{
__shared__ volatile float dist2[CTA_SIZE];
__shared__ volatile int index[CTA_SIZE];
int tid = threadIdx.x;
dist2[tid] = pcl::device::numeric_limits<float>::max();
//serial step
for (int idx = Warp::laneId(); idx < length; idx += Warp::STRIDE)
{
float dx = points[idx ] - active_query.x;
float dy = points[idx + points_step ] - active_query.y;
float dz = points[idx + points_step * 2] - active_query.z;
float d2 = dx * dx + dy * dy + dz * dz;
if (dist2[tid] > d2)
{
dist2[tid] = d2;
index[tid] = idx;
}
}
//parallel step
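// warp-synchronous tree reduction over the 32 lanes: keep the smallest squared distance and its index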
unsigned int lane = Warp::laneId();
float mind2 = dist2[tid];
if (lane < 16)
{
float next = dist2[tid + 16];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 16];
}
}
if (lane < 8)
{
float next = dist2[tid + 8];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 8];
}
}
if (lane < 4)
{
float next = dist2[tid + 4];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 4];
}
}
if (lane < 2)
{
float next = dist2[tid + 2];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 2];
}
}
if (lane < 1)
{
float next = dist2[tid + 1];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 1];
}
}
dist = sqrt(dist2[tid - lane]);
return index[tid - lane];
}
};
__global__ void KernelKNN(const Batch batch)
{
int query_index = blockIdx.x * blockDim.x + threadIdx.x;
bool active = query_index < batch.queries_num;
if (__all_sync(0xFFFFFFFF, active == false))
return;
Warp_knnSearch search(batch, query_index);
search.launch(active);
}
} } }
void pcl::device::OctreeImpl::nearestKSearchBatch(const Queries& queries, int /*k*/, NeighborIndices& results) const
{
typedef pcl::device::knn_search::Batch BatchType;
BatchType batch;
batch.octree = octreeGlobal;
batch.indices = indices;
batch.queries_num = (int)queries.size();
batch.queries = queries;
batch.output = results.data;
batch.sizes = results.sizes;
batch.points = points_sorted;
batch.points_step = points_sorted.step()/points_sorted.elem_size;
cudaSafeCall( hipFuncSetCacheConfig(pcl::device::knn_search::KernelKNN, hipFuncCachePreferL1) );
int block = pcl::device::knn_search::KernelPolicy::CTA_SIZE;
int grid = (batch.queries_num + block - 1) / block;
hipLaunchKernelGGL(( pcl::device::knn_search::KernelKNN), dim3(grid), dim3(block), 0, 0, batch);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
| 377c4835dd658695e7f385361034d5c2d0fb8405.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/limits.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "utils/copygen.hpp"
#include "utils/boxutils.hpp"
#include "utils/bitonic_sort.hpp"
#include "octree_iterator.hpp"
namespace pcl { namespace device { namespace knn_search
{
typedef OctreeImpl::PointType PointType;
struct Batch
{
const PointType* queries;
//int k == 1;
const int *indices;
// X1 X2 X3 X4 ..
// Y1 Y2 Y3 Y4 ..
// Z1 Z2 Z3 Z4 ..
const float* points;
int points_step; // elem step
OctreeGlobalWithBox octree;
int queries_num;
mutable int* output;
mutable int* sizes;
};
struct KernelPolicy
{
enum
{
CTA_SIZE = 512,
WARP_SIZE = 32,
WARPS_COUNT = CTA_SIZE/WARP_SIZE,
};
};
struct Warp_knnSearch
{
public:
typedef OctreeIteratorDeviceNS OctreeIterator;
const Batch& batch;
int query_index;
float3 query;
float min_distance;
int min_idx;
OctreeIterator iterator;
__device__ __forceinline__ Warp_knnSearch(const Batch& batch_arg, int query_index_arg)
: batch(batch_arg), query_index(query_index_arg), min_distance(numeric_limits<float>::max()), min_idx(0), iterator(batch.octree) { }
__device__ __forceinline__ void launch(bool active)
{
if (active)
{
PointType q = batch.queries[query_index];
query = make_float3(q.x, q.y, q.z);
}
else
query_index = -1;
while(__any_sync(0xFFFFFFFF, active))
{
int leaf = -1;
if (active)
leaf = examineNode(iterator);
processLeaf(leaf);
active = active && iterator.level >= 0;
}
if (query_index != -1)
{
batch.output[query_index] = batch.indices[min_idx];
if (batch.sizes)
batch.sizes[query_index] = 1;
}
}
private:
__device__ __forceinline__ int examineNode(OctreeIterator& iterator)
{
int node_idx = *iterator;
int code = batch.octree.codes[node_idx];
float3 node_minp = batch.octree.minp;
float3 node_maxp = batch.octree.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, query, min_distance))
{
++iterator;
return -1;
}
//need to go to next level
int node = batch.octree.nodes[node_idx];
int children_mask = node & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
++iterator;
return node_idx;
}
//goto next level
int first = node >> 8;
int len = __popc(children_mask);
iterator.gotoNextLevel(first, len);
return -1;
};
__device__ __forceinline__ void processLeaf(int node_idx)
{
int mask = __ballot_sync(0xFFFFFFFF, node_idx != -1);
unsigned int laneId = Warp::laneId();
unsigned int warpId = Warp::id();
__shared__ volatile int per_warp_buffer[KernelPolicy::WARPS_COUNT];
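// Leaves are handled one warp lane at a time: the active lane's leaf range and query are
// broadcast through the per-warp shared buffer so the whole warp can scan that leaf's points.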
while(mask)
{
int active_lane = __ffs(mask) - 1; //[0..31]
mask &= ~(1 << active_lane);
volatile int* warp_buffer = &per_warp_buffer[warpId];
//broadcast beg
if (active_lane == laneId)
*warp_buffer = batch.octree.begs[node_idx];
int beg = *warp_buffer;
//broadcast end
if (active_lane == laneId)
*warp_buffer = batch.octree.ends[node_idx];
int end = *warp_buffer;
float3 active_query;
volatile float* warp_buffer_float = (float*)&per_warp_buffer[warpId];
//broadcast warp_query
if (active_lane == laneId)
*warp_buffer_float = query.x;
active_query.x = *warp_buffer_float;
if (active_lane == laneId)
*warp_buffer_float = query.y;
active_query.y = *warp_buffer_float;
if (active_lane == laneId)
*warp_buffer_float = query.z;
active_query.z = *warp_buffer_float;
//broadcast query_index
if (active_lane == laneId)
*warp_buffer = query_index;
float active_query_index = *warp_buffer;
float dist;
int offset = NearestWarpKernel<KernelPolicy::CTA_SIZE>(batch.points + beg, batch.points_step, end - beg, active_query, dist);
if (active_lane == laneId)
if (min_distance > dist)
{
min_distance = dist;
min_idx = beg + offset;
}
}
}
template<int CTA_SIZE>
__device__ __forceinline__ int NearestWarpKernel(const float* points, int points_step, int length, const float3& active_query, float& dist)
{
__shared__ volatile float dist2[CTA_SIZE];
__shared__ volatile int index[CTA_SIZE];
int tid = threadIdx.x;
dist2[tid] = pcl::device::numeric_limits<float>::max();
//serial step
for (int idx = Warp::laneId(); idx < length; idx += Warp::STRIDE)
{
float dx = points[idx ] - active_query.x;
float dy = points[idx + points_step ] - active_query.y;
float dz = points[idx + points_step * 2] - active_query.z;
float d2 = dx * dx + dy * dy + dz * dz;
if (dist2[tid] > d2)
{
dist2[tid] = d2;
index[tid] = idx;
}
}
//parallel step
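// warp-synchronous tree reduction over the 32 lanes: keep the smallest squared distance and its index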
unsigned int lane = Warp::laneId();
float mind2 = dist2[tid];
if (lane < 16)
{
float next = dist2[tid + 16];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 16];
}
}
if (lane < 8)
{
float next = dist2[tid + 8];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 8];
}
}
if (lane < 4)
{
float next = dist2[tid + 4];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 4];
}
}
if (lane < 2)
{
float next = dist2[tid + 2];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 2];
}
}
if (lane < 1)
{
float next = dist2[tid + 1];
if (mind2 > next)
{
dist2[tid] = mind2 = next;
index[tid] = index[tid + 1];
}
}
dist = sqrt(dist2[tid - lane]);
return index[tid - lane];
}
};
__global__ void KernelKNN(const Batch batch)
{
int query_index = blockIdx.x * blockDim.x + threadIdx.x;
bool active = query_index < batch.queries_num;
if (__all_sync(0xFFFFFFFF, active == false))
return;
Warp_knnSearch search(batch, query_index);
search.launch(active);
}
} } }
void pcl::device::OctreeImpl::nearestKSearchBatch(const Queries& queries, int /*k*/, NeighborIndices& results) const
{
typedef pcl::device::knn_search::Batch BatchType;
BatchType batch;
batch.octree = octreeGlobal;
batch.indices = indices;
batch.queries_num = (int)queries.size();
batch.queries = queries;
batch.output = results.data;
batch.sizes = results.sizes;
batch.points = points_sorted;
batch.points_step = points_sorted.step()/points_sorted.elem_size;
cudaSafeCall( cudaFuncSetCacheConfig(pcl::device::knn_search::KernelKNN, cudaFuncCachePreferL1) );
int block = pcl::device::knn_search::KernelPolicy::CTA_SIZE;
int grid = (batch.queries_num + block - 1) / block;
pcl::device::knn_search::KernelKNN<<<grid, block>>>(batch);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
|
51514c4ff70089e6bbb312fdd5925e7060b2d447.hip | // !!! This is a file automatically generated by hipify!!!
#include "GridSamplerPlugin.h"
#include "hip/hip_fp16.h"
#include <chrono>
#include <thread>
#include <cudnn.h>
template<class T>
void GridSamplerPlugin::SpatialTfSamplerForward(const nvinfer1::PluginTensorDesc *inputDesc, const nvinfer1::PluginTensorDesc *outputDesc,
const void *const *inputs, void *const *outputs, cudnnDataType_t cudnnDataType) {
ck(cudnnSetTensorNdDescriptorEx(xDesc, CUDNN_TENSOR_NCHW, cudnnDataType, inputDesc[0].dims.nbDims, inputDesc[0].dims.d));
ck(cudnnSetTensorNdDescriptorEx(yDesc, CUDNN_TENSOR_NCHW, cudnnDataType, outputDesc[0].dims.nbDims, outputDesc[0].dims.d));
ck(cudnnSetSpatialTransformerNdDescriptor(stDesc, CUDNN_SAMPLER_BILINEAR, cudnnDataType, inputDesc[0].dims.nbDims, inputDesc[0].dims.d));
T alpha = 1.0f, beta = 0.0f;
ck(cudnnSpatialTfSamplerForward(cudnnHandle, stDesc, &alpha, xDesc, inputs[0], inputs[1], &beta, yDesc, outputs[0]));
}
int GridSamplerPlugin::enqueue(const nvinfer1::PluginTensorDesc *inputDesc, const nvinfer1::PluginTensorDesc *outputDesc,
const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream)
{
ck(cudnnSetStream(cudnnHandle, stream));
if (inputDesc[0].type == nvinfer1::DataType::kFLOAT) {
SpatialTfSamplerForward<float>(inputDesc, outputDesc, inputs, outputs, CUDNN_DATA_FLOAT);
} else if (inputDesc[1].type == nvinfer1::DataType::kHALF) {
SpatialTfSamplerForward<half>(inputDesc, outputDesc, inputs, outputs, CUDNN_DATA_HALF);
} else {
std::cerr << "Unsupported data type: " << (int)inputDesc[1].type << std::endl;
}
return 0;
}
REGISTER_TENSORRT_PLUGIN(GridSamplerPluginCreator);
| 51514c4ff70089e6bbb312fdd5925e7060b2d447.cu | #include "GridSamplerPlugin.h"
#include "cuda_fp16.h"
#include <chrono>
#include <thread>
#include <cudnn.h>
template<class T>
void GridSamplerPlugin::SpatialTfSamplerForward(const nvinfer1::PluginTensorDesc *inputDesc, const nvinfer1::PluginTensorDesc *outputDesc,
const void *const *inputs, void *const *outputs, cudnnDataType_t cudnnDataType) {
ck(cudnnSetTensorNdDescriptorEx(xDesc, CUDNN_TENSOR_NCHW, cudnnDataType, inputDesc[0].dims.nbDims, inputDesc[0].dims.d));
ck(cudnnSetTensorNdDescriptorEx(yDesc, CUDNN_TENSOR_NCHW, cudnnDataType, outputDesc[0].dims.nbDims, outputDesc[0].dims.d));
ck(cudnnSetSpatialTransformerNdDescriptor(stDesc, CUDNN_SAMPLER_BILINEAR, cudnnDataType, inputDesc[0].dims.nbDims, inputDesc[0].dims.d));
T alpha = 1.0f, beta = 0.0f;
ck(cudnnSpatialTfSamplerForward(cudnnHandle, stDesc, &alpha, xDesc, inputs[0], inputs[1], &beta, yDesc, outputs[0]));
}
int GridSamplerPlugin::enqueue(const nvinfer1::PluginTensorDesc *inputDesc, const nvinfer1::PluginTensorDesc *outputDesc,
const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream)
{
ck(cudnnSetStream(cudnnHandle, stream));
if (inputDesc[0].type == nvinfer1::DataType::kFLOAT) {
SpatialTfSamplerForward<float>(inputDesc, outputDesc, inputs, outputs, CUDNN_DATA_FLOAT);
} else if (inputDesc[1].type == nvinfer1::DataType::kHALF) {
SpatialTfSamplerForward<half>(inputDesc, outputDesc, inputs, outputs, CUDNN_DATA_HALF);
} else {
std::cerr << "Unsupported data type: " << (int)inputDesc[1].type << std::endl;
}
return 0;
}
REGISTER_TENSORRT_PLUGIN(GridSamplerPluginCreator);
|
c6b2f65f2f35ec551234c0d26d754a98e82f66bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void device_only_copy(float* output, float* input, size_t total_size){
for(size_t i = blockIdx.x * blockDim.x + threadIdx.x;
i < total_size;
i += blockDim.x * gridDim.x){
output[i] = input[i];
}
__syncthreads();
} | c6b2f65f2f35ec551234c0d26d754a98e82f66bf.cu | #include "includes.h"
__global__ void device_only_copy(float* output, float* input, size_t total_size){
for(size_t i = blockIdx.x * blockDim.x + threadIdx.x;
i < total_size;
i += blockDim.x * gridDim.x){
output[i] = input[i];
}
__syncthreads();
} |
cc9a906920df604967edacb5c24a6193eed40397.hip | // !!! This is a file automatically generated by hipify!!!
# pragma warning (disable:4819)
#include "mex.h"
// #include "hip/hip_runtime.h"
// #include "hip/hip_runtime.h"
#include "gpu\mxGPUArray.h"
#define BlockSize 1024
#define NPRCMAX 1000
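// NPRCMAX bounds the per-thread buffer of smallest distances used in GPUprctile, so N_small (= Nvec * p_ref) must not exceed it.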
//Device code 1 for single channel distance calculation
void __global__ CalDistance(double * dist,const double * rawdata,int N_data,int Nchn,const double * vref,const double * tim_range, int embedding,int sampling_delay,int Nvec,int Nstep){
/* The data received here is a continuous waveform over a period of time.
* The step and window sizes determine how many threads are needed.
* Each step computes the distance for w2-w1 vectors; within one channel the work is divided into steps, and channels are mapped to blocks.
*
*/
int t_idx=threadIdx.x; //this is the vector idx plus step idx
int b_idx=blockIdx.x;// the number of single channel's block idx
int chn_idx=blockIdx.y;//this is the channel idx
int tim_idx;//keep the same with cal_dist_3
int ref_idx; //startpoint
// extern __shared__ double shared_data[]; //should be size of single channel rawdata;
//filling up the shared memory, this should have nothing to do with b_idx
// int temp_idx=t_idx; // this is for filling the shared memory
// int t;
// while(temp_idx<N_data){
// // t=rawdata[chn_idx+temp_idx*Nchn];
// shared_data[temp_idx]=rawdata[chn_idx+temp_idx*Nchn]; // copy one channel's data into shared memory; all threads are involved to accelerate the transfer
// temp_idx+=BlockSize;
//
// }
// __syncthreads();
//Data transfer complete, data now transferred to shared memory
//get the actual coordinate by tim_idx and ref_idx
int temp_idx=t_idx+b_idx*BlockSize; //get the current abs cord.
if(temp_idx<Nvec*Nstep){ // threadsPerBlock is fixed, so some thread blocks do not take part in the later computation and only take part in filling shared memory
ref_idx=(int) temp_idx/(Nvec);//stp
tim_idx=temp_idx % (Nvec);//vec_idx
// //------------------------------calculating the distance---------------------------------
int tim;
int refe;
double temp=0;
double len=0;
// compute the absolute index of the compared vectors in the raw data
refe=vref[ref_idx]; // MATLAB indices start at 1, C starts at 0
tim=refe+tim_range[tim_idx]; // tim_range=w1:w2;
for(int ebd=0;ebd<embedding;ebd++){
len=rawdata[chn_idx+(refe+ebd*sampling_delay)*Nchn]-rawdata[chn_idx+(tim+ebd*sampling_delay)*Nchn];
temp+=len*len;
// temp=shared_data[N_data-1];//shared data OK!
// temp=tim;//201-202????
}
// if(t_idx+b_idx*BlockSize<N_data){
// dist[t_idx+b_idx*BlockSize]=shared_data[t_idx+b_idx*BlockSize];
// }
dist[ref_idx+tim_idx*Nstep+chn_idx*Nvec*Nstep]=temp;//shared memory would work through block
}
}
void __device__ gpu_sort(double *min_num,double add_num,int N_small){ //pick up smallest N_small values
// int *temp=new temp[N_small+1];
double max_v=add_num;
double temp;
for(int i=0;i<N_small;i++){
if(min_num[i]>max_v){
temp=min_num[i];
min_num[i]=max_v;
max_v=temp;
}
}
}
double __device__ gpu_max(double * data, int N){
double max_v=data[0];
for(int i=1;i<N;i++){
if(data[i]>max_v){
max_v=data[i];
}
}
return max_v;
}
void __global__ GPUprctile(double * dist,double * cdist,int N_small,int Nstep,int Nvec){
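// One thread per (step, channel) pair keeps the N_small smallest distances over all Nvec vectors and stores their maximum as the critical (percentile) distance.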
int idx_stepA=threadIdx.x;
int idx_stepB=blockIdx.x;
int chn_idx=blockIdx.y;
int stp_idx=idx_stepA+BlockSize*idx_stepB;
double min_num[NPRCMAX];
for(int i=0;i<N_small;i++){
min_num[i]=1.79769e+308;
}
if(stp_idx<Nstep){
for(int i=0;i<Nvec;i++){
gpu_sort(min_num,dist[stp_idx+i*Nstep+chn_idx*Nvec*Nstep],N_small);
}
cdist[stp_idx+chn_idx*Nstep]=gpu_max(min_num,N_small);
}
__syncthreads();
}
/*---------------------------------------Convert distace into pattern-------------*/
void __global__ CalPattern(int * pattern, double * dist,double * cdist,int Nvec,int Nstep){
//-----------------------calculating pattern---------------------------
int t_idx=threadIdx.x; //this is the vector idx plus step idx
int b_idx=blockIdx.x;// the number of single channel's block idx
int chn_idx=blockIdx.y;//this is the channel idx
int tim_idx;//keep the same with cal_dist_3
int ref_idx;
int temp_idx=t_idx+b_idx*BlockSize; // get the current abs cord.
if(temp_idx<Nvec*Nstep){
ref_idx=(int) temp_idx/Nvec; //stp
tim_idx=temp_idx % Nvec;//vec_idx
pattern[ref_idx+tim_idx*Nstep+chn_idx*Nvec*Nstep]=(dist[ref_idx+tim_idx*Nstep+chn_idx*Nvec*Nstep]<=cdist[ref_idx+chn_idx*Nstep]);
}
}
/*---------------------------------------Convert pattern into SL matrix-------------*/
void __global__ SLGen(double * SL, int * pattern,const double * vref,double Nvec,double Nstep,double Nchn,double Nstart,double delays_0){
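// For each channel pair (x,y), start point and delay, count simultaneous recurrences (A&B) and total recurrences (A+B) over the vectors; SL = 2*synE/allE.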
int tim_idx=threadIdx.x; //vecs location,this can not be longer than 1024
// int step_idx=blockIdx.y; // this idx would be a combined 1-D encoding of start point and delay
int stp=blockIdx.y;
int dly=blockIdx.z;
int chn=blockIdx.x;// channel number A
// int y=blockIdx.y;// channel number B
int x=chn/(int)Nchn;
int y=chn%(int)Nchn;
int synE=0;
int allE=0;
__shared__ int syn[2*BlockSize+1]; // need to check whether this is initialized to 0
// int dly=step_idx/(int)Nstart;
// int stp=step_idx%(int)Nstart;
int A=pattern[(int)(stp-delays_0+tim_idx*Nstep+x*Nvec*Nstep)];
int B=pattern[(int)((stp+dly)+tim_idx*Nstep+y*Nvec*Nstep)];
// int B=pattern[(int)(tim_idx+(stp+dly)*Nvec+y*Nvec*Nstep)];
syn[2*tim_idx]=A&B;
syn[1+2*tim_idx]=A+B;
__syncthreads();
if(tim_idx==0){
for(int idx=0;idx<Nvec;idx++){
synE+=syn[2*idx];
allE +=syn[1+2*idx];
}
if(allE!=0){
SL[(int)(x+y*Nchn+stp*Nchn*Nchn+dly*Nstart*Nchn*Nchn)]=2*((double) synE/ (double) allE);
}
else{
SL[(int)(x+y*Nchn+stp*Nchn*Nchn+dly*Nstart*Nchn*Nchn)]=0;
}
}
__syncthreads();
}
/*
* Host code----------------------------------------------MAIN FUNCTION--------------------------------------------
-----------------------------------------------------------------------------------------------------------------------*/
void mexFunction(int nlhs,mxArray *plhs[],int nrhs,mxArray const * prhs[]){
//(rawdata,w1,w2,embedding,sampling_delay,p_ref,stps,delays)
//plhs=output,need mxCreateDoubleMatrix to assign the space
int const threadsPerBlock=BlockSize;
/* Declare all variables.*/
mxGPUArray const *rawdata;
mxGPUArray * dist;
mxGPUArray * pattern;
mxGPUArray * SL;
mxGPUArray * cdist;
mxGPUArray const * GVref;
mxGPUArray const * Gtim;
// mxGPUArray * min_num;
mxArray * vref;
mxArray * tim;
// mxArray * para;
// mxArray * ref_out;
// mxArray * tim_out;
// mxArray * stp_out;
double * p_vref;
double * p_tim;
double const * p_rawdata;
double * p_dist;
int * p_pattern;
double * p_SL;
double * p_cdist;
double const * p_GVref;
double const * p_Gtim;
// double * p_min_num;
int Nchn;
int Datalen;
int Nvec;
int Nstep;
// char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
// char const * const errMsg = "Invalid input to MEX file.";
//initialize the MATLAB gpu API
mxInitGPU();
/*--------------------------------testing input parameters-----------------------------*/
/* Throw an error if the input is not a GPU array. */
// if ((nrhs!=8) || !(mxIsGPUArray(prhs[0]))) {
// mexErrMsgIdAndTxt(errId, errMsg);
// }
/*--------------------------------Creating mex vars----------------------------*/
//(rawdata,w1,w2,embedding,sampling_delay,p_ref,stps,delays)
Nchn=mxGetM(prhs[0]);
Datalen=mxGetN(prhs[0]);
rawdata=mxGPUCreateFromMxArray(prhs[0]); /*Create read-only mxGPUArray object from input mxArray
* (this is why it is const);
* data transferred to the GPU array*/
p_rawdata=(double const *)(mxGPUGetDataReadOnly(rawdata));//Read-only raw pointer to underlying data
double w1=mxGetScalar(prhs[1]);
double w2=mxGetScalar(prhs[2]);
double embedding=mxGetScalar(prhs[3]);
double sampling_delay=mxGetScalar(prhs[4]);
double p_ref=mxGetScalar(prhs[5]);
double * stps=mxGetPr(prhs[6]);
double * delays=mxGetPr(prhs[7]);
int Nstart=mxGetN(prhs[6]);
int Ndelay=mxGetN(prhs[7]);
Nvec=(int) (w2-w1-(embedding-1)*sampling_delay+1)*2;
Nstep=Nstart+Ndelay;
int N_small=((double) Nvec)*p_ref;
int temp_size[]={Nchn*Nvec*Nstep};
/* Create a GPUArray to hold the result and get its underlying pointer. */
dist=mxGPUCreateGPUArray(1,
(const mwSize *)temp_size,// NOTE: this cast may not be correct
mxDOUBLE_CLASS,
mxREAL,
MX_GPU_INITIALIZE_VALUES); // the last argument decides whether the array is initialized
p_dist=(double * )mxGPUGetData(dist); // why is an explicit cast needed??
pattern=mxGPUCreateGPUArray(1,
(const mwSize *)temp_size,
mxINT32_CLASS, //_________________________________________caution
mxREAL,
MX_GPU_INITIALIZE_VALUES); // the last argument decides whether the array is initialized
p_pattern=(int * )mxGPUGetData(pattern);
temp_size[0]=Nchn*Nchn*Nstart*Ndelay;
SL=mxGPUCreateGPUArray(1,
(const mwSize *)temp_size,
mxDOUBLE_CLASS,
mxREAL,
MX_GPU_INITIALIZE_VALUES); // the last argument decides whether the array is initialized
p_SL=(double * )mxGPUGetData(SL);
temp_size[0]=Nchn*Nstep;
cdist=mxGPUCreateGPUArray(1,
(const mwSize *)temp_size,// NOTE: this cast may not be correct
mxDOUBLE_CLASS,
mxREAL,
MX_GPU_INITIALIZE_VALUES); // the last argument decides whether the array is initialized
p_cdist=(double * )mxGPUGetData(cdist);
// temp_size[0]=N_small;
// min_num=mxGPUCreateGPUArray(1,
// (const mwSize *)temp_size,//!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// mxDOUBLE_CLASS,
// mxREAL,
// MX_GPU_INITIALIZE_VALUES); //
// p_min_num=(double * )mxGPUGetData(min_num);
/*--------------------------------establish output variables---------------------------*/
// plhs[0]=mxCreateCellMatrix(10,1);
plhs[0]=mxCreateDoubleMatrix(temp_size[0],1,mxREAL);
//nlhs doesn't count?
/*--------------------------------main program--------------------------------------*/
// int blocksPerGrid=Nchn*(1+Nvec*Nstep/threadsPerBlock);
// int tim_size=2*(w2-w1-(embedding-1)*sampling_delay+1);
vref=mxCreateDoubleMatrix(Nstep,1,mxREAL);
tim=mxCreateDoubleMatrix(Nvec,1,mxREAL);
p_vref=mxGetPr(vref);
p_tim=mxGetPr(tim);
// compute the steps actually needed here; assumes stp and dly both start at 0, use the same increment, and stps has length > 1
int inc=stps[1]-stps[0];
for(int idx=0;idx<Nstep;idx++){
p_vref[idx]=inc*idx+w2+stps[0]+delays[0];//location of Ref vector, should be stps+w2
}
for(int idx=0;idx<Nvec;idx++){ //[-w2:-w1-ebd-1)*sampling_delay,w1:w2-(ebd-1)*sampling_delay
if(idx<Nvec/2){
p_tim[idx]=idx-w2;
}
else{
p_tim[idx]=w1+idx-Nvec/2;
}
}
GVref=mxGPUCreateFromMxArray(vref);
Gtim=mxGPUCreateFromMxArray(tim);
p_GVref=(double const *)(mxGPUGetDataReadOnly(GVref));
p_Gtim=(double const *)(mxGPUGetDataReadOnly(Gtim));
dim3 blocks(1+Nvec*Nstep/threadsPerBlock,Nchn);
hipLaunchKernelGGL(( CalDistance), dim3(blocks),dim3(threadsPerBlock), 0, 0, p_dist,p_rawdata,Datalen,Nchn,p_GVref,p_Gtim,(int) embedding,(int) sampling_delay,Nvec,Nstep);
mxGPUDestroyGPUArray(rawdata);
dim3 blocks_C(1+Nstep/threadsPerBlock,Nchn);
hipLaunchKernelGGL(( GPUprctile), dim3(blocks_C),dim3(threadsPerBlock), 0, 0, p_dist,p_cdist,N_small,Nstep,Nvec);
//calculate the critical distance in CPU
hipLaunchKernelGGL(( CalPattern), dim3(blocks),dim3(threadsPerBlock), 0, 0, p_pattern,p_dist,p_cdist,Nvec,Nstep);
mxGPUDestroyGPUArray(cdist);
mxGPUDestroyGPUArray(dist);
dim3 blocks_SL(Nchn*Nchn,Nstart,Ndelay);
hipLaunchKernelGGL(( SLGen), dim3(blocks_SL),dim3(Nvec), 0, 0, p_SL,p_pattern,p_GVref,Nvec,Nstep,Nchn,Nstart,delays[0]/inc);
mxGPUDestroyGPUArray(pattern);
//dist[ref_idx+tim_idx*Nvec+chn_idx*Nvec*Nstep]
//pattern[ref_idx+tim_idx*Nvec+chn_idx*Nvec*Nstep]
//SL[x+y*Nchn]
//convert the SL into CELL
// para=mxCreateDoubleMatrix(6,1,mxREAL);// allocate a block of memory and get a pointer to it
// ref_out=mxCreateDoubleMatrix(Nstart,1,mxREAL);
// tim_out=mxCreateDoubleMatrix(tim_size,1,mxREAL);
// stp_out=mxCreateDoubleMatrix(Nstart,1,mxREAL);
// double * para_C=mxGetPr(para);
// double * para_CR=mxGetPr(ref_out);
// double * para_CT=mxGetPr(tim_out);
// double * para_stp=mxGetPr(stp_out);
// double temp[]={Datalen,Nchn,temp_size[0],Nvec,blocksPerGrid,threadsPerBlock};
// para_C[0]=Datalen;
// para_C[1]=Nchn;
// para_C[2]=embedding;
// para_C[3]=sampling_delay;
// para_C[4]=Nvec;
// para_C[5]=Nstep;
// memcpy(para_C,temp,sizeof(temp));
// memcpy(para_CR,p_vref,Nstep*sizeof(double));
// memcpy(para_CT,p_tim,tim_size*sizeof(double));
// memcpy(para_stp,stps,Nstart*sizeof(double));
plhs[0]=mxGPUCreateMxArrayOnCPU(SL);
// mxSetCell(plhs[0],0,mxGPUCreateMxArrayOnCPU(SL)); //testing by output to 1
// mxSetCell(plhs[0],1,mxGPUCreateMxArrayOnCPU(rawdata)); //testing by output to 1
// mxSetCell(plhs[0],2,para);
// mxSetCell(plhs[0],3,vref);
// mxSetCell(plhs[0],4,tim);
// mxSetCell(plhs[0],3,ref_out);
// mxSetCell(plhs[0],4,tim_out);
// mxSetCell(plhs[0],5,mxGPUCreateMxArrayOnCPU(dist)); //testing by output to 1
// mxSetCell(plhs[0],6,mxGPUCreateMxArrayOnCPU(cdist)); //testing by output to 1
// mxSetCell(plhs[0],7,mxGPUCreateMxArrayOnCPU(pattern)); //testing by output to 1
// mxSetCell(plhs[0],8,stp_out);
// mxArray * SL_CPU=mxGPUCreateMxArrayOnCPU(SL);
//mxArray * SL_each=mxCreateDoubleMatrix(Nstart,Ndelay,mxREAL);
// double * p_SL_each=mxGetPr(SL_each);
// double * p_SL_CPU=mxGetPr(SL_CPU);
// for(int A=0;A<Nchn;A++){
// for(int B=0;B<Nchn;B++){
// for(int x=0;x<Nstart;x++){
// for(int y=0;y<Ndelay;y++){
// p_SL_each[x+y*Nstart]=p_SL_CPU[A+B*Nchn+x*Nchn*Nchn+y*Nchn*Nchn*Nstart];
// }
// }
// mxSetCell(plhs[0],8+A+B*Nchn,SL_each);
// }
// }
/*--------------------------------release resource------------------------------------*/
// delete[] vref;
// delete[] tim;
// delete[] temp_size;
// mxGPUDestroyGPUArray(dist);
// mxGPUDestroyGPUArray(pattern);
// mxGPUDestroyGPUArray(rawdata);
// mxGPUDestroyGPUArray(cdist);
mxGPUDestroyGPUArray(SL);
// mxDestroyArray(vref);
// mxDestroyArray(tim);
// mxDestroyArray(para);
// mxDestroyArray(ref_out);
// mxDestroyArray(tim_out);
// mxDestroyArray(stp_out);
// mxGPUDestroyGPUArray(min_num);
mxGPUDestroyGPUArray(GVref);
mxGPUDestroyGPUArray(Gtim);
// hipDeviceReset();
return;
} | cc9a906920df604967edacb5c24a6193eed40397.cu | # pragma warning (disable:4819)
#include "mex.h"
// #include "cuda.h"
// #include "cuda_runtime.h"
#include "gpu\mxGPUArray.h"
#define BlockSize 1024
#define NPRCMAX 1000
//Device code 1 for single channel distance calculation
void __global__ CalDistance(double * dist,const double * rawdata,int N_data,int Nchn,const double * vref,const double * tim_range, int embedding,int sampling_delay,int Nvec,int Nstep){
/* The data received here is a continuous waveform over a period of time.
* The step and window sizes determine how many threads are needed.
* Each step computes the distance for w2-w1 vectors; within one channel the work is divided into steps, and channels are mapped to blocks.
*
*/
int t_idx=threadIdx.x; //this is the vector idx plus step idx
int b_idx=blockIdx.x;// the number of single channel's block idx
int chn_idx=blockIdx.y;//this is the channel idx
int tim_idx;//keep the same with cal_dist_3
int ref_idx; //startpoint
// extern __shared__ double shared_data[]; //should be size of single channel rawdata;
//filling up the shared memory, this should have nothing to do with b_idx
// int temp_idx=t_idx; // this is for filling the shared memory
// int t;
// while(temp_idx<N_data){
// // t=rawdata[chn_idx+temp_idx*Nchn];
// shared_data[temp_idx]=rawdata[chn_idx+temp_idx*Nchn]; // copy one channel's data into shared memory; all threads are involved to accelerate the transfer
// temp_idx+=BlockSize;
//
// }
// __syncthreads();
//Data transfer complete, data now transferred to shared memory
//get the actual coordinate by tim_idx and ref_idx
int temp_idx=t_idx+b_idx*BlockSize; //get the current abs cord.
if(temp_idx<Nvec*Nstep){ // threadsPerBlock is fixed, so some thread blocks do not take part in the later computation and only take part in filling shared memory
ref_idx=(int) temp_idx/(Nvec);//stp
tim_idx=temp_idx % (Nvec);//vec_idx
// //------------------------------calculating the distance---------------------------------
int tim;
int refe;
double temp=0;
double len=0;
// compute the absolute index of the compared vectors in the raw data
refe=vref[ref_idx]; // MATLAB indices start at 1, C starts at 0
tim=refe+tim_range[tim_idx]; // tim_range=w1:w2;
for(int ebd=0;ebd<embedding;ebd++){
len=rawdata[chn_idx+(refe+ebd*sampling_delay)*Nchn]-rawdata[chn_idx+(tim+ebd*sampling_delay)*Nchn];
temp+=len*len;
// temp=shared_data[N_data-1];//shared data OK!
// temp=tim;//201-202????
}
// if(t_idx+b_idx*BlockSize<N_data){
// dist[t_idx+b_idx*BlockSize]=shared_data[t_idx+b_idx*BlockSize];
// }
dist[ref_idx+tim_idx*Nstep+chn_idx*Nvec*Nstep]=temp;//shared memory would work through block
}
}
void __device__ gpu_sort(double *min_num,double add_num,int N_small){ //pick up smallest N_small values
// int *temp=new temp[N_small+1];
double max_v=add_num;
double temp;
for(int i=0;i<N_small;i++){
if(min_num[i]>max_v){
temp=min_num[i];
min_num[i]=max_v;
max_v=temp;
}
}
}
double __device__ gpu_max(double * data, int N){
double max_v=data[0];
for(int i=1;i<N;i++){
if(data[i]>max_v){
max_v=data[i];
}
}
return max_v;
}
void __global__ GPUprctile(double * dist,double * cdist,int N_small,int Nstep,int Nvec){
int idx_stepA=threadIdx.x;
int idx_stepB=blockIdx.x;
int chn_idx=blockIdx.y;
int stp_idx=idx_stepA+BlockSize*idx_stepB;
double min_num[NPRCMAX];
for(int i=0;i<N_small;i++){
min_num[i]=1.79769e+308;
}
if(stp_idx<Nstep){
for(int i=0;i<Nvec;i++){
gpu_sort(min_num,dist[stp_idx+i*Nstep+chn_idx*Nvec*Nstep],N_small);
}
cdist[stp_idx+chn_idx*Nstep]=gpu_max(min_num,N_small);
}
__syncthreads();
}
/*---------------------------------------Convert distace into pattern-------------*/
void __global__ CalPattern(int * pattern, double * dist,double * cdist,int Nvec,int Nstep){
//-----------------------calculating pattern---------------------------
int t_idx=threadIdx.x; //this is the vector idx plus step idx
int b_idx=blockIdx.x;// the number of single channel's block idx
int chn_idx=blockIdx.y;//this is the channel idx
int tim_idx;//keep the same with cal_dist_3
int ref_idx;
int temp_idx=t_idx+b_idx*BlockSize; // get the current abs cord.
if(temp_idx<Nvec*Nstep){
ref_idx=(int) temp_idx/Nvec; //stp
tim_idx=temp_idx % Nvec;//vec_idx
pattern[ref_idx+tim_idx*Nstep+chn_idx*Nvec*Nstep]=(dist[ref_idx+tim_idx*Nstep+chn_idx*Nvec*Nstep]<=cdist[ref_idx+chn_idx*Nstep]);
}
}
/*---------------------------------------Convert pattern into SL matrix-------------*/
void __global__ SLGen(double * SL, int * pattern,const double * vref,double Nvec,double Nstep,double Nchn,double Nstart,double delays_0){
int tim_idx=threadIdx.x; //vecs location,this can not be longer than 1024
// int step_idx=blockIdx.y; // this idx would be a combined 1-D encoding of start point and delay
int stp=blockIdx.y;
int dly=blockIdx.z;
int chn=blockIdx.x;// channel number A
// int y=blockIdx.y;// channel number B
int x=chn/(int)Nchn;
int y=chn%(int)Nchn;
int synE=0;
int allE=0;
__shared__ int syn[2*BlockSize+1]; // need to check whether this is initialized to 0
// int dly=step_idx/(int)Nstart;
// int stp=step_idx%(int)Nstart;
int A=pattern[(int)(stp-delays_0+tim_idx*Nstep+x*Nvec*Nstep)];
int B=pattern[(int)((stp+dly)+tim_idx*Nstep+y*Nvec*Nstep)];
// int B=pattern[(int)(tim_idx+(stp+dly)*Nvec+y*Nvec*Nstep)];
syn[2*tim_idx]=A&B;
syn[1+2*tim_idx]=A+B;
__syncthreads();
if(tim_idx==0){
for(int idx=0;idx<Nvec;idx++){
synE+=syn[2*idx];
allE +=syn[1+2*idx];
}
if(allE!=0){
SL[(int)(x+y*Nchn+stp*Nchn*Nchn+dly*Nstart*Nchn*Nchn)]=2*((double) synE/ (double) allE);
}
else{
SL[(int)(x+y*Nchn+stp*Nchn*Nchn+dly*Nstart*Nchn*Nchn)]=0;
}
}
__syncthreads();
}
/*
* Host code----------------------------------------------MAIN FUNCTION--------------------------------------------
-----------------------------------------------------------------------------------------------------------------------*/
void mexFunction(int nlhs,mxArray *plhs[],int nrhs,mxArray const * prhs[]){
//(rawdata,w1,w2,embedding,sampling_delay,p_ref,stps,delays)
//plhs=output,need mxCreateDoubleMatrix to assign the space
int const threadsPerBlock=BlockSize;
/* Declare all variables.*/
mxGPUArray const *rawdata;
mxGPUArray * dist;
mxGPUArray * pattern;
mxGPUArray * SL;
mxGPUArray * cdist;
mxGPUArray const * GVref;
mxGPUArray const * Gtim;
// mxGPUArray * min_num;
mxArray * vref;
mxArray * tim;
// mxArray * para;
// mxArray * ref_out;
// mxArray * tim_out;
// mxArray * stp_out;
double * p_vref;
double * p_tim;
double const * p_rawdata;
double * p_dist;
int * p_pattern;
double * p_SL;
double * p_cdist;
double const * p_GVref;
double const * p_Gtim;
// double * p_min_num;
int Nchn;
int Datalen;
int Nvec;
int Nstep;
// char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
// char const * const errMsg = "Invalid input to MEX file.";
//initialize the MATLAB gpu API
mxInitGPU();
/*--------------------------------testing input parameters-----------------------------*/
/* Throw an error if the input is not a GPU array. */
// if ((nrhs!=8) || !(mxIsGPUArray(prhs[0]))) {
// mexErrMsgIdAndTxt(errId, errMsg);
// }
/*--------------------------------Creating mex vars----------------------------*/
//(rawdata,w1,w2,embedding,sampling_delay,p_ref,stps,delays)
Nchn=mxGetM(prhs[0]);
Datalen=mxGetN(prhs[0]);
rawdata=mxGPUCreateFromMxArray(prhs[0]); /*Create read-only mxGPUArray object from input mxArray
* (this is why it is const);
* data transferred to the GPU array*/
p_rawdata=(double const *)(mxGPUGetDataReadOnly(rawdata));//Read-only raw pointer to underlying data
double w1=mxGetScalar(prhs[1]);
double w2=mxGetScalar(prhs[2]);
double embedding=mxGetScalar(prhs[3]);
double sampling_delay=mxGetScalar(prhs[4]);
double p_ref=mxGetScalar(prhs[5]);
double * stps=mxGetPr(prhs[6]);
double * delays=mxGetPr(prhs[7]);
int Nstart=mxGetN(prhs[6]);
int Ndelay=mxGetN(prhs[7]);
Nvec=(int) (w2-w1-(embedding-1)*sampling_delay+1)*2;
Nstep=Nstart+Ndelay;
int N_small=((double) Nvec)*p_ref;
int temp_size[]={Nchn*Nvec*Nstep};
/* Create a GPUArray to hold the result and get its underlying pointer. */
dist=mxGPUCreateGPUArray(1,
(const mwSize *)temp_size,// NOTE: this cast may not be correct
mxDOUBLE_CLASS,
mxREAL,
MX_GPU_INITIALIZE_VALUES); // the last argument decides whether the array is initialized
p_dist=(double * )mxGPUGetData(dist); // why is an explicit cast needed??
pattern=mxGPUCreateGPUArray(1,
(const mwSize *)temp_size,
mxINT32_CLASS, //_________________________________________caution
mxREAL,
MX_GPU_INITIALIZE_VALUES); // the last argument decides whether the array is initialized
p_pattern=(int * )mxGPUGetData(pattern);
temp_size[0]=Nchn*Nchn*Nstart*Ndelay;
SL=mxGPUCreateGPUArray(1,
(const mwSize *)temp_size,
mxDOUBLE_CLASS,
mxREAL,
MX_GPU_INITIALIZE_VALUES); // the last argument decides whether the array is initialized
p_SL=(double * )mxGPUGetData(SL);
temp_size[0]=Nchn*Nstep;
cdist=mxGPUCreateGPUArray(1,
(const mwSize *)temp_size,// NOTE: this cast may not be correct
mxDOUBLE_CLASS,
mxREAL,
MX_GPU_INITIALIZE_VALUES); // the last argument decides whether the array is initialized
p_cdist=(double * )mxGPUGetData(cdist);
// temp_size[0]=N_small;
// min_num=mxGPUCreateGPUArray(1,
// (const mwSize *)temp_size,// NOTE: this cast may not be correct
// mxDOUBLE_CLASS,
// mxREAL,
// MX_GPU_INITIALIZE_VALUES); // the last argument decides whether the array is initialized
// p_min_num=(double * )mxGPUGetData(min_num);
/*--------------------------------establish output variables---------------------------*/
// plhs[0]=mxCreateCellMatrix(10,1);
plhs[0]=mxCreateDoubleMatrix(temp_size[0],1,mxREAL);
//nlhs doesn't count?
/*--------------------------------main program--------------------------------------*/
// int blocksPerGrid=Nchn*(1+Nvec*Nstep/threadsPerBlock);
// int tim_size=2*(w2-w1-(embedding-1)*sampling_delay+1);
vref=mxCreateDoubleMatrix(Nstep,1,mxREAL);
tim=mxCreateDoubleMatrix(Nvec,1,mxREAL);
p_vref=mxGetPr(vref);
p_tim=mxGetPr(tim);
// compute the steps actually needed here; assumes stp and dly both start at 0, use the same increment, and stps has length > 1
int inc=stps[1]-stps[0];
for(int idx=0;idx<Nstep;idx++){
p_vref[idx]=inc*idx+w2+stps[0]+delays[0];//location of Ref vector, should be stps+w2
}
for(int idx=0;idx<Nvec;idx++){ //[-w2:-w1-ebd-1)*sampling_delay,w1:w2-(ebd-1)*sampling_delay
if(idx<Nvec/2){
p_tim[idx]=idx-w2;
}
else{
p_tim[idx]=w1+idx-Nvec/2;
}
}
GVref=mxGPUCreateFromMxArray(vref);
Gtim=mxGPUCreateFromMxArray(tim);
p_GVref=(double const *)(mxGPUGetDataReadOnly(GVref));
p_Gtim=(double const *)(mxGPUGetDataReadOnly(Gtim));
dim3 blocks(1+Nvec*Nstep/threadsPerBlock,Nchn);
CalDistance<<<blocks,threadsPerBlock>>>(p_dist,p_rawdata,Datalen,Nchn,p_GVref,p_Gtim,(int) embedding,(int) sampling_delay,Nvec,Nstep);
mxGPUDestroyGPUArray(rawdata);
dim3 blocks_C(1+Nstep/threadsPerBlock,Nchn);
GPUprctile<<<blocks_C,threadsPerBlock>>>(p_dist,p_cdist,N_small,Nstep,Nvec);
//calculate the critical distance in CPU
CalPattern<<<blocks,threadsPerBlock>>>(p_pattern,p_dist,p_cdist,Nvec,Nstep);
mxGPUDestroyGPUArray(cdist);
mxGPUDestroyGPUArray(dist);
dim3 blocks_SL(Nchn*Nchn,Nstart,Ndelay);
SLGen<<<blocks_SL,Nvec>>>(p_SL,p_pattern,p_GVref,Nvec,Nstep,Nchn,Nstart,delays[0]/inc);
mxGPUDestroyGPUArray(pattern);
//dist[ref_idx+tim_idx*Nvec+chn_idx*Nvec*Nstep]
//pattern[ref_idx+tim_idx*Nvec+chn_idx*Nvec*Nstep]
//SL[x+y*Nchn]
//convert the SL into CELL
// para=mxCreateDoubleMatrix(6,1,mxREAL);// allocate a block of memory and get a pointer to it
// ref_out=mxCreateDoubleMatrix(Nstart,1,mxREAL);
// tim_out=mxCreateDoubleMatrix(tim_size,1,mxREAL);
// stp_out=mxCreateDoubleMatrix(Nstart,1,mxREAL);
// double * para_C=mxGetPr(para);
// double * para_CR=mxGetPr(ref_out);
// double * para_CT=mxGetPr(tim_out);
// double * para_stp=mxGetPr(stp_out);
// double temp[]={Datalen,Nchn,temp_size[0],Nvec,blocksPerGrid,threadsPerBlock};
// para_C[0]=Datalen;
// para_C[1]=Nchn;
// para_C[2]=embedding;
// para_C[3]=sampling_delay;
// para_C[4]=Nvec;
// para_C[5]=Nstep;
// memcpy(para_C,temp,sizeof(temp));
// memcpy(para_CR,p_vref,Nstep*sizeof(double));
// memcpy(para_CT,p_tim,tim_size*sizeof(double));
// memcpy(para_stp,stps,Nstart*sizeof(double));
plhs[0]=mxGPUCreateMxArrayOnCPU(SL);
// mxSetCell(plhs[0],0,mxGPUCreateMxArrayOnCPU(SL)); //testing by output to 1
// mxSetCell(plhs[0],1,mxGPUCreateMxArrayOnCPU(rawdata)); //testing by output to 1
// mxSetCell(plhs[0],2,para);
// mxSetCell(plhs[0],3,vref);
// mxSetCell(plhs[0],4,tim);
// mxSetCell(plhs[0],3,ref_out);
// mxSetCell(plhs[0],4,tim_out);
// mxSetCell(plhs[0],5,mxGPUCreateMxArrayOnCPU(dist)); //testing by output to 1
// mxSetCell(plhs[0],6,mxGPUCreateMxArrayOnCPU(cdist)); //testing by output to 1
// mxSetCell(plhs[0],7,mxGPUCreateMxArrayOnCPU(pattern)); //testing by output to 1
// mxSetCell(plhs[0],8,stp_out);
// mxArray * SL_CPU=mxGPUCreateMxArrayOnCPU(SL);
//mxArray * SL_each=mxCreateDoubleMatrix(Nstart,Ndelay,mxREAL);
// double * p_SL_each=mxGetPr(SL_each);
// double * p_SL_CPU=mxGetPr(SL_CPU);
// for(int A=0;A<Nchn;A++){
// for(int B=0;B<Nchn;B++){
// for(int x=0;x<Nstart;x++){
// for(int y=0;y<Ndelay;y++){
// p_SL_each[x+y*Nstart]=p_SL_CPU[A+B*Nchn+x*Nchn*Nchn+y*Nchn*Nchn*Nstart];
// }
// }
// mxSetCell(plhs[0],8+A+B*Nchn,SL_each);
// }
// }
/*--------------------------------release resource------------------------------------*/
// delete[] vref;
// delete[] tim;
// delete[] temp_size;
// mxGPUDestroyGPUArray(dist);
// mxGPUDestroyGPUArray(pattern);
// mxGPUDestroyGPUArray(rawdata);
// mxGPUDestroyGPUArray(cdist);
mxGPUDestroyGPUArray(SL);
// mxDestroyArray(vref);
// mxDestroyArray(tim);
// mxDestroyArray(para);
// mxDestroyArray(ref_out);
// mxDestroyArray(tim_out);
// mxDestroyArray(stp_out);
// mxGPUDestroyGPUArray(min_num);
mxGPUDestroyGPUArray(GVref);
mxGPUDestroyGPUArray(Gtim);
// cudaDeviceReset();
return;
} |
49019192d85949c3a5cbdd0626366df60084875f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <cstdio>
#include <cstdlib>
#include <ctime>
using namespace std;
__global__
void count_samples_in_circles(float* d_randNumsX, float* d_randNumsY, int* d_countInBlocks, int num_blocks, int nsamples)
{
__shared__ int shared_blocks[500];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * num_blocks;
int inCircle = 0;
for (int i = index; i < nsamples; i+= stride) {
float xValue = d_randNumsX[i];
float yValue = d_randNumsY[i];
if (xValue*xValue + yValue*yValue <= 1.0f) {
inCircle++;
}
}
shared_blocks[threadIdx.x] = inCircle;
__syncthreads();
if (threadIdx.x == 0)
{
int totalInCircleForABlock = 0;
for (int j = 0; j < blockDim.x; j++)
{
totalInCircleForABlock += shared_blocks[j];
}
d_countInBlocks[blockIdx.x] = totalInCircleForABlock;
}
}
int nsamples;
int main(int argc, char* argv[]) {
int nsamples = atoi(argv[1]);
printf("nsamples: %d\n", nsamples);
vector<float> h_randNumsX(nsamples);
vector<float> h_randNumsY(nsamples);
srand(time(NULL));
for (int i = 0; i < h_randNumsX.size(); ++i)
{
h_randNumsX[i] = float(rand()) / RAND_MAX;
h_randNumsY[i] = float(rand()) / RAND_MAX;
}
size_t size = nsamples * sizeof(float);
float* d_randNumsX;
float* d_randNumsY;
hipMalloc(&d_randNumsX, size);
hipMalloc(&d_randNumsY, size);
hipMemcpy(d_randNumsX, &h_randNumsX.front(), size, hipMemcpyHostToDevice);
hipMemcpy(d_randNumsY, &h_randNumsY.front(), size, hipMemcpyHostToDevice);
int threadsPerBlock = 500;
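// note: with this block count each thread of the grid-stride loop handles roughly 1000 samples;
// it assumes nsamples >= 1000 * threadsPerBlock, otherwise num_blocks is 0 and no blocks are launched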
int num_blocks = nsamples / (1000 * threadsPerBlock);
size_t countBlocks = num_blocks * sizeof(int);
int* d_countInBlocks;
hipMalloc(&d_countInBlocks, countBlocks);
hipLaunchKernelGGL(( count_samples_in_circles), dim3(num_blocks), dim3(threadsPerBlock), 0, 0, d_randNumsX, d_randNumsY, d_countInBlocks, num_blocks, nsamples);
if ( hipSuccess != hipGetLastError() )
cout << "Error!\n";
int* h_countInBlocks = new int[num_blocks];
hipMemcpy(h_countInBlocks, d_countInBlocks, countBlocks, hipMemcpyDeviceToHost);
int nsamples_in_circle = 0;
for (int i = 0 ; i < num_blocks; i++) {
nsamples_in_circle = nsamples_in_circle + h_countInBlocks[i];
}
hipFree(d_randNumsX);
hipFree(d_randNumsY);
hipFree(d_countInBlocks);
float estimatedValue = 4.0 * float(nsamples_in_circle) / nsamples;
cout << "Estimated Value: " << estimatedValue << endl;
}
| 49019192d85949c3a5cbdd0626366df60084875f.cu | #include <iostream>
#include <vector>
#include <cstdio>
#include <cstdlib>
#include <ctime>
using namespace std;
__global__
void count_samples_in_circles(float* d_randNumsX, float* d_randNumsY, int* d_countInBlocks, int num_blocks, int nsamples)
{
__shared__ int shared_blocks[500];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * num_blocks;
int inCircle = 0;
for (int i = index; i < nsamples; i+= stride) {
float xValue = d_randNumsX[i];
float yValue = d_randNumsY[i];
if (xValue*xValue + yValue*yValue <= 1.0f) {
inCircle++;
}
}
shared_blocks[threadIdx.x] = inCircle;
__syncthreads();
if (threadIdx.x == 0)
{
int totalInCircleForABlock = 0;
for (int j = 0; j < blockDim.x; j++)
{
totalInCircleForABlock += shared_blocks[j];
}
d_countInBlocks[blockIdx.x] = totalInCircleForABlock;
}
}
int nsamples;
int main(int argc, char* argv[]) {
int nsamples = atoi(argv[1]);
printf("nsamples: %d\n", nsamples);
vector<float> h_randNumsX(nsamples);
vector<float> h_randNumsY(nsamples);
srand(time(NULL));
for (int i = 0; i < h_randNumsX.size(); ++i)
{
h_randNumsX[i] = float(rand()) / RAND_MAX;
h_randNumsY[i] = float(rand()) / RAND_MAX;
}
size_t size = nsamples * sizeof(float);
float* d_randNumsX;
float* d_randNumsY;
cudaMalloc(&d_randNumsX, size);
cudaMalloc(&d_randNumsY, size);
cudaMemcpy(d_randNumsX, &h_randNumsX.front(), size, cudaMemcpyHostToDevice);
cudaMemcpy(d_randNumsY, &h_randNumsY.front(), size, cudaMemcpyHostToDevice);
int threadsPerBlock = 500;
int num_blocks = nsamples / (1000 * threadsPerBlock);
size_t countBlocks = num_blocks * sizeof(int);
int* d_countInBlocks;
cudaMalloc(&d_countInBlocks, countBlocks);
count_samples_in_circles<<<num_blocks, threadsPerBlock>>>(d_randNumsX, d_randNumsY, d_countInBlocks, num_blocks, nsamples);
if ( cudaSuccess != cudaGetLastError() )
cout << "Error!\n";
int* h_countInBlocks = new int[num_blocks];
cudaMemcpy(h_countInBlocks, d_countInBlocks, countBlocks, cudaMemcpyDeviceToHost);
int nsamples_in_circle = 0;
for (int i = 0 ; i < num_blocks; i++) {
nsamples_in_circle = nsamples_in_circle + h_countInBlocks[i];
}
cudaFree(d_randNumsX);
cudaFree(d_randNumsY);
cudaFree(d_countInBlocks);
float estimatedValue = 4.0 * float(nsamples_in_circle) / nsamples;
cout << "Estimated Value: " << estimatedValue << endl;
}
|
ad35071467d8a7dea8b7074957a4a33ec5136fc3.hip | // !!! This is a file automatically generated by hipify!!!
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Definition Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
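/* Editor's note: a minimal, hypothetical serial sketch of the four steps above,
   kept as a host-side reference only; it is not part of the original assignment
   code, and the name cpu_cdf_sketch and its signature are illustrative
   assumptions. Step 4 is the exclusive scan the assignment asks for. */
#include <vector>
#include <algorithm>
#include <cfloat>
#include <cstddef>
static void cpu_cdf_sketch(const float *logLum, size_t n, size_t numBins,
                           std::vector<unsigned int> &cdf,
                           float &minLum, float &maxLum)
{
  minLum = FLT_MAX; maxLum = -FLT_MAX;
  for (size_t i = 0; i < n; ++i) {                      // step 1: min / max
    minLum = std::min(minLum, logLum[i]);
    maxLum = std::max(maxLum, logLum[i]);
  }
  float range = maxLum - minLum;                        // step 2: range
  if (range == 0.f) range = 1.f;                        // guard against a flat image
  std::vector<unsigned int> histo(numBins, 0);
  for (size_t i = 0; i < n; ++i) {                      // step 3: histogram
    size_t bin = std::min(numBins - 1,
                          (size_t)((logLum[i] - minLum) / range * numBins));
    ++histo[bin];
  }
  cdf.assign(numBins, 0);                               // step 4: exclusive scan
  for (size_t b = 1; b < numBins; ++b)
    cdf[b] = cdf[b - 1] + histo[b - 1];
}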
#include "reference_calc.cpp"
#include "utils.h"
#include "hip/hip_runtime.h"
#define BLOCKSIZEMAX 16
#define BLOCKSIZEMAXHISTOGRAM 22
#define BLOCKSIZEMAXSCAN 512
//Modified simple histo from notes
__global__ void simple_histo(unsigned int *d_bins, const float *d_In, const unsigned int binCount, float _min, float _range, int numRows, int numCols)
{
int myId = ((blockIdx.x + (blockIdx.y * gridDim.x)) * (blockDim.x * blockDim.y)) + (threadIdx.x + (threadIdx.y * blockDim.x));
if ( myId < (numRows * numCols) )
{
float myItem = d_In[myId];
unsigned int myBin = min(static_cast<unsigned int>(binCount - 1), static_cast<unsigned int>((myItem - _min) / _range * binCount));
atomicAdd(&(d_bins[myBin]), 1);
}
else{
return;
}
}
//Found original in discussion forum... modified for a .2 speed increase by reducing logic sequences
__global__ void globalMinMax(float *d_Out, const float *d_In, int numRows, int numCols, bool firstTime)
{
__shared__ float sharedValue[2 * BLOCKSIZEMAX * BLOCKSIZEMAX];
int myId = ((blockIdx.x + (blockIdx.y * gridDim.x)) * (blockDim.x * blockDim.y)) + (threadIdx.x + (threadIdx.y * blockDim.x));
if ( myId < (numRows * numCols) )
{
if (!firstTime)
{
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)] = d_In[myId];
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)] = d_In[myId + (numRows * numCols)];
}
else
{
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)] = d_In[myId];
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)] = d_In[myId];
}
for (unsigned int s = (blockDim.x * blockDim.y) / 2; s > 0; s >>= 1)
{
if ((threadIdx.y * blockDim.x + threadIdx.x) < s && (myId + s) < (numRows * numCols))
{
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)] = min(sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)], sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + s]);
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)] = max(sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)], sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y) + s]);
}
__syncthreads();
}
if ((threadIdx.y * blockDim.x + threadIdx.x) == 0)
{
d_Out[myId / (blockDim.x * blockDim.y)] = d_In[myId];
d_Out[myId / (blockDim.x * blockDim.y)] = sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)];
if (gridDim.x <= 1 || gridDim.y <= 1)
{
d_Out[(myId / (blockDim.x * blockDim.y)) + 1] = d_In[myId + (numRows * numCols)];
d_Out[(myId / (blockDim.x * blockDim.y)) + 1] = sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)];
}
else
{
d_Out[(myId / (blockDim.x * blockDim.y)) + (blockDim.x * blockDim.y)] = d_In[myId + (numRows * numCols)];
d_Out[(myId / (blockDim.x * blockDim.y)) + (blockDim.x * blockDim.y)] = sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)];
}
}
}
else{
return;
}
}
//https://github.com/steffenmartin/CUDA/blob/master/Scan/kernel.cu found cool exclusive Blelloch Scan
//was curious how would react after class discussion
//Suprisingly fast.... next step compare to inclusivew
// this really help me understadn Blelloch
//modified logic for .05 increase in speed....
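// example: an exclusive scan of the 3-bin histogram [4 7 3] from the header comment produces [0 4 11]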
__global__ void blellochScanExclusive(const unsigned int *d_In, unsigned int *d_Out, size_t size, size_t offset, bool isLastCall)
{
__shared__ unsigned int CurrentBoundaryValue;
__shared__ unsigned int finalSum;
unsigned int remember;
size_t remainingSteps = size;
unsigned int neighbor = 1;
unsigned int addTurn = 1;
__shared__ unsigned int sharedValue[BLOCKSIZEMAXSCAN];
if (threadIdx.x == 0)
{
CurrentBoundaryValue = 0;
remember = d_In[offset + size - 1];
if (offset > 0)
{
finalSum = d_Out[0] + d_Out[offset - 1];
}
}
if (threadIdx.x < size)
{
// Initial data fetch
sharedValue[threadIdx.x] = d_In[threadIdx.x + offset];
__syncthreads();
// Step 1: Adding neighbors
while (remainingSteps)
{
if ((addTurn & threadIdx.x) == addTurn)
{
sharedValue[threadIdx.x] += sharedValue[threadIdx.x - neighbor];
}
remainingSteps >>= 1;
neighbor <<= 1;
addTurn <<= 1;
addTurn++;
__syncthreads();
}
// Step 2: Down-sweep and adding neighbors again
addTurn--;
addTurn >>= 1;
neighbor >>= 1;
remainingSteps = size;
while (remainingSteps)
{
bool fillBoundary= true;
if ((addTurn & threadIdx.x) == addTurn)
{
unsigned int tempValue = sharedValue[threadIdx.x];
sharedValue[threadIdx.x] += sharedValue[threadIdx.x - neighbor];
sharedValue[threadIdx.x - neighbor] = tempValue;
fillBoundary= false;
}
__syncthreads();
unsigned int crossSweep = addTurn >> 1;
if (fillBoundary&&((addTurn & threadIdx.x) ^ crossSweep) == 0 && (threadIdx.x + neighbor) >= size)
{
sharedValue[threadIdx.x] = CurrentBoundaryValue;
CurrentBoundaryValue = CurrentBoundaryValue + sharedValue[(threadIdx.x)];
}
addTurn--;
addTurn >>= 1;
neighbor >>= 1;
remainingSteps >>= 1;
__syncthreads();
}
if (offset > 0){sharedValue[threadIdx.x] += finalSum;}
__syncthreads();
d_Out[threadIdx.x + offset] = sharedValue[threadIdx.x];
if (threadIdx.x == 0 && !isLastCall)
{
d_Out[0] = remember;
}
else
{
d_Out[0] = 0;
}
__syncthreads();
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
int gridSizeX = (numCols - 1) / BLOCKSIZEMAX + 1;
int gridSizeY = (numRows - 1) / BLOCKSIZEMAX + 1;
dim3 blockSize(BLOCKSIZEMAX, BLOCKSIZEMAX, 1);
dim3 gridSize(gridSizeX, gridSizeY, 1);
int numBinsLeft = numBins;
float h_MinMaxOut[2];
float *d_center;
checkCudaErrors(hipMalloc(&d_center, max((unsigned int)(2 * sizeof(float) * gridSizeX * gridSizeY), (unsigned int)(sizeof(unsigned int) * numBins))));
checkCudaErrors(hipMemset(d_center,0x0, 2 * sizeof(float) * gridSizeX * gridSizeY));
hipLaunchKernelGGL(( globalMinMax), dim3(gridSize), dim3(blockSize), 0, 0, d_center, d_logLuminance, numRows, numCols, true);
hipLaunchKernelGGL(( globalMinMax), dim3(1), dim3(blockSize), 0, 0, d_center, d_center, gridSizeX, gridSizeY, false);
checkCudaErrors(hipMemcpy(&h_MinMaxOut[0], d_center, 2 * sizeof(float),hipMemcpyDeviceToHost));
min_logLum = h_MinMaxOut[0];
max_logLum = h_MinMaxOut[1];
float lumRange = max_logLum - min_logLum;
unsigned int *d_Bins = reinterpret_cast<unsigned int *>(d_center);
checkCudaErrors(hipMemset(d_Bins, 0x0, sizeof(unsigned int) * numBins));
blockSize.x = BLOCKSIZEMAXHISTOGRAM;
blockSize.y = BLOCKSIZEMAXHISTOGRAM;
gridSize.x = (numCols - 1) / BLOCKSIZEMAXHISTOGRAM + 1;
gridSize.y = (numRows - 1) / BLOCKSIZEMAXHISTOGRAM + 1;
hipLaunchKernelGGL(( simple_histo), dim3(gridSize), dim3(blockSize), 0, 0, d_Bins, d_logLuminance, numBins, h_MinMaxOut[0], lumRange, numRows, numCols);
while (numBinsLeft)
{
blockSize.x = numBinsLeft > BLOCKSIZEMAXSCAN ? BLOCKSIZEMAXSCAN : numBinsLeft;
blockSize.y = 1;
gridSize.x = 1;
gridSize.y = 1;
hipLaunchKernelGGL(( blellochScanExclusive), dim3(gridSize), dim3(blockSize), 0, 0, d_Bins, d_cdf, blockSize.x, numBins - numBinsLeft, (numBinsLeft - blockSize.x) <= 0);
numBinsLeft -= blockSize.x;
}
} | ad35071467d8a7dea8b7074957a4a33ec5136fc3.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Definition Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
#include "reference_calc.cpp"
#include "utils.h"
#include "cuda_runtime.h"
#define BLOCKSIZEMAX 16
#define BLOCKSIZEMAXHISTOGRAM 22
#define BLOCKSIZEMAXSCAN 512
//Modified simple histo from notes
__global__ void simple_histo(unsigned int *d_bins, const float *d_In, const unsigned int binCount, float _min, float _range, int numRows, int numCols)
{
int myId = ((blockIdx.x + (blockIdx.y * gridDim.x)) * (blockDim.x * blockDim.y)) + (threadIdx.x + (threadIdx.y * blockDim.x));
if ( myId < (numRows * numCols) )
{
float myItem = d_In[myId];
unsigned int myBin = min(static_cast<unsigned int>(binCount - 1), static_cast<unsigned int>((myItem - _min) / _range * binCount));
atomicAdd(&(d_bins[myBin]), 1);
}
else{
return;
}
}
//Found original in discussion forum... modified for a .2 speed increase by reducing logic sequences
__global__ void globalMinMax(float *d_Out, const float *d_In, int numRows, int numCols, bool firstTime)
{
__shared__ float sharedValue[2 * BLOCKSIZEMAX * BLOCKSIZEMAX];
int myId = ((blockIdx.x + (blockIdx.y * gridDim.x)) * (blockDim.x * blockDim.y)) + (threadIdx.x + (threadIdx.y * blockDim.x));
if ( myId < (numRows * numCols) )
{
if (!firstTime)
{
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)] = d_In[myId];
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)] = d_In[myId + (numRows * numCols)];
}
else
{
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)] = d_In[myId];
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)] = d_In[myId];
}
for (unsigned int s = (blockDim.x * blockDim.y) / 2; s > 0; s >>= 1)
{
if ((threadIdx.y * blockDim.x + threadIdx.x) < s && (myId + s) < (numRows * numCols))
{
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)] = min(sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)], sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + s]);
sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)] = max(sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)], sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y) + s]);
}
__syncthreads();
}
if ((threadIdx.y * blockDim.x + threadIdx.x) == 0)
{
d_Out[myId / (blockDim.x * blockDim.y)] = d_In[myId];
d_Out[myId / (blockDim.x * blockDim.y)] = sharedValue[(threadIdx.y * blockDim.x + threadIdx.x)];
if (gridDim.x <= 1 || gridDim.y <= 1)
{
d_Out[(myId / (blockDim.x * blockDim.y)) + 1] = d_In[myId + (numRows * numCols)];
d_Out[(myId / (blockDim.x * blockDim.y)) + 1] = sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)];
}
else
{
d_Out[(myId / (blockDim.x * blockDim.y)) + (blockDim.x * blockDim.y)] = d_In[myId + (numRows * numCols)];
d_Out[(myId / (blockDim.x * blockDim.y)) + (blockDim.x * blockDim.y)] = sharedValue[(threadIdx.y * blockDim.x + threadIdx.x) + (blockDim.x * blockDim.y)];
}
}
}
else{
return;
}
}
//https://github.com/steffenmartin/CUDA/blob/master/Scan/kernel.cu found cool exclusive Blelloch Scan
//was curious how would react after class discussion
//Suprisingly fast.... next step compare to inclusivew
// this really help me understadn Blelloch
//modified logic for .05 increase in speed....
__global__ void blellochScanExclusive(const unsigned int *d_In, unsigned int *d_Out, size_t size, size_t offset, bool isLastCall)
{
__shared__ unsigned int CurrentBoundaryValue;
__shared__ unsigned int finalSum;
unsigned int remember;
size_t remainingSteps = size;
unsigned int neighbor = 1;
unsigned int addTurn = 1;
__shared__ unsigned int sharedValue[BLOCKSIZEMAXSCAN];
if (threadIdx.x == 0)
{
CurrentBoundaryValue = 0;
remember = d_In[offset + size - 1];
if (offset > 0)
{
finalSum = d_Out[0] + d_Out[offset - 1];
}
}
if (threadIdx.x < size)
{
// Initial data fetch
sharedValue[threadIdx.x] = d_In[threadIdx.x + offset];
__syncthreads();
// Step 1: Adding neighbors
while (remainingSteps)
{
if ((addTurn & threadIdx.x) == addTurn)
{
sharedValue[threadIdx.x] += sharedValue[threadIdx.x - neighbor];
}
remainingSteps >>= 1;
neighbor <<= 1;
addTurn <<= 1;
addTurn++;
__syncthreads();
}
// Step 2: Down-sweep and adding neighbors again
addTurn--;
addTurn >>= 1;
neighbor >>= 1;
remainingSteps = size;
while (remainingSteps)
{
bool fillBoundary= true;
if ((addTurn & threadIdx.x) == addTurn)
{
unsigned int tempValue = sharedValue[threadIdx.x];
sharedValue[threadIdx.x] += sharedValue[threadIdx.x - neighbor];
sharedValue[threadIdx.x - neighbor] = tempValue;
fillBoundary= false;
}
__syncthreads();
unsigned int crossSweep = addTurn >> 1;
if (fillBoundary&&((addTurn & threadIdx.x) ^ crossSweep) == 0 && (threadIdx.x + neighbor) >= size)
{
sharedValue[threadIdx.x] = CurrentBoundaryValue;
CurrentBoundaryValue = CurrentBoundaryValue + sharedValue[(threadIdx.x)];
}
addTurn--;
addTurn >>= 1;
neighbor >>= 1;
remainingSteps >>= 1;
__syncthreads();
}
if (offset > 0){sharedValue[threadIdx.x] += finalSum;}
__syncthreads();
d_Out[threadIdx.x + offset] = sharedValue[threadIdx.x];
if (threadIdx.x == 0 && !isLastCall)
{
d_Out[0] = remember;
}
else
{
d_Out[0] = 0;
}
__syncthreads();
}
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
int gridSizeX = (numCols - 1) / BLOCKSIZEMAX + 1;
int gridSizeY = (numRows - 1) / BLOCKSIZEMAX + 1;
dim3 blockSize(BLOCKSIZEMAX, BLOCKSIZEMAX, 1);
dim3 gridSize(gridSizeX, gridSizeY, 1);
int numBinsLeft = numBins;
float h_MinMaxOut[2];
float *d_center;
checkCudaErrors(cudaMalloc(&d_center, max((unsigned int)(2 * sizeof(float) * gridSizeX * gridSizeY), (unsigned int)(sizeof(unsigned int) * numBins))));
checkCudaErrors(cudaMemset(d_center,0x0, 2 * sizeof(float) * gridSizeX * gridSizeY));
globalMinMax<<<gridSize, blockSize>>>(d_center, d_logLuminance, numRows, numCols, true);
globalMinMax<<<1, blockSize>>>(d_center, d_center, gridSizeX, gridSizeY, false);
checkCudaErrors(cudaMemcpy(&h_MinMaxOut[0], d_center, 2 * sizeof(float),cudaMemcpyDeviceToHost));
min_logLum = h_MinMaxOut[0];
max_logLum = h_MinMaxOut[1];
float lumRange = max_logLum - min_logLum;
unsigned int *d_Bins = reinterpret_cast<unsigned int *>(d_center);
checkCudaErrors(cudaMemset(d_Bins, 0x0, sizeof(unsigned int) * numBins));
blockSize.x = BLOCKSIZEMAXHISTOGRAM;
blockSize.y = BLOCKSIZEMAXHISTOGRAM;
gridSize.x = (numCols - 1) / BLOCKSIZEMAXHISTOGRAM + 1;
gridSize.y = (numRows - 1) / BLOCKSIZEMAXHISTOGRAM + 1;
simple_histo<<<gridSize, blockSize>>>(d_Bins, d_logLuminance, numBins, h_MinMaxOut[0], lumRange, numRows, numCols);
while (numBinsLeft)
{
blockSize.x = numBinsLeft > BLOCKSIZEMAXSCAN ? BLOCKSIZEMAXSCAN : numBinsLeft;
blockSize.y = 1;
gridSize.x = 1;
gridSize.y = 1;
blellochScanExclusive<<<gridSize, blockSize>>>(d_Bins, d_cdf, blockSize.x, numBins - numBinsLeft, (numBinsLeft - blockSize.x) <= 0);
numBinsLeft -= blockSize.x;
}
} |
c6e9cf664b65a3be1c6cbce8957a7535bb68c618.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
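  // the four weights are the areas of the sub-rectangles opposite each corner and sum to 1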
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data,
bool aligned) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) { // for backward-compatibility only
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
// When the grid is empty, output zeros == 0/1, instead of NaN.
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois,
bool aligned) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) { // for backward-compatibility only
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
namespace detectron2 {
at::Tensor ROIAlign_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
bool aligned) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
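  // launch at most 4096 blocks of 512 threads; CUDA_1D_KERNEL_LOOP grid-strides over any remaining elements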
dim3 grid(::min(at::cuda::ATenCeilDiv(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
hipLaunchKernelGGL(( RoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
aligned);
});
hipDeviceSynchronize();
AT_CUDA_CHECK(hipGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlign_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio,
bool aligned) {
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(at::cuda::ATenCeilDiv(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
hipLaunchKernelGGL(( RoIAlignBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>(),
aligned);
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
} // namespace detectron2
| c6e9cf664b65a3be1c6cbce8957a7535bb68c618.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data,
bool aligned) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) { // for backward-compatibility only
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
// When the grid is empty, output zeros == 0/1, instead of NaN.
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height,
const int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignBackwardFeature(
const int nthreads,
const T* top_diff,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois,
bool aligned) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_bottom_rois[1] * spatial_scale - offset;
T roi_start_h = offset_bottom_rois[2] * spatial_scale - offset;
T roi_end_w = offset_bottom_rois[3] * spatial_scale - offset;
T roi_end_h = offset_bottom_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) { // for backward-compatibility only
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
namespace detectron2 {
at::Tensor ROIAlign_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
bool aligned) {
AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data_ptr<scalar_t>(),
output.data_ptr<scalar_t>(),
aligned);
});
cudaDeviceSynchronize();
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlign_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio,
bool aligned) {
AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "ROIAlign_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
auto num_rois = rois.size(0);
auto grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(at::cuda::ATenCeilDiv(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data_ptr<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data_ptr<scalar_t>(),
rois.contiguous().data_ptr<scalar_t>(),
aligned);
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
} // namespace detectron2
|
e954182fa2f8579ad63ea2b8f13d3e75548dfb57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
int* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype maxval = -FLT_MAX;
int maxidx = -1;
if (bottom_data_a[index] > bottom_data_b[index]) {
// only update for very first bottom_data blob (blob_idx == 0)
if (blob_idx == 0) {
maxval = bottom_data_a[index];
top_data[index] = maxval;
maxidx = blob_idx;
mask[index] = maxidx;
}
} else {
maxval = bottom_data_b[index];
top_data[index] = maxval;
maxidx = blob_idx + 1;
mask[index] = maxidx;
}
}
}
template <typename Dtype>
void myEltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
mask = max_idx_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
for (int i = 2; i < bottom.size(); ++i) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
const int blob_idx, const int* mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype gradient = 0;
if (mask[index] == blob_idx) {
gradient += top_diff[index];
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void myEltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
mask = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, i, mask, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(myEltwiseLayer);
} // namespace caffe
| e954182fa2f8579ad63ea2b8f13d3e75548dfb57.cu | #include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
int* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype maxval = -FLT_MAX;
int maxidx = -1;
if (bottom_data_a[index] > bottom_data_b[index]) {
// only update for very first bottom_data blob (blob_idx == 0)
if (blob_idx == 0) {
maxval = bottom_data_a[index];
top_data[index] = maxval;
maxidx = blob_idx;
mask[index] = maxidx;
}
} else {
maxval = bottom_data_b[index];
top_data[index] = maxval;
maxidx = blob_idx + 1;
mask[index] = maxidx;
}
}
}
template <typename Dtype>
void myEltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
mask = max_idx_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
for (int i = 2; i < bottom.size(); ++i) {
// NOLINT_NEXT_LINE(whitespace/operators)
MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
const int blob_idx, const int* mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype gradient = 0;
if (mask[index] == blob_idx) {
gradient += top_diff[index];
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void myEltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
mask = max_idx_.gpu_data();
MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, i, mask, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(myEltwiseLayer);
} // namespace caffe
|
24547345da1ffd17efeb340c6e035d913480a4d5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// updateVectorByMatrixPro.cpp : defines the entry point for the console application.
//
#include "stdafx.h"
#include "Vertex.h"
#include "Joint.h"
#include "Vector.h"
#include "../common/stopwatch_win.h"
#include "updateVectorByMatrixPro.cuh"
#include "updateVectorByMatrix.h"
float PROBLEM_SCALE[] ={ 0.25f, 0.5f, 1, 2, 4, 8, 16, 32 }; // problem-size classes: 8 levels, 250K to 32M, doubling each step
int PROBLEM_SIZE = MEGA_SIZE * PROBLEM_SCALE[2] ;// problem size, initially set to 1M, i.e. one million
int iClass=6; // maximum problem size: 16M for 1 GB of video memory, 32M for 2 GB
// data definitions
Vertexes _vertexesStatic;// static vertex coordinates
Vertexes _vertexesDynamic;// dynamic vertex coordinates
Joints _joints;// joint matrices
// data initialization: coordinates and matrices
void initialize(int problem_size, int joint_size);
// data teardown: coordinates and matrices
void unInitialize();
int _tmain(int argc, _TCHAR* argv[])
{
int nRepeatPerSecond = 0;// repetitions per second, a measure of time efficiency
StopWatchWin timer;
{
// problem-size classes: 7 levels, 64K to 256M, increasing 4x each step
PROBLEM_SIZE = MEGA_SIZE * PROBLEM_SCALE[iClass] ;
// data initialization: coordinates and matrices
initialize(PROBLEM_SIZE, JOINT_SIZE);
timer.start();
while ( timer.getTime() < 10000 )
{
// run the computation: transform the coordinates by the joint matrices
hipLaunchKernelGGL(( updateVectorByMatrix), dim3(64), dim3(256), 0, 0, _vertexesStatic.pVertexDevice, PROBLEM_SIZE, _joints.pMatrixDevice, _vertexesDynamic.pVertexDevice);
hipDeviceSynchronize();
nRepeatPerSecond ++;
}
timer.stop();
timer.reset();
// verify that the GPU result matches the CPU result
bool bResult = false;
// get the CPU result
updateVectorByMatrixGold(_vertexesStatic.pVertex, PROBLEM_SIZE, _joints.pMatrix, _vertexesDynamic.pVertex);
// get the GPU result
Vector4 *pVertex = new Vector4[PROBLEM_SIZE];
hipMemcpy( pVertex, _vertexesDynamic.pVertexDevice, sizeof(Vector4) * PROBLEM_SIZE, hipMemcpyDeviceToHost );
// compare the results
bResult = equalVector( _vertexesDynamic.pVertex , PROBLEM_SIZE, pVertex );
printf("%s\n", bResult?"Right":"Wrong");
// data teardown: coordinates and matrices
unInitialize();
// report the timing results
printf("%d: F=%d, T=%.1f ms\n", iClass+1, nRepeatPerSecond/10, 10000.0f/nRepeatPerSecond);
}
// output: render the coordinates as points, lines, or surfaces
// ... omitted
return 0;
}
// data initialization: coordinates and matrices
void initialize(int problem_size, int joint_size)
{
_joints.initialize( JOINT_SIZE );
_vertexesStatic.initialize( PROBLEM_SIZE, JOINT_SIZE );
_vertexesDynamic.initialize( PROBLEM_SIZE, JOINT_SIZE );
}
// data teardown: coordinates and matrices
void unInitialize()
{
_joints.unInitialize();
_vertexesStatic.unInitialize();
_vertexesDynamic.unInitialize();
}
| 24547345da1ffd17efeb340c6e035d913480a4d5.cu | // updateVectorByMatrixPro.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include "Vertex.h"
#include "Joint.h"
#include "Vector.h"
#include "../common/stopwatch_win.h"
#include "updateVectorByMatrixPro.cuh"
#include "updateVectorByMatrix.h"
float PROBLEM_SCALE[] ={ 0.25f, 0.5f, 1, 2, 4, 8, 16, 32 }; // problem-size tiers: 8 tiers, from 250K to 32M, doubling each step
int PROBLEM_SIZE = MEGA_SIZE * PROBLEM_SCALE[2] ;// problem size, initially set to 1M (one million)
int iClass=6; // largest problem-size tier: 16M for 1 GB of video memory, 32M for 2 GB
// data definitions
Vertexes _vertexesStatic;// static vertex coordinates
Vertexes _vertexesDynamic;// dynamic (transformed) vertex coordinates
Joints _joints;// joint matrices
// data initialization: coordinates and matrices
void initialize(int problem_size, int joint_size);
// data teardown: coordinates and matrices
void unInitialize();
int _tmain(int argc, _TCHAR* argv[])
{
int nRepeatPerSecond = 0;// repetitions per second, a measure of time efficiency
StopWatchWin timer;
{
// problem-size tiers: 7 tiers, from 64K to 256M, quadrupling each step
PROBLEM_SIZE = MEGA_SIZE * PROBLEM_SCALE[iClass] ;
// data initialization: coordinates and matrices
initialize(PROBLEM_SIZE, JOINT_SIZE);
timer.start();
while ( timer.getTime() < 10000 )
{
// run the computation: transform vertex coordinates by the joint matrices
updateVectorByMatrix<<<64, 256>>>(_vertexesStatic.pVertexDevice, PROBLEM_SIZE, _joints.pMatrixDevice, _vertexesDynamic.pVertexDevice);
cudaDeviceSynchronize();
nRepeatPerSecond ++;
}
timer.stop();
timer.reset();
// verify the GPU result against the CPU result
bool bResult = false;
// compute the reference result on the CPU
updateVectorByMatrixGold(_vertexesStatic.pVertex, PROBLEM_SIZE, _joints.pMatrix, _vertexesDynamic.pVertex);
// copy the GPU result back to the host
Vector4 *pVertex = new Vector4[PROBLEM_SIZE];
cudaMemcpy( pVertex, _vertexesDynamic.pVertexDevice, sizeof(Vector4) * PROBLEM_SIZE, cudaMemcpyDeviceToHost );
// compare the results
bResult = equalVector( _vertexesDynamic.pVertex , PROBLEM_SIZE, pVertex );
printf("%s\n", bResult?"Right":"Wrong");
// data teardown: coordinates and matrices
unInitialize();
// report the timing results
printf("%d: F=%d, T=%.1f ms\n", iClass+1, nRepeatPerSecond/10, 10000.0f/nRepeatPerSecond);
}
// output: render the coordinates as points, lines, or surfaces
// ... omitted
return 0;
}
// data initialization: coordinates and matrices
void initialize(int problem_size, int joint_size)
{
_joints.initialize( JOINT_SIZE );
_vertexesStatic.initialize( PROBLEM_SIZE, JOINT_SIZE );
_vertexesDynamic.initialize( PROBLEM_SIZE, JOINT_SIZE );
}
// data teardown: coordinates and matrices
void unInitialize()
{
_joints.unInitialize();
_vertexesStatic.unInitialize();
_vertexesDynamic.unInitialize();
}
|
ba35477b47d7efdfedca6b31f7eff7f0b75a6ff7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<assert.h>
#include<math.h>
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])]))
#define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0])
#define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1])
#define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])]))
#define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) = (val); }
#define SET_ARRAY_LENGTH(ptr, val) { (((long *)(ptr))[1]) = (val); }
// very simple test kernel
extern "C"
__global__ void identity(int size, long *in, long *out) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
out[ix] = in[ix];
}
}
extern "C"
// very simple test kernel for int array
__global__ void intArrayIdentity(int size, int *input, int *output, int length) {
const int ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
const int *inArrayBody = &input[ix * length];
int *outArrayBody = &output[ix * length];
for (long i = 0; i < length; i++) {
outArrayBody[i] = inArrayBody[i];
}
}
}
extern "C"
// very simple test kernel for IntDataPoint class
__global__ void IntDataPointIdentity(int size, const int *inputX, const int *inputY, int *outputX, int *outputY, int length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
const int *inArrayBody = &inputX[ix * length];
int *outArrayBody = &outputX[ix * length];
for (long i = 0; i < length; i++) {
outArrayBody[i] = inArrayBody[i];
}
// copy int scalar value
outputY[ix] = inputY[ix];
}
}
extern "C"
// very simple test kernel for int array with free var
__global__ void intArrayAdd(int size, const int *input, int *output, const int *inFreeArray, int length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
const int *inArrayBody = &input[ix* length];
int *outArrayBody = &output[ix* length];
for (long i = 0; i < length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// test kernel for multiple input columns
__global__ void vectorLength(int size, const double *x, const double *y, double *len) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
len[ix] = sqrt(x[ix] * x[ix] + y[ix] * y[ix]);
}
}
extern "C"
// test kernel for multiple input and multiple output columns, with different types
__global__ void plusMinus(int size, const double *base, const float *deviation, double *a, float *b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
a[ix] = base[ix] - deviation[ix];
b[ix] = base[ix] + deviation[ix];
}
}
extern "C"
// test kernel for two const arguments
__global__ void applyLinearFunction(int size, const short *x, short *y, short a, short b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
y[ix] = a + b * x[ix];
}
}
extern "C"
// test kernel for custom number of blocks + const argument
// manual SIMD, to be run on size / 8 threads, assumes size % 8 == 0
// note that key is reversed, since it's little endian
__global__ void blockXOR(int size, const char *input, char *output, long key) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix * 8 < size) {
((long *)output)[ix] = ((const long *)input)[ix] ^ key;
}
}
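// Example launch configuration for blockXOR (illustrative values only): with each thread
// XORing one 8-byte word, roughly size / 8 threads are required, e.g.
//   const int threads = 256;
//   const int blocks  = (int)((size / 8 + threads - 1) / threads);
//   hipLaunchKernelGGL(blockXOR, dim3(blocks), dim3(threads), 0, 0, size, d_in, d_out, key);
// where d_in, d_out and key are assumed to be set up by the caller.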
extern "C"
// another simple test kernel
__global__ void multiplyBy2(int size, long *in, long *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < size) {
out[ix] = in[ix] * 2;
}
}
extern "C"
// another simple test kernel
__global__ void multiplyBy2_self(int size, long *inout) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < size) {
inout[ix] = inout[ix] * 2;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void sum(int size, int *input, int *output, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
if (ix < size) {
assert(jump == blockDim.x * gridDim.x);
int result = 0;
for (long i = ix; i < size; i += jump) {
result += input[i];
}
input[ix] = result;
}
} else if (ix == 0) {
const long count = (size < (long)jump) ? size : (long)jump;
int result = 0;
for (long i = 0; i < count; ++i) {
result += input[i];
}
output[0] = result;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void suml(int size, long *input, long *output, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
if (ix < size) {
assert(jump == blockDim.x * gridDim.x);
long result = 0;
for (long i = ix; i < size; i += jump) {
result += input[i];
}
input[ix] = result;
}
} else if (ix == 0) {
const long count = (size < (long)jump) ? size : (long)jump;
long result = 0;
for (long i = 0; i < count; ++i) {
result += input[i];
}
output[0] = result;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void intArraySum(int size, const int *input, int *output, int length, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
if (ix < size) {
assert(jump == blockDim.x * gridDim.x);
int *accArrayBody = const_cast<int *>(&input[ix* length]);
for (long i = ix + jump; i < size; i += jump) {
const int *inArrayBody = &input[(ix* length) + i];
for (long j = 0; j < length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (size < jump) ? size : (long)jump;
int *outArrayBody = &output[ix* length];
for (long i = 0; i < count; i++) {
const int *inArrayBody = &input[(i* length)];
if (i == 0) {
for (long j = 0; j < length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
extern "C"
// map for DataPoint class
__global__ void DataPointMap(int size, const double *inputX, const double *inputY, double *output, const double *inFreeArray, int length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
const double *inArrayBody = &inputX[ix* length];
double *outArrayBody = &output[ix* length];
for (long i = 0; i < length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// reduce for DataPoint class
__global__ void DataPointReduce(int size, const double *input, double *output, int length, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
if (ix < size) {
assert(jump == blockDim.x * gridDim.x);
double *accArrayBody = const_cast<double *>(&input[ix* length]);
for (long i = ix + jump; i < size; i += jump) {
const double *inArrayBody = &input[(ix* length) + i];
for (long j = 0; j < length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (size < (long)jump) ? size : (long)jump;
double *outArrayBody = &output[ix* length];
for (long i = 0; i < count; i++) {
const double *inArrayBody = &input[(i* length)];
if (i == 0) {
for (long j = 0; j < length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
// map for Logistic regression
__device__ double sdotvv(const double * __restrict__ x, const double * __restrict__ y, int n) {
double ans = 0.0;
for(int i = 0; i < n; i++) {
ans += x[i] * y[i];
}
return ans;
}
__device__ void dmulvs(double *result, const double * __restrict__ x, double c, int n) {
for(int i = 0; i < n; i++) {
result[i] = x[i] * c;
}
}
__device__ void map(double *result, const double * __restrict__ x, double y, const double * __restrict__ w, int n) {
dmulvs(result, x, (1 / (1 + exp(-y * (sdotvv(w, x, n)))) - 1) * y, n);
}
#define WARPSIZE 32
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
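// atomicAddDouble emulates a double-precision atomic add with a compare-and-swap loop:
// the double is reinterpreted as a 64-bit integer and the CAS is retried until no other
// thread has modified the location between the read and the swap.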
#if (__CUDA_ARCH__ >= 300)
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
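// warpReduceSum combines the 32 per-lane partial sums with register shuffles: each step
// adds the value held by the lane `offset` positions away (modulo the warp size), halving
// the offset until every lane holds the warp-wide sum; the callers below then let one lane
// per warp publish it with atomicAddDouble.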
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernel(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(out, sum);
}
return out;
}
__device__ void deviceReduceArrayKernel(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernel(inArray, &outputArrayBody[i], i, n, length);
}
}
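// deviceReduceArrayKernel reduces `length` array components across n records. The main loop
// is unrolled by four: adjacent components are packed into a double4 and reduced together
// with warpReduceVSum to cut down on shuffle steps, and any remaining components (length % 4)
// fall back to the scalar deviceReduceKernel.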
#endif
extern "C"
__global__
void blockReduce(int count, double *data, double * result, int user_D) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if (idx < count)
deviceReduceArrayKernel(data, result, user_D, count);
#else
printf("not supported");
#endif
}
extern "C"
__global__ void
mapAll(int count, double *x, double *y, double *result, double *w, int user_D) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < count)
map(&result[idx * user_D], &x[idx * user_D ], y[idx],w, user_D);
}
extern "C"
// matrix multiplication simple test kernel
__global__ void MNKernel(int count, long * Md, long *Nd, long *Pd, int width) {
// 2D thread ID
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
// Pvalue stores the Pd element that is computed by the thread
long Pvalue = 0;
for (int k=0; k < width; k++)
Pvalue += Md[row * width + k] * Nd[k * width + col];
Pd[row * width + col] = Pvalue;
} | ba35477b47d7efdfedca6b31f7eff7f0b75a6ff7.cu | #include<assert.h>
#include<math.h>
#define GET_BLOB_ADDRESS(ptr, offset) (&((ptr)[(offset)/sizeof((ptr)[0])]))
#define GET_ARRAY_CAPACITY(ptr) (((long *)(ptr))[0])
#define GET_ARRAY_LENGTH(ptr) (((long *)(ptr))[1])
#define GET_ARRAY_BODY(ptr) (&((ptr)[128/sizeof((ptr)[0])]))
#define SET_ARRAY_CAPACITY(ptr, val) { (((long *)(ptr))[0]) = (val); }
#define SET_ARRAY_LENGTH(ptr, val) { (((long *)(ptr))[1]) = (val); }
// very simple test kernel
extern "C"
__global__ void identity(int size, long *in, long *out) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
out[ix] = in[ix];
}
}
extern "C"
// very simple test kernel for int array
__global__ void intArrayIdentity(int size, int *input, int *output, int length) {
const int ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
const int *inArrayBody = &input[ix * length];
int *outArrayBody = &output[ix * length];
for (long i = 0; i < length; i++) {
outArrayBody[i] = inArrayBody[i];
}
}
}
extern "C"
// very simple test kernel for IntDataPoint class
__global__ void IntDataPointIdentity(int size, const int *inputX, const int *inputY, int *outputX, int *outputY, int length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
const int *inArrayBody = &inputX[ix * length];
int *outArrayBody = &outputX[ix * length];
for (long i = 0; i < length; i++) {
outArrayBody[i] = inArrayBody[i];
}
// copy int scalar value
outputY[ix] = inputY[ix];
}
}
extern "C"
// very simple test kernel for int array with free var
__global__ void intArrayAdd(int size, const int *input, int *output, const int *inFreeArray, int length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
const int *inArrayBody = &input[ix* length];
int *outArrayBody = &output[ix* length];
for (long i = 0; i < length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// test kernel for multiple input columns
__global__ void vectorLength(int size, const double *x, const double *y, double *len) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
len[ix] = sqrt(x[ix] * x[ix] + y[ix] * y[ix]);
}
}
extern "C"
// test kernel for multiple input and multiple output columns, with different types
__global__ void plusMinus(int size, const double *base, const float *deviation, double *a, float *b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
a[ix] = base[ix] - deviation[ix];
b[ix] = base[ix] + deviation[ix];
}
}
extern "C"
// test kernel for two const arguments
__global__ void applyLinearFunction(int size, const short *x, short *y, short a, short b) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
y[ix] = a + b * x[ix];
}
}
extern "C"
// test kernel for custom number of blocks + const argument
// manual SIMD, to be run on size / 8 threads, assumes size % 8 == 0
// note that key is reversed, since it's little endian
__global__ void blockXOR(int size, const char *input, char *output, long key) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix * 8 < size) {
((long *)output)[ix] = ((const long *)input)[ix] ^ key;
}
}
extern "C"
// another simple test kernel
__global__ void multiplyBy2(int size, long *in, long *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < size) {
out[ix] = in[ix] * 2;
}
}
extern "C"
// another simple test kernel
__global__ void multiplyBy2_self(int size, long *inout) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < size) {
inout[ix] = inout[ix] * 2;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void sum(int size, int *input, int *output, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
if (ix < size) {
assert(jump == blockDim.x * gridDim.x);
int result = 0;
for (long i = ix; i < size; i += jump) {
result += input[i];
}
input[ix] = result;
}
} else if (ix == 0) {
const long count = (size < (long)jump) ? size : (long)jump;
int result = 0;
for (long i = 0; i < count; ++i) {
result += input[i];
}
output[0] = result;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void suml(int size, long *input, long *output, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
if (ix < size) {
assert(jump == blockDim.x * gridDim.x);
long result = 0;
for (long i = ix; i < size; i += jump) {
result += input[i];
}
input[ix] = result;
}
} else if (ix == 0) {
const long count = (size < (long)jump) ? size : (long)jump;
long result = 0;
for (long i = 0; i < count; ++i) {
result += input[i];
}
output[0] = result;
}
}
extern "C"
// test reduce kernel that sums elements
__global__ void intArraySum(int size, const int *input, int *output, int length, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
if (ix < size) {
assert(jump == blockDim.x * gridDim.x);
int *accArrayBody = const_cast<int *>(&input[ix* length]);
for (long i = ix + jump; i < size; i += jump) {
const int *inArrayBody = &input[(ix* length) + i];
for (long j = 0; j < length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (size < jump) ? size : (long)jump;
int *outArrayBody = &output[ix* length];
for (long i = 0; i < count; i++) {
const int *inArrayBody = &input[(i* length)];
if (i == 0) {
for (long j = 0; j < length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
extern "C"
// map for DataPoint class
__global__ void DataPointMap(int size, const double *inputX, const double *inputY, double *output, const double *inFreeArray, int length) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
if (ix < size) {
// copy int array
const double *inArrayBody = &inputX[ix* length];
double *outArrayBody = &output[ix* length];
for (long i = 0; i < length; i++) {
outArrayBody[i] = inArrayBody[i] + inFreeArray[i];
}
}
}
extern "C"
// reduce for DataPoint class
__global__ void DataPointReduce(int size, const double *input, double *output, int length, int stage, int totalStages) {
const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
const int jump = 64 * 256;
if (stage == 0) {
if (ix < size) {
assert(jump == blockDim.x * gridDim.x);
double *accArrayBody = const_cast<double *>(&input[ix* length]);
for (long i = ix + jump; i < size; i += jump) {
const double *inArrayBody = &input[(ix* length) + i];
for (long j = 0; j < length; j++) {
accArrayBody[j] += inArrayBody[j];
}
}
}
} else if (ix == 0) {
const long count = (size < (long)jump) ? size : (long)jump;
double *outArrayBody = &output[ix* length];
for (long i = 0; i < count; i++) {
const double *inArrayBody = &input[(i* length)];
if (i == 0) {
for (long j = 0; j < length; j++) {
outArrayBody[j] = 0;
}
}
for (long j = 0; j < length; j++) {
outArrayBody[j] += inArrayBody[j];
}
}
}
}
// map for Logistic regression
__device__ double sdotvv(const double * __restrict__ x, const double * __restrict__ y, int n) {
double ans = 0.0;
for(int i = 0; i < n; i++) {
ans += x[i] * y[i];
}
return ans;
}
__device__ void dmulvs(double *result, const double * __restrict__ x, double c, int n) {
for(int i = 0; i < n; i++) {
result[i] = x[i] * c;
}
}
__device__ void map(double *result, const double * __restrict__ x, double y, const double * __restrict__ w, int n) {
dmulvs(result, x, (1 / (1 + exp(-y * (sdotvv(w, x, n)))) - 1) * y, n);
}
#define WARPSIZE 32
__device__ inline double atomicAddDouble(double *address, double val) {
unsigned long long int *address_as_ull = (unsigned long long int *)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#if (__CUDA_ARCH__ >= 300)
__device__ inline double __shfl_double(double d, int lane) {
// Split the double number into 2 32b registers.
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "d"(d));
// Shuffle the two 32b registers.
lo = __shfl(lo, lane);
hi = __shfl(hi, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d) : "r"(lo), "r"(hi));
return d;
}
__device__ inline double warpReduceSum(double val) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
val += __shfl_double(val, (i + offset) % WARPSIZE);
}
return val;
}
__device__ inline double4 __shfl_double4(double4 d, int lane) {
// Split the double number into 2 32b registers.
int lox, loy, loz, low, hix, hiy, hiz, hiw;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lox), "=r"(hix) : "d"(d.x));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loy), "=r"(hiy) : "d"(d.y));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(loz), "=r"(hiz) : "d"(d.z));
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(low), "=r"(hiw) : "d"(d.w));
// Shuffle the two 32b registers.
lox = __shfl(lox, lane);
hix = __shfl(hix, lane);
loy = __shfl(loy, lane);
hiy = __shfl(hiy, lane);
loz = __shfl(loz, lane);
hiz = __shfl(hiz, lane);
low = __shfl(low, lane);
hiw = __shfl(hiw, lane);
// Recreate the 64b number.
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.x) : "r"(lox), "r"(hix));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.y) : "r"(loy), "r"(hiy));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.z) : "r"(loz), "r"(hiz));
asm volatile("mov.b64 %0, {%1,%2};" : "=d"(d.w) : "r"(low), "r"(hiw));
return d;
}
__device__ inline double4 warpReduceVSum(double4 val4) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
#pragma unroll
for (int offset = WARPSIZE / 2; offset > 0; offset /= 2) {
double4 shiftedVal4 = __shfl_double4(val4, (i + offset) % WARPSIZE);
val4.x += shiftedVal4.x;
val4.y += shiftedVal4.y;
val4.z += shiftedVal4.z;
val4.w += shiftedVal4.w;
}
return val4;
}
__device__ double* deviceReduceKernel(double * inArray, double *out, long i, long n, long length) {
double sum = 0;
double *inArrayBody;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum += inArrayBody[i];
}
sum = warpReduceSum(sum);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
atomicAddDouble(out, sum);
}
return out;
}
__device__ void deviceReduceArrayKernel(double * inArray, double *outputArrayBody, long length, long n) {
long i = 0;
double *inArrayBody;
// unrolled version
while ((length - i) >= 4) {
double4 sum4;
sum4.x = 0; sum4.y = 0; sum4.z = 0; sum4.w = 0;
for (long idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += blockDim.x * gridDim.x) {
inArrayBody = &inArray[idx*length];
sum4.x += inArrayBody[i];
sum4.y += inArrayBody[i+1];
sum4.z += inArrayBody[i+2];
sum4.w += inArrayBody[i+3];
}
sum4 = warpReduceVSum(sum4);
if ((threadIdx.x & (WARPSIZE - 1)) == 0) {
double *outx = &outputArrayBody[i];
double *outy = &outputArrayBody[i+1];
double *outz = &outputArrayBody[i+2];
double *outw = &outputArrayBody[i+3];
atomicAddDouble(outx, sum4.x);
atomicAddDouble(outy, sum4.y);
atomicAddDouble(outz, sum4.z);
atomicAddDouble(outw, sum4.w);
}
i += 4;
}
for (; i < length; i++) {
deviceReduceKernel(inArray, &outputArrayBody[i], i, n, length);
}
}
#endif
extern "C"
__global__
void blockReduce(int count, double *data, double * result, int user_D) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#if (__CUDA_ARCH__ >= 300)
if (idx < count)
deviceReduceArrayKernel(data, result, user_D, count);
#else
printf("not supported");
#endif
}
extern "C"
__global__ void
mapAll(int count, double *x, double *y, double *result, double *w, int user_D) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < count)
map(&result[idx * user_D], &x[idx * user_D ], y[idx],w, user_D);
}
extern "C"
// matrix multiplication simple test kernel
__global__ void MNKernel(int count, long * Md, long *Nd, long *Pd, int width) {
// 2D thread ID
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
// Pvalue stores the Pd element that is computed by the thread
long Pvalue = 0;
for (int k=0; k < width; k++)
Pvalue += Md[row * width + k] * Nd[k * width + col];
Pd[row * width + col] = Pvalue;
} |
e7c49affcd167686375ca929695312c09b25ebc0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* EXAMPLE OF SQUARE MATRIX MULTIPLICATION CHAPTER 4
*/
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define CHECK_ERROR(call) { \
hipError_t err = call; \
if (err != hipSuccess) { \
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define TILE_WIDTH 16
#define DIM 1024
__global__
void matrixMulKernel(int *P, int *M, int *N) {
__shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ int Nds[TILE_WIDTH][2*TILE_WIDTH];
int tx = threadIdx.x, bx = blockIdx.x;
int ty = threadIdx.y, by = blockIdx.y;
// identify row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
int pValue = 0;
int pValue2 = 0;
// Loop over the d_M and d_N tiles required to compute the d_P element
for (int ph = 0; ph < DIM/TILE_WIDTH; ph++) {
// Collaborative loading of d_M and d_N tiles into the shared memory
Mds[ty][tx] = M[Row * DIM + ph * TILE_WIDTH + tx];
Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * DIM + Col];
Nds[ty][tx+TILE_WIDTH] = N[(ph * TILE_WIDTH + ty) * DIM + Col + (DIM/2)];
__syncthreads();
for(int k = 0; k < TILE_WIDTH; k++){
pValue += Mds[ty][k] * Nds[k][tx];
pValue2 += Mds[ty][k] * Nds[k][tx+TILE_WIDTH];
}
__syncthreads();
}
P[Row*DIM+Col] = pValue;
P[Row*DIM+Col + (DIM/2)] = pValue2;
}
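// The kernel above combines shared-memory tiling with 2-way thread coarsening: Nds holds two
// TILE_WIDTH-wide tiles of N side by side, and each thread accumulates two outputs,
// P[Row][Col] and P[Row][Col + DIM/2]. The host therefore launches only half as many blocks
// along x (see dimGrid in matrixMul below), so every M tile brought into shared memory is
// reused for two N tiles.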
float matrixMul(int *h_P, int *h_M, int *h_N) {
// ------------------- CUDA INIT ---------------------------
int size = (DIM*DIM)*sizeof(int); // assume square matrices
int *d_M, *d_N, *d_P;
//1. Allocate global memory on the device for d_M, d_N and d_P
// With this type of allocation it isn't possible to access elements using higher-dimensional indexing syntax;
// the indices need to be linearized first.
CHECK_ERROR(hipMalloc((void**)&d_M, size));
CHECK_ERROR(hipMalloc((void**)&d_N, size));
CHECK_ERROR(hipMalloc((void**)&d_P, size));
// copy h_M and h_N to device memory
hipMemcpy(d_M, h_M, size, hipMemcpyHostToDevice);
hipMemcpy(d_N, h_N, size, hipMemcpyHostToDevice);
hipEvent_t startTimeCuda, stopTimeCuda;
hipEventCreate(&startTimeCuda);
hipEventCreate(&stopTimeCuda);
// ------------------- CUDA COMPUTATION ---------------------------
hipEventRecord(startTimeCuda, 0);
//2. Kernel launch code - with TILE_WIDTH^2 threads per block
//dim3 dimGrid(ceil((DIM/TILE_WIDTH)/2.0), ceil(DIM/TILE_WIDTH), 1);
dim3 dimGrid((DIM/TILE_WIDTH)/2, DIM/TILE_WIDTH, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
hipLaunchKernelGGL(( matrixMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_P, d_M, d_N);
hipEventRecord(stopTimeCuda,0);
// ---------------------- CUDA ENDING -----------------------------
hipEventSynchronize(stopTimeCuda);
float msTime;
hipEventElapsedTime(&msTime, startTimeCuda, stopTimeCuda);
printf("KernelTime: %f\n", msTime);
//3. copy d_P from the device memory
hipMemcpy(h_P, d_P, size, hipMemcpyDeviceToHost);
// Free device matrices
hipFree(d_M);
hipFree(d_N);
hipFree(d_P);
return msTime;
}
void sequentialMM(int* h_M, int* h_N, int* h_C) {
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
int sum = 0;
for (int k = 0; k < DIM; ++k)
sum += h_M[i * DIM + k] * h_N[k * DIM + j];
h_C[i * DIM + j] = sum;
}
}
}
void printMatrix(int *M){
for (int i = 0; i < DIM ; i++) {
for (int j = 0; j < DIM ; j++) {
printf("%d ", M[i*DIM+j]);
}
printf("\n");
}
printf("\n");
}
int main(int argc, char *argv[]) {
// ------------------ HOST INIT ---------------------------
int *h_M, *h_N, *h_P, *h_C;
hipEvent_t startTimeCuda, stopTimeCuda;
float msTime, msTime_seq;
hipEventCreate(&startTimeCuda);
hipEventCreate(&stopTimeCuda);
h_M = (int*)malloc(sizeof(int)*DIM*DIM);
h_N = (int*)malloc(sizeof(int)*DIM*DIM);
h_P = (int*)malloc(sizeof(int)*DIM*DIM);
h_C = (int*)malloc(sizeof(int)*DIM*DIM);
// fill M and N with int numbers
srand(time(NULL));
for (int i = 0; i < DIM * DIM ; i++) {
h_M[i] = ((float)rand() / RAND_MAX) * 100;
h_N[i] = ((float)rand() / RAND_MAX) * 100;
h_C[i] = 0;
}
//printf("----- MATRIX M -----\n");
//printMatrix(h_M);
//printf("----- MATRIX N -----\n");
//printMatrix(h_N);
// ------- perform matrix multiplication on device -------
msTime = matrixMul(h_P, h_M, h_N);
//printf("----- MATRIX P -----\n");
//printMatrix(h_P);
// ------- perform matrix multiplication on host ---------
hipEventRecord(startTimeCuda, 0);
sequentialMM(h_M, h_N, h_C);
hipEventRecord(stopTimeCuda,0);
hipEventSynchronize(stopTimeCuda);
hipEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda);
printf("HostTime: %f\n", msTime_seq);
// verify the result
for (int i = 0; i < DIM * DIM; ++i) {
if (h_C[i] != h_P[i]) {
printf("\x1b[31mError\x1b[0m into result: h_C[%d] = %d != %d = h_P[%d]\n", i, h_C[i], h_P[i], i);
goto Error;
}
}
printf("Ok multiplication completed with \x1b[32msuccess\x1b[0m!\n\n");
printf("Speedup: %f\n", msTime_seq/msTime);
free(h_M);
free(h_N);
free(h_P);
free(h_C);
return 0;
Error:
free(h_M);
free(h_N);
free(h_P);
free(h_C);
return -1;
}
| e7c49affcd167686375ca929695312c09b25ebc0.cu | /*
* EXAMPLE OF SQUARE MATRIX MULTIPLICATION CHAPTER 4
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <math.h>
#define CHECK_ERROR(call) { \
cudaError_t err = call; \
if (err != cudaSuccess) { \
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
#define TILE_WIDTH 16
#define DIM 1024
__global__
void matrixMulKernel(int *P, int *M, int *N) {
__shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ int Nds[TILE_WIDTH][2*TILE_WIDTH];
int tx = threadIdx.x, bx = blockIdx.x;
int ty = threadIdx.y, by = blockIdx.y;
// identify row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
int pValue = 0;
int pValue2 = 0;
// Loop over the d_M and d_N tiles required to compute the d_P element
for (int ph = 0; ph < DIM/TILE_WIDTH; ph++) {
// Collaborative loading of d_M and d_N tiles into the shared memory
Mds[ty][tx] = M[Row * DIM + ph * TILE_WIDTH + tx];
Nds[ty][tx] = N[(ph * TILE_WIDTH + ty) * DIM + Col];
Nds[ty][tx+TILE_WIDTH] = N[(ph * TILE_WIDTH + ty) * DIM + Col + (DIM/2)];
__syncthreads();
for(int k = 0; k < TILE_WIDTH; k++){
pValue += Mds[ty][k] * Nds[k][tx];
pValue2 += Mds[ty][k] * Nds[k][tx+TILE_WIDTH];
}
__syncthreads();
}
P[Row*DIM+Col] = pValue;
P[Row*DIM+Col + (DIM/2)] = pValue2;
}
float matrixMul(int *h_P, int *h_M, int *h_N) {
// ------------------- CUDA INIT ---------------------------
int size = (DIM*DIM)*sizeof(int); // assume square matrices
int *d_M, *d_N, *d_P;
//1. Allocate global memory on the device for d_M, d_N and d_P
// With this type of allocation it isn't possible to access elements using higher-dimensional indexing syntax;
// the indices need to be linearized first.
CHECK_ERROR(cudaMalloc((void**)&d_M, size));
CHECK_ERROR(cudaMalloc((void**)&d_N, size));
CHECK_ERROR(cudaMalloc((void**)&d_P, size));
// copy h_M and h_N to device memory
cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice);
cudaEvent_t startTimeCuda, stopTimeCuda;
cudaEventCreate(&startTimeCuda);
cudaEventCreate(&stopTimeCuda);
// ------------------- CUDA COMPUTATION ---------------------------
cudaEventRecord(startTimeCuda, 0);
//2. Kernel launch code - with TILE_WIDTH^2 threads per block
//dim3 dimGrid(ceil((DIM/TILE_WIDTH)/2.0), ceil(DIM/TILE_WIDTH), 1);
dim3 dimGrid((DIM/TILE_WIDTH)/2, DIM/TILE_WIDTH, 1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
matrixMulKernel<<<dimGrid, dimBlock>>>(d_P, d_M, d_N);
cudaEventRecord(stopTimeCuda,0);
// ---------------------- CUDA ENDING -----------------------------
cudaEventSynchronize(stopTimeCuda);
float msTime;
cudaEventElapsedTime(&msTime, startTimeCuda, stopTimeCuda);
printf("KernelTime: %f\n", msTime);
//3. copy d_P from the device memory
cudaMemcpy(h_P, d_P, size, cudaMemcpyDeviceToHost);
// Free device matrices
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_P);
return msTime;
}
void sequentialMM(int* h_M, int* h_N, int* h_C) {
for (int i = 0; i < DIM; ++i) {
for (int j = 0; j < DIM; ++j) {
int sum = 0;
for (int k = 0; k < DIM; ++k)
sum += h_M[i * DIM + k] * h_N[k * DIM + j];
h_C[i * DIM + j] = sum;
}
}
}
void printMatrix(int *M){
for (int i = 0; i < DIM ; i++) {
for (int j = 0; j < DIM ; j++) {
printf("%d ", M[i*DIM+j]);
}
printf("\n");
}
printf("\n");
}
int main(int argc, char *argv[]) {
// ------------------ HOST INIT ---------------------------
int *h_M, *h_N, *h_P, *h_C;
cudaEvent_t startTimeCuda, stopTimeCuda;
float msTime, msTime_seq;
cudaEventCreate(&startTimeCuda);
cudaEventCreate(&stopTimeCuda);
h_M = (int*)malloc(sizeof(int)*DIM*DIM);
h_N = (int*)malloc(sizeof(int)*DIM*DIM);
h_P = (int*)malloc(sizeof(int)*DIM*DIM);
h_C = (int*)malloc(sizeof(int)*DIM*DIM);
// fill M and N with int numbers
srand(time(NULL));
for (int i = 0; i < DIM * DIM ; i++) {
h_M[i] = ((float)rand() / RAND_MAX) * 100;
h_N[i] = ((float)rand() / RAND_MAX) * 100;
h_C[i] = 0;
}
//printf("----- MATRIX M -----\n");
//printMatrix(h_M);
//printf("----- MATRIX N -----\n");
//printMatrix(h_N);
// ------- perform matrix multiplication on device -------
msTime = matrixMul(h_P, h_M, h_N);
//printf("----- MATRIX P -----\n");
//printMatrix(h_P);
// ------- perform matrix multiplication on host ---------
cudaEventRecord(startTimeCuda, 0);
sequentialMM(h_M, h_N, h_C);
cudaEventRecord(stopTimeCuda,0);
cudaEventSynchronize(stopTimeCuda);
cudaEventElapsedTime(&msTime_seq, startTimeCuda, stopTimeCuda);
printf("HostTime: %f\n", msTime_seq);
// verify the result
for (int i = 0; i < DIM * DIM; ++i) {
if (h_C[i] != h_P[i]) {
printf("\x1b[31mError\x1b[0m into result: h_C[%d] = %d != %d = h_P[%d]\n", i, h_C[i], h_P[i], i);
goto Error;
}
}
printf("Ok multiplication completed with \x1b[32msuccess\x1b[0m!\n\n");
printf("Speedup: %f\n", msTime_seq/msTime);
free(h_M);
free(h_N);
free(h_P);
free(h_C);
return 0;
Error:
free(h_M);
free(h_N);
free(h_P);
free(h_C);
return -1;
}
|
79615d7f00fd5ab79b97fd3218265766707724c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cstring>
#include "../common/cutlass_unit_test.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/util/host_tensor.h"
namespace test {
template <typename PredicateVector>
__global__ void load_predicates(unsigned *output, unsigned const *input) {
PredicateVector predicates;
int const word_count = (PredicateVector::kPredicates + 31) / 32;
int i = 0;
for (int word_idx = 0; word_idx < word_count; ++word_idx) {
unsigned word = input[word_idx];
CUTLASS_PRAGMA_UNROLL
for (int bit = 0; bit < sizeof(unsigned) * 8; ++bit) {
bool pred = ((word >> bit) & 1);
predicates.set(i, pred);
if (predicates.at(i) != pred) {
printf("ERROR - cannot read back predicate\n");
}
++i;
}
}
__syncthreads();
i = 0;
for (int word_idx = 0; word_idx < word_count; ++word_idx) {
unsigned result = 0;
for (int bit = 0; bit < sizeof(unsigned) * 8; ++bit) {
bool pred = predicates.at(i ++);
result |= (unsigned(pred) << bit);
}
output[word_idx] = result;
}
}
}
TEST(PredicateVector, Basic) {
static int const Bits = 32;
static int const Words = (Bits + 31) / 32;
typedef cutlass::PredicateVector<Bits> PredicateVector;
cutlass::HostTensor<unsigned, cutlass::IdentityTensorLayout<1> > output;
cutlass::HostTensor<unsigned, cutlass::IdentityTensorLayout<1>> input;
output.reserve(Words);
input.reserve(Words);
// some arbitrary test bits
unsigned values[] = {
0xdeadbeef,
0xa0070032,
0x9076d001,
0x00000000,
0xabdfc0ad
};
for (int test = 0; test < 5; ++test) {
input.host_data(0) = values[test];
output.host_data(0) = 0;
input.sync_device();
output.sync_device();
hipLaunchKernelGGL(( test::load_predicates<PredicateVector>),
dim3(dim3(1,1,1)), dim3(dim3(1,1,1))
, 0, 0,
output.device_data(),
input.device_data()
);
output.sync_host();
for (int word = 0; word < Words; ++word) {
EXPECT_EQ(input.host_data(word), output.host_data(word))
<< "Expected: 0x" << std::hex << input.host_data(word)
<< ", got: 0x" << output.host_data(word)
<< std::dec;
}
}
}
TEST(PredicateVector, Count) {
{
typedef cutlass::PredicateVector<4, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<8, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<16, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<16, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<16, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<16, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<32, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<32, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<32, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 8)
<< "PredicateVector<32, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<64, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<64, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 8)
<< "PredicateVector<64, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 16)
<< "PredicateVector<64, 1> word count: " << int(PredicateVector::kWordCount);
}
}
| 79615d7f00fd5ab79b97fd3218265766707724c9.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cstring>
#include "../common/cutlass_unit_test.h"
#include "cutlass/predicate_vector.h"
#include "cutlass/util/host_tensor.h"
namespace test {
template <typename PredicateVector>
__global__ void load_predicates(unsigned *output, unsigned const *input) {
PredicateVector predicates;
int const word_count = (PredicateVector::kPredicates + 31) / 32;
int i = 0;
for (int word_idx = 0; word_idx < word_count; ++word_idx) {
unsigned word = input[word_idx];
CUTLASS_PRAGMA_UNROLL
for (int bit = 0; bit < sizeof(unsigned) * 8; ++bit) {
bool pred = ((word >> bit) & 1);
predicates.set(i, pred);
if (predicates.at(i) != pred) {
printf("ERROR - cannot read back predicate\n");
}
++i;
}
}
__syncthreads();
i = 0;
for (int word_idx = 0; word_idx < word_count; ++word_idx) {
unsigned result = 0;
for (int bit = 0; bit < sizeof(unsigned) * 8; ++bit) {
bool pred = predicates.at(i ++);
result |= (unsigned(pred) << bit);
}
output[word_idx] = result;
}
}
}
TEST(PredicateVector, Basic) {
static int const Bits = 32;
static int const Words = (Bits + 31) / 32;
typedef cutlass::PredicateVector<Bits> PredicateVector;
cutlass::HostTensor<unsigned, cutlass::IdentityTensorLayout<1> > output;
cutlass::HostTensor<unsigned, cutlass::IdentityTensorLayout<1>> input;
output.reserve(Words);
input.reserve(Words);
// some arbitrary test bits
unsigned values[] = {
0xdeadbeef,
0xa0070032,
0x9076d001,
0x00000000,
0xabdfc0ad
};
for (int test = 0; test < 5; ++test) {
input.host_data(0) = values[test];
output.host_data(0) = 0;
input.sync_device();
output.sync_device();
test::load_predicates<PredicateVector><<<
dim3(1,1,1), dim3(1,1,1)
>>>(
output.device_data(),
input.device_data()
);
output.sync_host();
for (int word = 0; word < Words; ++word) {
EXPECT_EQ(input.host_data(word), output.host_data(word))
<< "Expected: 0x" << std::hex << input.host_data(word)
<< ", got: 0x" << output.host_data(word)
<< std::dec;
}
}
}
TEST(PredicateVector, Count) {
{
typedef cutlass::PredicateVector<4, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<4, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<4, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<8, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<8, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<8, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<16, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<16, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<16, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<16, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<16, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 1)
<< "PredicateVector<32, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<32, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<32, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<32, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 8)
<< "PredicateVector<32, 1> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 8> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 2)
<< "PredicateVector<64, 8> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 4> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 4)
<< "PredicateVector<64, 4> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 2> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 8)
<< "PredicateVector<64, 2> word count: " << int(PredicateVector::kWordCount);
}
{
typedef cutlass::PredicateVector<64, 1> PredicateVector;
EXPECT_EQ(int(PredicateVector::kWordCount), 16)
<< "PredicateVector<64, 1> word count: " << int(PredicateVector::kWordCount);
}
}
|
1bc039f9b9a74e2ab6cdb83dcadac596c06705dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-------------------------------------------------------------------------
*
* CUDA functions for Steepest descend in POCS-type algorithms.
*
* This file will iteratively minimize by stepest descend the total variation
* of the input image, with the parameters given, using GPUs.
*
* CODE by Ander Biguri
*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define MAXTHREADS 1024
#define MAX_BUFFER 60
#include "POCS_TV.hpp"
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
hipDeviceReset();\
mexErrMsgIdAndTxt("CBCT:CUDA:POCS_TV",hipGetErrorString(__err));\
} \
} while (0)
// CUDA kernels
//https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927
__global__ void divideArrayScalar(float* vec,float scalar,const size_t n){
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]/=scalar;
}
}
__global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]*=scalar;
}
}
__global__ void substractArrays(float* vec,float* vec2,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]-=vec2[i];
}
}
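// The three element-wise kernels above use a grid-stride loop: each thread starts at its
// global index and advances by gridDim.x*blockDim.x, so any launch configuration covers an
// array of arbitrary length n.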
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols){
unsigned long size2d = rows*cols;
unsigned long long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z - 1 >= 0 && z<depth) {
grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ;
}
if ( y - 1 >= 0 && y<rows){
grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ;
}
if ( x - 1 >= 0 && x<cols) {
grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]);
}
}
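// gradient() computes backward finite differences of u at voxel (z,y,x) along z, y and x,
// writing them into grad[0..2]; components on the lower boundary are left at the value the
// caller initialised them to (zero in gradientTV below).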
__global__ void gradientTV(const float* f, float* dftv,
long depth, long rows, long cols){
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long z = threadIdx.z + blockIdx.z * blockDim.z;
unsigned long long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float df[3] ={0.f,0.f,0.f};
float dfi[3]={0.f,0.f,0.f}; // dfi== \partial f_{i+1,j,k}
float dfj[3]={0.f,0.f,0.f};
float dfk[3]={0.f,0.f,0.f};
gradient(f,df ,z ,y ,x , depth,rows,cols);
gradient(f,dfi ,z ,y ,x+1, depth,rows,cols);
gradient(f,dfj ,z ,y+1,x , depth,rows,cols);
gradient(f,dfk ,z+1,y ,x , depth,rows,cols);
float eps=0.00000001; //% avoid division by zero
dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps)
-dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I could precompute this, but if I do then I'd need to recompute the gradient.
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps)
-dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps);
return;
}
__device__ void warpReduce(volatile float *sdata, size_t tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
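// Two-stage reduction: reduceNorm2 accumulates squared values in a grid-stride loop and produces
// one partial sum per block (shared-memory tree, finished with warp shuffles when
// __CUDA_ARCH__ >= 300, otherwise with the volatile warpReduce above). reduceSum is later launched
// with a single block to collapse those per-block partials. Both kernels assume
// blockDim.x == MAXTHREADS, which is how they are launched below.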
__global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
float value=0;
while (i < n) {
value=g_idata[i]; //avoid reading twice
mySum += value*value;
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
__global__ void reduceSum(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
// float value=0;
while (i < n) {
mySum += g_idata[i];
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// main function
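// pocs_tv runs maxIter steepest-descent steps on the total variation of img and writes the result
// to dst. Each step computes the TV gradient, normalizes it by its global L2 norm, scales it by
// the hyperparameter alpha and subtracts it from the image. Slices are distributed across the
// available GPUs; if the volume does not fit, it is further split and streamed through host
// memory, exchanging buffer_length overlap slices so the boundary gradients stay correct.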
void pocs_tv(float* img,float* dst,float alpha,const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
char * devicenames;
hipDeviceProp_t deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
hipSetDevice(dev);
hipGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicenames,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("minimizeTV:POCS_TV:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
devicenames=deviceProp.name;
}
// We don't know if the devices are being used; let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// 5% of free memory should be enough; we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
size_t mem_auxiliary = sizeof(float)* (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=2;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
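// The factor of 3 below accounts for the three image-sized device buffers needed per GPU:
// the image itself, the TV-gradient image and the norm workspace.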
if(mem_GPU_global> 3*mem_size_image+3*(deviceCount-1)*mem_slice_image*buffer_length+mem_auxiliary){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*((slices_per_split+buffer_length*2));
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image it is around 28 Mbytes), let's for now assume we need it all
size_t mem_free=mem_GPU_global-mem_auxiliary;
splits=(unsigned int)(ceil(((float)(3*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each split should have 2 extra slices to account for the overlap of images.
// Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
// one more split should do the job, as it's an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for the overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here??!?!
// Only do it if there are splits needed.
if(splits>1){
mem_free=mem_GPU_global-(3*mem_img_each_GPU+mem_auxiliary);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/3; // we need double whatever this results in, rounded down.
buffer_length=max(buffer_length,2);// minimum 2
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=mem_slice_image*(slices_per_split+buffer_length*2);
}else{
buffer_length=2;
}
// Assert
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPU","Assertion Failed. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
// Assert
if ((slices_per_split+buffer_length*2)*image_size[0]*image_size[1]* sizeof(float)!= mem_img_each_GPU){
mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPU","Assertion Failed. Memory needed calculation broken! Please tell: [email protected]\n");
}
float** d_image= (float**)malloc(deviceCount*sizeof(float*));
float** d_dimgTV= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2aux= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2= (float**)malloc(deviceCount*sizeof(float*));
// allocate memory in each GPU
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMalloc((void**)&d_image[dev] , mem_img_each_GPU);
hipMemset(d_image[dev],0 , mem_img_each_GPU);
hipMalloc((void**)&d_dimgTV[dev] , mem_img_each_GPU);
hipMalloc((void**)&d_norm2[dev] , slices_per_split*mem_slice_image);
hipMalloc((void**)&d_norm2aux[dev] , mem_auxiliary);
cudaCheckErrors("Malloc error");
}
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
float* buffer;
if(splits>1){
mexWarnMsgIdAndTxt("minimizeTV:POCS_TV:Image_split","Your image can not be fully split between the available GPUs. The computation of minTV will be significantly slowed due to the image size.\nApproximated mathematics turned on for computational speed.");
}else{
hipHostMalloc((void**)&buffer,buffer_length*image_size[0]*image_size[1]*sizeof(float));
}
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0);
// splits>2 is completely empirical observation
if (isHostRegisterSupported & splits>2){
hipHostRegister(img ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
hipHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),hipHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Create streams
int nStream_device=2;
int nStreams=deviceCount*nStream_device;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
hipStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// For the reduction
double totalsum_prev;
double totalsum;
float sum_curr_spl;
float * sumnorm2;
hipHostMalloc((void**)&sumnorm2,deviceCount*sizeof(float));
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
for(unsigned int i=0;i<maxIter;i+=(buffer_length-1)){
if(splits>1){
totalsum_prev=0;
}
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute the whole image. The ordering of these loops
// needs to be like this due to the bounding layers between splits. If more than 1 split is needed
// per GPU then there is no other way than taking the entire memory out of the GPU and putting it back.
// If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
// Check if it's the first or last chunk
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// Let's compute where we start the copies and how much we copy. This avoids 3 calls to Memcpy.
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemcpyAsync(d_image[dev]+offset_device[dev], img+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
}
// if we need to split and it's not the first iteration, then we need to copy the previous result from host memory.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemcpyAsync(d_image[dev]+offset_device[dev], dst+offset_host[dev] , bytes_device[dev]*sizeof(float), hipMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
}
cudaCheckErrors("Memcpy failure on multi split");
for(unsigned int ib=0; (ib<(buffer_length-1)) && ((i+ib)<maxIter); ib++){
// For the gradient
dim3 blockGrad(10, 10, 10);
dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (curr_slices+buffer_length*2+blockGrad.z-1)/blockGrad.z);
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// Compute the gradient of the TV norm
// I don't understand why I need to store 2 layers to compute correctly with 1 buffer. The bounding checks should
// be enough but they are not.
hipLaunchKernelGGL(( gradientTV), dim3(gridGrad), dim3(blockGrad),0,stream[dev*nStream_device], d_image[dev],d_dimgTV[dev],(long)(curr_slices+buffer_length*2-1), image_size[1],image_size[0]);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// no need to copy the 2 aux slices here
hipStreamSynchronize(stream[dev*nStream_device]);
hipMemcpyAsync(d_norm2[dev], d_dimgTV[dev]+buffer_pixels, image_size[0]*image_size[1]*curr_slices*sizeof(float), hipMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
}
// Compute the L2 norm of the gradient. For that, a reduction is used.
//REDUCE
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
hipStreamSynchronize(stream[dev*nStream_device+1]);
reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device]>> >(d_norm2[dev], d_norm2aux[dev], total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
if (dimgridRed > 1) {
reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device] >> >(d_norm2aux[dev], d_norm2[dev], dimgridRed);
hipStreamSynchronize(stream[dev*nStream_device]);
hipMemcpyAsync(&sumnorm2[dev], d_norm2[dev], sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
else {
hipStreamSynchronize(stream[dev*nStream_device]);
hipMemcpyAsync(&sumnorm2[dev], d_norm2aux[dev], sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Reduction error");
// Accumulate the norm across devices
sum_curr_spl=0;
// this is CPU code
for (dev = 0; dev < deviceCount; dev++){
sum_curr_spl+=sumnorm2[dev];
}
sum_curr_spl+=0.0000001f; // avoid division by zero
// If we have more than one splits, lets use the result from prior calls
if(i>0 && splits>1){
// this is already stored:
//totalsum=totalsum_prev;
}else{
totalsum=sum_curr_spl;
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
//NORMALIZE
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM
hipLaunchKernelGGL(( divideArrayScalar) , dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_dimgTV[dev]+buffer_pixels,(float)sqrt(totalsum),total_pixels);
//MULTIPLY HYPERPARAMETER
hipLaunchKernelGGL(( multiplyArrayScalar), dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_dimgTV[dev]+buffer_pixels,alpha, total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Scalar operations error");
//SUBTRACT GRADIENT
//////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipLaunchKernelGGL(( substractArrays), dim3(60),dim3(MAXTHREADS),0,stream[dev*nStream_device], d_image[dev]+buffer_pixels,d_dimgTV[dev]+buffer_pixels, total_pixels);
}
}
// Synchronize mathematics, make sure bounding pixels are correct
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
if (dev<deviceCount-1){
hipSetDevice(dev+1);
hipMemcpy(buffer, d_image[dev+1], buffer_pixels*sizeof(float), hipMemcpyDeviceToHost);
hipSetDevice(dev);
hipMemcpy(d_image[dev]+total_pixels+buffer_pixels,buffer, buffer_pixels*sizeof(float), hipMemcpyHostToDevice);
}
hipDeviceSynchronize();
if (dev>0){
hipSetDevice(dev-1);
hipMemcpyAsync(buffer, d_image[dev-1]+total_pixels+buffer_pixels, buffer_pixels*sizeof(float), hipMemcpyDeviceToHost);
hipSetDevice(dev);
hipMemcpyAsync(d_image[dev],buffer, buffer_pixels*sizeof(float), hipMemcpyHostToDevice);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpyAsync(&dst[linear_idx_start], d_image[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Memory gather error");
totalsum_prev+=sum_curr_spl;
}
totalsum=totalsum_prev;
}
// If there were no splits, we still have the data in device memory
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
hipMemcpy(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_image[dev]+buffer_pixels,total_pixels*sizeof(float), hipMemcpyDeviceToHost);
}
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
hipSetDevice(dev);
hipFree(d_image[dev]);
hipFree(d_norm2aux[dev]);
hipFree(d_dimgTV[dev]);
hipFree(d_norm2[dev]);
}
if (splits==1){
hipHostFree(buffer);
}
if (isHostRegisterSupported& splits>2){
hipHostUnregister(img);
hipHostUnregister(dst);
}
for (int i = 0; i < nStreams; ++i)
hipStreamDestroy(stream[i]) ;
for (dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipDeviceSynchronize();
}
cudaCheckErrors("Memory free");
hipDeviceReset();
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
hipSetDevice(dev);
hipMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
| 1bc039f9b9a74e2ab6cdb83dcadac596c06705dc.cu | /*-------------------------------------------------------------------------
*
* CUDA functions for Steepest descend in POCS-type algorithms.
*
* This file will iteratively minimize by stepest descend the total variation
* of the input image, with the parameters given, using GPUs.
*
* CODE by Ander Biguri
*
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#define MAXTHREADS 1024
#define MAX_BUFFER 60
#include "POCS_TV.hpp"
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
cudaDeviceReset();\
mexErrMsgIdAndTxt("CBCT:CUDA:POCS_TV",cudaGetErrorString(__err));\
} \
} while (0)
// CUDA kernels
//https://stackoverflow.com/questions/21332040/simple-cuda-kernel-optimization/21340927#21340927
__global__ void divideArrayScalar(float* vec,float scalar,const size_t n){
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]/=scalar;
}
}
__global__ void multiplyArrayScalar(float* vec,float scalar,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]*=scalar;
}
}
__global__ void substractArrays(float* vec,float* vec2,const size_t n)
{
unsigned long long i = (blockIdx.x * blockDim.x) + threadIdx.x;
for(; i<n; i+=gridDim.x*blockDim.x) {
vec[i]-=vec2[i];
}
}
__device__ __inline__
void gradient(const float* u, float* grad,
long z, long y, long x,
long depth, long rows, long cols){
unsigned long size2d = rows*cols;
unsigned long long idx = z * size2d + y * cols + x;
float uidx = u[idx];
if ( z - 1 >= 0 && z<depth) {
grad[0] = (uidx-u[(z-1)*size2d + y*cols + x]) ;
}
if ( y - 1 >= 0 && y<rows){
grad[1] = (uidx-u[z*size2d + (y-1)*cols + x]) ;
}
if ( x - 1 >= 0 && x<cols) {
grad[2] = (uidx-u[z*size2d + y*cols + (x-1)]);
}
}
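// Derivative of the discrete (isotropic) TV norm: the voxel's backward-difference gradient,
// normalized by its magnitude, minus the matching components of the normalized gradients of its
// three forward neighbours (a discrete divergence of grad(f)/|grad(f)|); eps avoids division by zero.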
__global__ void gradientTV(const float* f, float* dftv,
long depth, long rows, long cols){
unsigned long x = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = threadIdx.y + blockIdx.y * blockDim.y;
unsigned long z = threadIdx.z + blockIdx.z * blockDim.z;
unsigned long long idx = z * rows * cols + y * cols + x;
if ( x >= cols || y >= rows || z >= depth )
return;
float df[3] ={0.f,0.f,0.f};
float dfi[3]={0.f,0.f,0.f}; // dfi== \partial f_{i+1,j,k}
float dfj[3]={0.f,0.f,0.f};
float dfk[3]={0.f,0.f,0.f};
gradient(f,df ,z ,y ,x , depth,rows,cols);
gradient(f,dfi ,z ,y ,x+1, depth,rows,cols);
gradient(f,dfj ,z ,y+1,x , depth,rows,cols);
gradient(f,dfk ,z+1,y ,x , depth,rows,cols);
float eps=0.00000001; //% avoid division by zero
dftv[idx]=(df[0]+df[1]+df[2])/(sqrt(df[0] *df[0] +df[1] *df[1] +df[2] *df[2])+eps)
-dfi[2]/(sqrt(dfi[0]*dfi[0]+dfi[1]*dfi[1]+dfi[2]*dfi[2]) +eps) // I wish I could precompute this, but if I do then I'd need to recompute the gradient.
-dfj[1]/(sqrt(dfj[0]*dfj[0]+dfj[1]*dfj[1]+dfj[2]*dfj[2]) +eps)
-dfk[0]/(sqrt(dfk[0]*dfk[0]+dfk[1]*dfk[1]+dfk[2]*dfk[2]) +eps);
return;
}
__device__ void warpReduce(volatile float *sdata, size_t tid) {
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
__global__ void reduceNorm2(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
float value=0;
while (i < n) {
value=g_idata[i]; //avoid reading twice
mySum += value*value;
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
__global__ void reduceSum(float *g_idata, float *g_odata, size_t n){
extern __shared__ volatile float sdata[];
//http://stackoverflow.com/a/35133396/1485872
size_t tid = threadIdx.x;
size_t i = blockIdx.x*blockDim.x + tid;
size_t gridSize = blockDim.x*gridDim.x;
float mySum = 0;
// float value=0;
while (i < n) {
mySum += g_idata[i];
i += gridSize;
}
sdata[tid] = mySum;
__syncthreads();
if (tid < 512)
sdata[tid] += sdata[tid + 512];
__syncthreads();
if (tid < 256)
sdata[tid] += sdata[tid + 256];
__syncthreads();
if (tid < 128)
sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64)
sdata[tid] += sdata[tid + 64];
__syncthreads();
#if (__CUDA_ARCH__ >= 300)
if ( tid < 32 )
{
mySum = sdata[tid] + sdata[tid + 32];
for (int offset = warpSize/2; offset > 0; offset /= 2) {
mySum += __shfl_down_sync(0xFFFFFFFF, mySum, offset,32);
}
}
#else
if (tid < 32) {
warpReduce(sdata, tid);
mySum = sdata[0];
}
#endif
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
// main function
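// pocs_tv: maxIter steepest-descent steps on the total variation of img, written to dst. Each step
// normalizes the TV gradient by its global L2 norm, scales it by alpha and subtracts it from the
// image, distributing slices across GPUs and streaming splits through host memory when the volume
// does not fit in device memory.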
void pocs_tv(float* img,float* dst,float alpha,const long* image_size, int maxIter){
// Prepare for MultiGPU
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
cudaCheckErrors("Device query fail");
if (deviceCount == 0) {
mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPUselect","There are no available device(s) that support CUDA\n");
}
//
// CODE assumes
// 1.-All available devices are usable by this code
// 2.-All available devices are equal, they are the same machine (warning thrown)
int dev;
char * devicenames;
cudaDeviceProp deviceProp;
for (dev = 0; dev < deviceCount; dev++) {
cudaSetDevice(dev);
cudaGetDeviceProperties(&deviceProp, dev);
if (dev>0){
if (strcmp(devicenames,deviceProp.name)!=0){
mexWarnMsgIdAndTxt("minimizeTV:POCS_TV:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n POCS_TV.cu line 277.");
break;
}
}
devicenames=deviceProp.name;
}
// We don't know if the devices are being used; let's check that, and only use the amount of memory we need.
size_t mem_GPU_global;
checkFreeMemory(deviceCount,&mem_GPU_global);
// 5% of free memory should be enough; we have almost no variables in these kernels
size_t total_pixels = image_size[0] * image_size[1] * image_size[2] ;
size_t mem_slice_image = sizeof(float)* image_size[0] * image_size[1] ;
size_t mem_size_image = sizeof(float)* total_pixels;
size_t mem_auxiliary = sizeof(float)* (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
// Decide how are we handling the distribution of computation
size_t mem_img_each_GPU;
unsigned int buffer_length=2;
//Does everything fit in the GPU?
unsigned int slices_per_split;
unsigned int splits=1; // if the number does not fit in an uint, you have more serious trouble than this.
if(mem_GPU_global> 3*mem_size_image+3*(deviceCount-1)*mem_slice_image*buffer_length+mem_auxiliary){
// We only need to split if we have extra GPUs
slices_per_split=(image_size[2]+deviceCount-1)/deviceCount;
mem_img_each_GPU=mem_slice_image*((slices_per_split+buffer_length*2));
}else{
// As mem_auxiliary is not expected to be a large value (for a 2000^3 image it is around 28 Mbytes), let's for now assume we need it all
size_t mem_free=mem_GPU_global-mem_auxiliary;
splits=(unsigned int)(ceil(((float)(3*mem_size_image)/(float)(deviceCount))/mem_free));
// Now, there is an overhead here, as each split should have 2 extra slices to account for the overlap of images.
// Let's make sure these 2 slices fit; if they do not, add 1 to splits.
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits);
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
// if the new stuff does not fit in the GPU, it means we are in the edge case where adding that extra slice will overflow memory
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
// one more split should do the job, as it's an edge case.
splits++;
//recompute for later
slices_per_split=(image_size[2]+deviceCount*splits-1)/(deviceCount*splits); // amount of slices that fit on a GPU. Later we add 2 to these, as we need them for the overlap
mem_img_each_GPU=(mem_slice_image*(slices_per_split+buffer_length*2));
}
// How many EXTRA buffer slices should be able to fit in here??!?!
// Only do it if there are splits needed.
if(splits>1){
mem_free=mem_GPU_global-(3*mem_img_each_GPU+mem_auxiliary);
unsigned int extra_buff=(mem_free/mem_slice_image);
buffer_length=(extra_buff/2)/3; // we need double whatever this results in, rounded down.
buffer_length=max(buffer_length,2);// minimum 2
buffer_length=min(MAX_BUFFER,buffer_length);
mem_img_each_GPU=mem_slice_image*(slices_per_split+buffer_length*2);
}else{
buffer_length=2;
}
// Assert
if (mem_GPU_global< 3*mem_img_each_GPU+mem_auxiliary){
mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPU","Assertion Failed. Logic behind spliting flawed! Please tell: [email protected]\n");
}
}
// Assert
if ((slices_per_split+buffer_length*2)*image_size[0]*image_size[1]* sizeof(float)!= mem_img_each_GPU){
mexErrMsgIdAndTxt("minimizeTV:POCS_TV:GPU","Assertion Failed. Memory needed calculation broken! Please tell: [email protected]\n");
}
float** d_image= (float**)malloc(deviceCount*sizeof(float*));
float** d_dimgTV= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2aux= (float**)malloc(deviceCount*sizeof(float*));
float** d_norm2= (float**)malloc(deviceCount*sizeof(float*));
// allocate memory in each GPU
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMalloc((void**)&d_image[dev] , mem_img_each_GPU);
cudaMemset(d_image[dev],0 , mem_img_each_GPU);
cudaMalloc((void**)&d_dimgTV[dev] , mem_img_each_GPU);
cudaMalloc((void**)&d_norm2[dev] , slices_per_split*mem_slice_image);
cudaMalloc((void**)&d_norm2aux[dev] , mem_auxiliary);
cudaCheckErrors("Malloc error");
}
unsigned long long buffer_pixels=buffer_length*image_size[0]*image_size[1];
float* buffer;
if(splits>1){
mexWarnMsgIdAndTxt("minimizeTV:POCS_TV:Image_split","Your image can not be fully split between the available GPUs. The computation of minTV will be significantly slowed due to the image size.\nApproximated mathematics turned on for computational speed.");
}else{
cudaMallocHost((void**)&buffer,buffer_length*image_size[0]*image_size[1]*sizeof(float));
}
// Let's try to make the host memory pinned:
// We already queried the GPUs and assumed they are the same, thus they should have the same attributes.
int isHostRegisterSupported;
cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0);
// splits>2 is completely empirical observation
if (isHostRegisterSupported & splits>2){
cudaHostRegister(img ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
cudaHostRegister(dst ,image_size[2]*image_size[1]*image_size[0]*sizeof(float),cudaHostRegisterPortable);
}
cudaCheckErrors("Error pinning memory");
// Create streams
int nStream_device=2;
int nStreams=deviceCount*nStream_device;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
for (int i = 0; i < nStream_device; ++i){
cudaStreamCreate(&stream[i+dev*nStream_device]);
}
}
cudaCheckErrors("Stream creation fail");
// For the reduction
double totalsum_prev;
double totalsum;
float sum_curr_spl;
float * sumnorm2;
cudaMallocHost((void**)&sumnorm2,deviceCount*sizeof(float));
unsigned int curr_slices;
unsigned long long curr_pixels;
size_t linear_idx_start;
unsigned long long* offset_device=(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* offset_host =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
unsigned long long* bytes_device =(unsigned long long*)malloc(deviceCount*sizeof(unsigned long long));
bool is_first_chunk;
bool is_last_chunk;
for(unsigned int i=0;i<maxIter;i+=(buffer_length-1)){
if(splits>1){
totalsum_prev=0;
}
for(unsigned int sp=0;sp<splits;sp++){
// For each iteration we need to compute the whole image. The ordering of these loops
// needs to be like this due to the bounding layers between splits. If more than 1 split is needed
// per GPU then there is no other way than taking the entire memory out of the GPU and putting it back.
// If the memory can be shared between GPUs fully without extra splits, then there is an easy way of synchronizing the memory
// Copy image to memory
for (dev = 0; dev < deviceCount; dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
curr_pixels=curr_slices*image_size[0]*image_size[1];
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
// Check if it's the first or last chunk
is_last_chunk=!((sp*deviceCount+dev)<deviceCount*splits-1);
is_first_chunk=!(sp*deviceCount+dev);
// Let's compute where we start the copies and how much we copy. This avoids 3 calls to Memcpy.
offset_device[dev]=buffer_pixels*is_first_chunk;
offset_host[dev]=linear_idx_start-buffer_pixels*!is_first_chunk;
bytes_device[dev]=curr_pixels+buffer_pixels*!is_first_chunk+buffer_pixels*!is_last_chunk;
}
if(i==0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemcpyAsync(d_image[dev]+offset_device[dev], img+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
}
// if we need to split and it's not the first iteration, then we need to copy the previous result from host memory.
if (splits>1 & i>0){
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemcpyAsync(d_image[dev]+offset_device[dev], dst+offset_host[dev] , bytes_device[dev]*sizeof(float), cudaMemcpyHostToDevice,stream[dev*nStream_device+1]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
}
cudaCheckErrors("Memcpy failure on multi split");
for(unsigned int ib=0; (ib<(buffer_length-1)) && ((i+ib)<maxIter); ib++){
// For the gradient
dim3 blockGrad(10, 10, 10);
dim3 gridGrad((image_size[0]+blockGrad.x-1)/blockGrad.x, (image_size[1]+blockGrad.y-1)/blockGrad.y, (curr_slices+buffer_length*2+blockGrad.z-1)/blockGrad.z);
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// Compute the gradient of the TV norm
// I don't understand why I need to store 2 layers to compute correctly with 1 buffer. The bounding checks should
// be enough but they are not.
gradientTV<<<gridGrad, blockGrad,0,stream[dev*nStream_device]>>>(d_image[dev],d_dimgTV[dev],(long)(curr_slices+buffer_length*2-1), image_size[1],image_size[0]);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
// no need to copy the 2 aux slices here
cudaStreamSynchronize(stream[dev*nStream_device]);
cudaMemcpyAsync(d_norm2[dev], d_dimgTV[dev]+buffer_pixels, image_size[0]*image_size[1]*curr_slices*sizeof(float), cudaMemcpyDeviceToDevice,stream[dev*nStream_device+1]);
}
// Compute the L2 norm of the gradient. For that, a reduction is used.
//REDUCE
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
cudaStreamSynchronize(stream[dev*nStream_device+1]);
reduceNorm2 << <dimgridRed, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device]>> >(d_norm2[dev], d_norm2aux[dev], total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
size_t dimblockRed = MAXTHREADS;
size_t dimgridRed = (total_pixels + MAXTHREADS - 1) / MAXTHREADS;
if (dimgridRed > 1) {
reduceSum << <1, dimblockRed, MAXTHREADS*sizeof(float),stream[dev*nStream_device] >> >(d_norm2aux[dev], d_norm2[dev], dimgridRed);
cudaStreamSynchronize(stream[dev*nStream_device]);
cudaMemcpyAsync(&sumnorm2[dev], d_norm2[dev], sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
else {
cudaStreamSynchronize(stream[dev*nStream_device]);
cudaMemcpyAsync(&sumnorm2[dev], d_norm2aux[dev], sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Reduction error");
// Accumulate the norm across devices
sum_curr_spl=0;
// this is CPU code
for (dev = 0; dev < deviceCount; dev++){
sum_curr_spl+=sumnorm2[dev];
}
sum_curr_spl+=0.0000001f; // avoid division by zero
// If we have more than one splits, lets use the result from prior calls
if(i>0 && splits>1){
// this is already stored:
//totalsum=totalsum_prev;
}else{
totalsum=sum_curr_spl;
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
//NORMALIZE
//in a Tesla, maximum blocks =15 SM * 4 blocks/SM
divideArrayScalar <<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_dimgTV[dev]+buffer_pixels,(float)sqrt(totalsum),total_pixels);
//MULTIPLY HYPERPARAMETER
multiplyArrayScalar<<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_dimgTV[dev]+buffer_pixels,alpha, total_pixels);
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Scalar operations error");
//SUBTRACT GRADIENT
//////////////////////////////////////////////
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
substractArrays<<<60,MAXTHREADS,0,stream[dev*nStream_device]>>>(d_image[dev]+buffer_pixels,d_dimgTV[dev]+buffer_pixels, total_pixels);
}
}
// Synchronize mathematics, make sure bounding pixels are correct
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
if (dev<deviceCount-1){
cudaSetDevice(dev+1);
cudaMemcpy(buffer, d_image[dev+1], buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost);
cudaSetDevice(dev);
cudaMemcpy(d_image[dev]+total_pixels+buffer_pixels,buffer, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice);
}
cudaDeviceSynchronize();
if (dev>0){
cudaSetDevice(dev-1);
cudaMemcpyAsync(buffer, d_image[dev-1]+total_pixels+buffer_pixels, buffer_pixels*sizeof(float), cudaMemcpyDeviceToHost);
cudaSetDevice(dev);
cudaMemcpyAsync(d_image[dev],buffer, buffer_pixels*sizeof(float), cudaMemcpyHostToDevice);
}
}
}else{
// We need to take it out :(
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((sp*deviceCount+dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*(sp*deviceCount+dev);
linear_idx_start=image_size[0]*image_size[1]*slices_per_split*(sp*deviceCount+dev);
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpyAsync(&dst[linear_idx_start], d_image[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*nStream_device+1]);
}
}
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Memory gather error");
totalsum_prev+=sum_curr_spl;
}
totalsum=totalsum_prev;
}
// If there were no splits, we still have the data in device memory
if(splits==1){
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
curr_slices=((dev+1)*slices_per_split<image_size[2])? slices_per_split: image_size[2]-slices_per_split*dev;
total_pixels=curr_slices*image_size[0]*image_size[1];
cudaMemcpy(dst+slices_per_split*image_size[0]*image_size[1]*dev, d_image[dev]+buffer_pixels,total_pixels*sizeof(float), cudaMemcpyDeviceToHost);
}
}
cudaCheckErrors("Copy result back");
for(dev=0; dev<deviceCount;dev++){
cudaSetDevice(dev);
cudaFree(d_image[dev]);
cudaFree(d_norm2aux[dev]);
cudaFree(d_dimgTV[dev]);
cudaFree(d_norm2[dev]);
}
if (splits==1){
cudaFreeHost(buffer);
}
if (isHostRegisterSupported& splits>2){
cudaHostUnregister(img);
cudaHostUnregister(dst);
}
for (int i = 0; i < nStreams; ++i)
cudaStreamDestroy(stream[i]) ;
for (dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaDeviceSynchronize();
}
cudaCheckErrors("Memory free");
cudaDeviceReset();
}
void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){
size_t memfree;
size_t memtotal;
for (int dev = 0; dev < deviceCount; dev++){
cudaSetDevice(dev);
cudaMemGetInfo(&memfree,&memtotal);
if(dev==0) *mem_GPU_global=memfree;
if(memfree<memtotal/2){
mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
}
cudaCheckErrors("Check mem error");
*mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
}
*mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95);
//*mem_GPU_global= insert your known number here, in bytes.
}
|
b1bf9bba0d0bef17909a9588a22afbb73d59be6b.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <hip/hip_runtime_api.h>
#include <hip/hip_fp16.h>
namespace nvinfer1
{
namespace plugin
{
const int PILLARS_PER_BLOCK = 64;
const int PILLAR_FEATURE_SIZE = 64;
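// scatterBEV_kernel scatters the per-pillar feature vectors (PILLAR_FEATURE_SIZE floats each)
// into a [num_features, featureY, featureX] spatial (BEV) canvas, writing pillar i at the (x, y)
// cell stored in coords_data. Each block first stages PILLARS_PER_BLOCK pillars in shared memory
// so that the global reads of the feature matrix are coalesced across threads.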
template <typename Element>
__global__ void scatterBEV_kernel(const Element *pillar_features_data,
const unsigned int *coords_data, const unsigned int *params_data,
unsigned int featureX, unsigned int featureY,
Element *spatial_feature_data)
{
int pillar_idx = blockIdx.x * PILLARS_PER_BLOCK + threadIdx.x;
int valid_pillars_inBlock = PILLARS_PER_BLOCK;
const int num_pillars = params_data[0];
int valid_blocks = (num_pillars+PILLARS_PER_BLOCK-1)/PILLARS_PER_BLOCK;
if(blockIdx.x >= valid_blocks) return;
if(blockIdx.x == (valid_blocks-1)) {
valid_pillars_inBlock = num_pillars % PILLARS_PER_BLOCK;
}
valid_pillars_inBlock = (valid_pillars_inBlock==0) ? PILLARS_PER_BLOCK : valid_pillars_inBlock;
__shared__ Element pillarSM[PILLARS_PER_BLOCK][PILLAR_FEATURE_SIZE]; //pillar*64
for (int i = 0; i < valid_pillars_inBlock; i++)
{
pillarSM[i][threadIdx.x] = pillar_features_data[ (blockIdx.x * PILLARS_PER_BLOCK +i)*PILLAR_FEATURE_SIZE + threadIdx.x];
}
__syncthreads();
if(pillar_idx >= num_pillars) return;
int4 coord = ((const int4 *)coords_data)[pillar_idx];
int x = coord.w;
int y = coord.z;
for (int i = 0; i < PILLAR_FEATURE_SIZE; i++)
{
spatial_feature_data[i*featureY*featureX + y*featureX + x] = pillarSM[threadIdx.x][i];
}
}
template <typename Element>
int pillarScatterKernelLaunch(
int batch_size,
int max_pillar_num,
int num_features,
const Element *pillar_features_data,
const unsigned int *coords_data,
const unsigned int *params_data,
unsigned int featureX, unsigned int featureY,
Element *spatial_feature_data,
hipStream_t stream)
{
dim3 blocks( (featureX*featureY+PILLARS_PER_BLOCK-1)/PILLARS_PER_BLOCK);
dim3 threads(PILLARS_PER_BLOCK);
for (int b = 0; b < batch_size; b++) {
hipLaunchKernelGGL(( scatterBEV_kernel<Element>), dim3(blocks), dim3(threads), 0, stream,
pillar_features_data + b*max_pillar_num*num_features,
coords_data + b*max_pillar_num*4,
params_data + b,
featureX,
featureY,
spatial_feature_data + b*num_features*featureX*featureY
);
auto err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
return -1;
}
}
return 0;
}
template int pillarScatterKernelLaunch<half>(
int batch_size,
int max_pillar_num,
int num_features,
const half *pillar_features_data,
const unsigned int *coords_data,
const unsigned int *params_data,
unsigned int featureX, unsigned int featureY,
half *spatial_feature_data,
hipStream_t stream);
template int pillarScatterKernelLaunch<float>(
int batch_size,
int max_pillar_num,
int num_features,
const float *pillar_features_data,
const unsigned int *coords_data,
const unsigned int *params_data,
unsigned int featureX, unsigned int featureY,
float *spatial_feature_data,
hipStream_t stream);
} // namespace plugin
} // namespace nvinfer1
| b1bf9bba0d0bef17909a9588a22afbb73d59be6b.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <iostream>
#include <cuda_runtime_api.h>
#include <cuda_fp16.h>
namespace nvinfer1
{
namespace plugin
{
const int PILLARS_PER_BLOCK = 64;
const int PILLAR_FEATURE_SIZE = 64;
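// Scatter the per-pillar features into the [num_features, featureY, featureX] spatial canvas at
// the (x, y) coordinates given in coords_data; the shared-memory staging keeps the feature reads coalesced.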
template <typename Element>
__global__ void scatterBEV_kernel(const Element *pillar_features_data,
const unsigned int *coords_data, const unsigned int *params_data,
unsigned int featureX, unsigned int featureY,
Element *spatial_feature_data)
{
int pillar_idx = blockIdx.x * PILLARS_PER_BLOCK + threadIdx.x;
int valid_pillars_inBlock = PILLARS_PER_BLOCK;
const int num_pillars = params_data[0];
int valid_blocks = (num_pillars+PILLARS_PER_BLOCK-1)/PILLARS_PER_BLOCK;
if(blockIdx.x >= valid_blocks) return;
if(blockIdx.x == (valid_blocks-1)) {
valid_pillars_inBlock = num_pillars % PILLARS_PER_BLOCK;
}
valid_pillars_inBlock = (valid_pillars_inBlock==0) ? PILLARS_PER_BLOCK : valid_pillars_inBlock;
__shared__ Element pillarSM[PILLARS_PER_BLOCK][PILLAR_FEATURE_SIZE]; //pillar*64
for (int i = 0; i < valid_pillars_inBlock; i++)
{
pillarSM[i][threadIdx.x] = pillar_features_data[ (blockIdx.x * PILLARS_PER_BLOCK +i)*PILLAR_FEATURE_SIZE + threadIdx.x];
}
__syncthreads();
if(pillar_idx >= num_pillars) return;
int4 coord = ((const int4 *)coords_data)[pillar_idx];
int x = coord.w;
int y = coord.z;
for (int i = 0; i < PILLAR_FEATURE_SIZE; i++)
{
spatial_feature_data[i*featureY*featureX + y*featureX + x] = pillarSM[threadIdx.x][i];
}
}
template <typename Element>
int pillarScatterKernelLaunch(
int batch_size,
int max_pillar_num,
int num_features,
const Element *pillar_features_data,
const unsigned int *coords_data,
const unsigned int *params_data,
unsigned int featureX, unsigned int featureY,
Element *spatial_feature_data,
cudaStream_t stream)
{
dim3 blocks( (featureX*featureY+PILLARS_PER_BLOCK-1)/PILLARS_PER_BLOCK);
dim3 threads(PILLARS_PER_BLOCK);
for (int b = 0; b < batch_size; b++) {
scatterBEV_kernel<Element><<<blocks, threads, 0, stream>>>
(pillar_features_data + b*max_pillar_num*num_features,
coords_data + b*max_pillar_num*4,
params_data + b,
featureX,
featureY,
spatial_feature_data + b*num_features*featureX*featureY
);
auto err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
return -1;
}
}
return 0;
}
template int pillarScatterKernelLaunch<half>(
int batch_size,
int max_pillar_num,
int num_features,
const half *pillar_features_data,
const unsigned int *coords_data,
const unsigned int *params_data,
unsigned int featureX, unsigned int featureY,
half *spatial_feature_data,
cudaStream_t stream);
template int pillarScatterKernelLaunch<float>(
int batch_size,
int max_pillar_num,
int num_features,
const float *pillar_features_data,
const unsigned int *coords_data,
const unsigned int *params_data,
unsigned int featureX, unsigned int featureY,
float *spatial_feature_data,
cudaStream_t stream);
} // namespace plugin
} // namespace nvinfer1
|
42975a025ee54822505b9f7205b80a9e4fae7122.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2020 ETH Zurich. All Rights Reserved.
#include "minimize.h"
#include "integration_kernel.h"
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/utils/config.h>
namespace mirheo
{
IntegratorMinimize::IntegratorMinimize(const MirState *state, const std::string& name, real maxDisplacement) :
Integrator(state, name), maxDisplacement_{maxDisplacement}
{}
IntegratorMinimize::IntegratorMinimize(const MirState *state, Loader&, const ConfigObject& object) :
IntegratorMinimize(state, object["name"], object["maxDisplacement"])
{}
void IntegratorMinimize::execute(ParticleVector *pv, hipStream_t stream)
{
const auto t = static_cast<real>(getState()->currentTime);
const auto dt = static_cast<real>(getState()->dt);
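// Velocity-free minimization step: each particle is displaced by dr = dt^2 * F / m (a
// steepest-descent-like move along the force), with |dr| clamped to maxDisplacement_ inside the
// functor below so the relaxation stays stable.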
auto st2 = [max = maxDisplacement_] __device__ (Particle& p, real3 f, real invm, real dt)
{
// Limit the displacement magnitude to `max`.
real3 dr = dt * dt * invm * f;
real dr2 = dot(dr, dr);
if (dr2 > max * max)
dr *= max * math::rsqrt(dr2);
p.r += dr;
};
integrate(pv, dt, st2, stream);
invalidatePV_(pv);
}
void IntegratorMinimize::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "IntegratorMinimize"));
}
ConfigObject IntegratorMinimize::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = Integrator::_saveSnapshot(saver, typeName);
config.emplace("maxDisplacement", saver(maxDisplacement_));
return config;
}
} // namespace mirheo
| 42975a025ee54822505b9f7205b80a9e4fae7122.cu | // Copyright 2020 ETH Zurich. All Rights Reserved.
#include "minimize.h"
#include "integration_kernel.h"
#include <mirheo/core/logger.h>
#include <mirheo/core/pvs/particle_vector.h>
#include <mirheo/core/utils/config.h>
namespace mirheo
{
IntegratorMinimize::IntegratorMinimize(const MirState *state, const std::string& name, real maxDisplacement) :
Integrator(state, name), maxDisplacement_{maxDisplacement}
{}
IntegratorMinimize::IntegratorMinimize(const MirState *state, Loader&, const ConfigObject& object) :
IntegratorMinimize(state, object["name"], object["maxDisplacement"])
{}
void IntegratorMinimize::execute(ParticleVector *pv, cudaStream_t stream)
{
const auto t = static_cast<real>(getState()->currentTime);
const auto dt = static_cast<real>(getState()->dt);
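// Each particle moves by dr = dt^2 * F / m, with the displacement magnitude capped at
// maxDisplacement_ by the functor below; no velocities are involved in this relaxation step.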
auto st2 = [max = maxDisplacement_] __device__ (Particle& p, real3 f, real invm, real dt)
{
// Limit the displacement magnitude to `max`.
real3 dr = dt * dt * invm * f;
real dr2 = dot(dr, dr);
if (dr2 > max * max)
dr *= max * math::rsqrt(dr2);
p.r += dr;
};
integrate(pv, dt, st2, stream);
invalidatePV_(pv);
}
void IntegratorMinimize::saveSnapshotAndRegister(Saver& saver)
{
saver.registerObject(this, _saveSnapshot(saver, "IntegratorMinimize"));
}
ConfigObject IntegratorMinimize::_saveSnapshot(Saver& saver, const std::string& typeName)
{
ConfigObject config = Integrator::_saveSnapshot(saver, typeName);
config.emplace("maxDisplacement", saver(maxDisplacement_));
return config;
}
} // namespace mirheo
|
9d9f766eec786385347333a32691f809937c30f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/time.h>
#include "diffBraggCUDA.h"
#include "diffBragg_gpu_kernel.h"
#include <stdio.h>
//#define BLOCKSIZE 128
//#define NUMBLOCKS 128
//https://stackoverflow.com/a/14038590/2077270
#define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void error_msg(hipError_t err, const char* msg){
if (err != hipSuccess){
printf("%s: CUDA error message: %s\n", msg, hipGetErrorString(err));
exit(err);
}
}
void diffBragg_sum_over_steps_cuda(
int Npix_to_model,
std::vector<unsigned int>& panels_fasts_slows,
image_type& floatimage,
images& d_image,
images& d2_image,
step_arrays& db_steps,
detector& db_det,
beam& db_beam,
crystal& db_cryst,
flags& db_flags,
cuda_flags& db_cu_flags,
diffBragg_cudaPointers& cp,
timer_variables& TIMERS){
if (db_cryst.phi0 != 0 || db_cryst.phisteps > 1){
printf("PHI (goniometer position) not supported in GPU code: phi0=%f phisteps=%d, phistep=%d\n", db_cryst.phi0, db_cryst.phisteps, db_cryst.phistep);
exit(-1);
}
int numblocks;
int blocksize;
char* diffBragg_blocks = getenv("DIFFBRAGG_NUM_BLOCKS");
char* diffBragg_threads = getenv("DIFFBRAGG_THREADS_PER_BLOCK");
if (diffBragg_threads==NULL)
blocksize=128;
else
blocksize=atoi(diffBragg_threads);
if (diffBragg_blocks==NULL)
numblocks = (Npix_to_model+blocksize-1)/blocksize;
else
numblocks = atoi(diffBragg_blocks);
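// Launch configuration: DIFFBRAGG_THREADS_PER_BLOCK overrides the default of 128 threads per block,
// and DIFFBRAGG_NUM_BLOCKS overrides the default grid of one thread per modeled pixel
// (Npix_to_model rounded up to whole blocks).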
int cuda_devices;
hipGetDeviceCount(&cuda_devices);
error_msg(hipGetLastError(), "after device count");
if (db_flags.verbose > 1)
printf("Found %d CUDA-capable devices\n", cuda_devices);
//if (device_Id <= cuda_devices)
gpuErr(hipSetDevice(db_cu_flags.device_Id));
double time;
struct timeval t1, t2;//, t3 ,t4;
gettimeofday(&t1, 0);
// determine if we need to allocate pixels, and how many.
// For best usage, one should use the diffBragg property (visible from Python) Npix_to_allocate
// in order to just allocate to the GPU - this is useful for ensemble refinement, where each shot
// can have a variable number of pixels being modeled, and one only needs to allocate the device once
// (with the largest expected number of pixels for a given shot)
// TODO clean up this logic a bit
if (cp.device_is_allocated && (cp.npix_allocated < Npix_to_model)){
printf("Need to re-allocate pixels, currently have %d allocated, but trying to model %d\n",
cp.npix_allocated, Npix_to_model);
exit(-1);
}
else if (db_cu_flags.Npix_to_allocate==-1){
db_cu_flags.Npix_to_allocate = Npix_to_model;
}
else if (Npix_to_model > db_cu_flags.Npix_to_allocate){
printf("Npix to model=%d is greater than the number of pixel requested for allocation (%d)!\n",
Npix_to_model, db_cu_flags.Npix_to_allocate);
exit(-1);
}
// support dynamic allocation for different numbers of sources
if ( cp.previous_nsource != 0 && cp.previous_nsource != db_beam.number_of_sources){
gpuErr(hipFree(cp.cu_source_X));
gpuErr(hipFree(cp.cu_source_Y));
gpuErr(hipFree(cp.cu_source_Z));
gpuErr(hipFree(cp.cu_source_I));
gpuErr(hipFree(cp.cu_source_lambda));
printf("Reallocating for %d sources!:\n", db_beam.number_of_sources);
gpuErr(hipMallocManaged(&cp.cu_source_X, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_source_Y, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_source_Z, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_source_I, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_source_lambda, db_beam.number_of_sources*sizeof(CUDAREAL)));
cp.previous_nsource = db_beam.number_of_sources;
}
if(cp.device_is_allocated){
if (db_flags.verbose){
printf("Will model %d pixels (GPU has %d pre-allocated pix)\n", Npix_to_model, cp.npix_allocated);
}
}
else{
if (db_flags.verbose){
printf("Will model %d pixels and allocate %d pix\n", Npix_to_model, db_cu_flags.Npix_to_allocate);
}
gpuErr(hipMallocManaged(&cp.cu_source_X, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_source_Y, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_source_Z, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_source_I, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_source_lambda, db_beam.number_of_sources*sizeof(CUDAREAL)));
cp.previous_nsource = db_beam.number_of_sources;
gpuErr(hipMallocManaged((void **)&cp.cu_UMATS, db_cryst.UMATS.size()*sizeof(MAT3)));
gpuErr(hipMallocManaged((void **)&cp.cu_UMATS_RXYZ, db_cryst.UMATS_RXYZ.size()*sizeof(MAT3)));
gpuErr(hipMallocManaged((void **)&cp.cu_AMATS, db_cryst.UMATS_RXYZ.size()*sizeof(MAT3)));
if (db_cryst.UMATS_RXYZ_prime.size()>0)
gpuErr(hipMallocManaged((void **)&cp.cu_UMATS_RXYZ_prime, db_cryst.UMATS_RXYZ_prime.size()*sizeof(MAT3)));
if (db_cryst.UMATS_RXYZ_dbl_prime.size()>0)
gpuErr(hipMallocManaged((void **)&cp.cu_UMATS_RXYZ_dbl_prime, db_cryst.UMATS_RXYZ_dbl_prime.size()*sizeof(MAT3)));
gpuErr(hipMallocManaged((void **)&cp.cu_dB_Mats, db_cryst.dB_Mats.size()*sizeof(MAT3)));
gpuErr(hipMallocManaged((void **)&cp.cu_dB2_Mats, db_cryst.dB2_Mats.size()*sizeof(MAT3)));
gpuErr(hipMallocManaged((void **)&cp.cu_RotMats, db_cryst.RotMats.size()*sizeof(MAT3)));
gpuErr(hipMallocManaged((void **)&cp.cu_dRotMats, db_cryst.dRotMats.size()*sizeof(MAT3)));
gpuErr(hipMallocManaged((void **)&cp.cu_d2RotMats, db_cryst.d2RotMats.size()*sizeof(MAT3)));
gpuErr(hipMallocManaged(&cp.cu_fdet_vectors, db_det.fdet_vectors.size()*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_sdet_vectors, db_det.fdet_vectors.size()*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_odet_vectors, db_det.fdet_vectors.size()*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_pix0_vectors, db_det.fdet_vectors.size()*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_close_distances, db_det.close_distances.size()*sizeof(CUDAREAL)));
if (db_cryst.fpfdp.size() > 0){
gpuErr(hipMallocManaged(&cp.cu_fpfdp, db_cryst.fpfdp.size()*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_atom_data, db_cryst.atom_data.size()*sizeof(CUDAREAL)));
}
if(db_cryst.fpfdp_derivs.size() > 0)
gpuErr(hipMallocManaged(&cp.cu_fpfdp_derivs, db_cryst.fpfdp_derivs.size()*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_refine_Bmat, 6*sizeof(bool)));
gpuErr(hipMallocManaged(&cp.cu_refine_Umat, 3*sizeof(bool)));
gpuErr(hipMallocManaged(&cp.cu_refine_Ncells, 3*sizeof(bool)));
gpuErr(hipMallocManaged(&cp.cu_refine_panel_origin, 3*sizeof(bool)));
gpuErr(hipMallocManaged(&cp.cu_refine_panel_rot, 3*sizeof(bool)));
gpuErr(hipMallocManaged(&cp.cu_refine_lambda, 2*sizeof(bool)));
gpuErr(hipMallocManaged(&cp.cu_Fhkl, db_cryst.FhklLinear.size()*sizeof(CUDAREAL)));
if (db_flags.complex_miller)
gpuErr(hipMallocManaged(&cp.cu_Fhkl2, db_cryst.FhklLinear.size()*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged((void **)&cp.cu_dF_vecs, db_det.dF_vecs.size()*sizeof(VEC3)));
gpuErr(hipMallocManaged((void **)&cp.cu_dS_vecs, db_det.dF_vecs.size()*sizeof(VEC3)));
//gettimeofday(&t3, 0);
gpuErr(hipMallocManaged(&cp.cu_floatimage, db_cu_flags.Npix_to_allocate*sizeof(CUDAREAL) ));
if (db_flags.wavelength_img){
gpuErr(hipMallocManaged(&cp.cu_wavelenimage, db_cu_flags.Npix_to_allocate*sizeof(CUDAREAL) ));
}
if (db_flags.refine_diffuse){
gpuErr(hipMallocManaged(&cp.cu_d_diffuse_gamma_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_d_diffuse_sigma_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
}
if (db_flags.refine_fcell){
gpuErr(hipMallocManaged(&cp.cu_d_fcell_images, db_cu_flags.Npix_to_allocate*1*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_d2_fcell_images, db_cu_flags.Npix_to_allocate*1*sizeof(CUDAREAL)));
}
if (db_flags.refine_eta){
gpuErr(hipMallocManaged(&cp.cu_d_eta_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_d2_eta_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
}
if (std::count(db_flags.refine_Umat.begin(), db_flags.refine_Umat.end(), true) > 0){
gpuErr(hipMallocManaged(&cp.cu_d_Umat_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL) ));
gpuErr(hipMallocManaged(&cp.cu_d2_Umat_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL) ));
}
if (std::count(db_flags.refine_Ncells.begin(), db_flags.refine_Ncells.end(), true) > 0 || db_flags.refine_Ncells_def){
gpuErr(hipMallocManaged(&cp.cu_d_Ncells_images, db_cu_flags.Npix_to_allocate*6*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_d2_Ncells_images, db_cu_flags.Npix_to_allocate*6*sizeof(CUDAREAL)));
}
if (std::count(db_flags.refine_panel_rot.begin(), db_flags.refine_panel_rot.end(), true) > 0)
gpuErr(hipMallocManaged(&cp.cu_d_panel_rot_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
if (std::count(db_flags.refine_panel_origin.begin(), db_flags.refine_panel_origin.end(), true) > 0)
gpuErr(hipMallocManaged(&cp.cu_d_panel_orig_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
if (std::count(db_flags.refine_lambda.begin(), db_flags.refine_lambda.end(), true) > 0)
gpuErr(hipMallocManaged(&cp.cu_d_lambda_images, db_cu_flags.Npix_to_allocate*2*sizeof(CUDAREAL)));
if (std::count(db_flags.refine_Bmat.begin(), db_flags.refine_Bmat.end(), true) > 0){
gpuErr(hipMallocManaged(&cp.cu_d_Bmat_images, db_cu_flags.Npix_to_allocate*6*sizeof(CUDAREAL)));
gpuErr(hipMallocManaged(&cp.cu_d2_Bmat_images, db_cu_flags.Npix_to_allocate*6*sizeof(CUDAREAL)));
}
if (db_flags.refine_fp_fdp)
gpuErr(hipMallocManaged(&cp.cu_d_fp_fdp_images, db_cu_flags.Npix_to_allocate*2*sizeof(CUDAREAL)));
if(db_cryst.nominal_hkl.size() >0)
gpuErr(hipMallocManaged(&cp.cu_nominal_hkl, db_cu_flags.Npix_to_allocate*3*sizeof(int)));
//gettimeofday(&t4, 0);
//time = (1000000.0*(t4.tv_sec-t3.tv_sec) + t4.tv_usec-t3.tv_usec)/1000.0;
//printf("TIME SPENT ALLOCATING (IMAGES ONLY): %3.10f ms \n", time);
gpuErr(hipMallocManaged(&cp.cu_panels_fasts_slows, db_cu_flags.Npix_to_allocate*3*sizeof(panels_fasts_slows[0])));
cp.npix_allocated = db_cu_flags.Npix_to_allocate;
} // END of allocation
bool ALLOC = !cp.device_is_allocated; // shortcut variable
gettimeofday(&t2, 0);
time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
if (TIMERS.recording) TIMERS.cuda_alloc+= time;
if(db_flags.verbose>1)
printf("TIME SPENT ALLOCATING (TOTAL): %3.10f ms \n", time);
//ALLOC = false;
// BEGIN COPYING DATA
gettimeofday(&t1, 0);
bool FORCE_COPY=true;
// END step position
// BEGIN sources
if (db_cu_flags.update_sources || ALLOC || FORCE_COPY){
for (int i=0; i< db_beam.number_of_sources; i++){
VEC3 incident(db_beam.source_X[i], db_beam.source_Y[i], db_beam.source_Z[i]);
incident /= incident.norm();
cp.cu_source_X[i] = incident[0];
cp.cu_source_Y[i] = incident[1];
cp.cu_source_Z[i] = incident[2];
cp.cu_source_I[i] = db_beam.source_I[i];
cp.cu_source_lambda[i] = db_beam.source_lambda[i];
}
if(db_flags.verbose>1 )
printf("H2D sources\n");
}
// END sources
// UMATS
if (db_cu_flags.update_umats || ALLOC||FORCE_COPY){
for (int i=0; i< db_cryst.UMATS.size(); i++)
cp.cu_UMATS[i] = db_cryst.UMATS[i];
for (int i=0; i < db_cryst.UMATS_RXYZ.size(); i++)
cp.cu_UMATS_RXYZ[i] = db_cryst.UMATS_RXYZ[i];
for (int i=0; i < db_cryst.UMATS_RXYZ_prime.size(); i++)
cp.cu_UMATS_RXYZ_prime[i] = db_cryst.UMATS_RXYZ_prime[i];
for (int i=0; i < db_cryst.UMATS_RXYZ_dbl_prime.size(); i++)
cp.cu_UMATS_RXYZ_dbl_prime[i] = db_cryst.UMATS_RXYZ_dbl_prime[i];
if(db_flags.verbose>1)
printf("H2D Done copying Umats\n") ;
}
// END UMATS
if (db_cu_flags.update_umats || ALLOC||FORCE_COPY){
MAT3 Amat_init = db_cryst.eig_U*db_cryst.eig_B*1e10*(db_cryst.eig_O.transpose());
for(int i_mos =0; i_mos< db_cryst.UMATS_RXYZ.size(); i_mos++){
cp.cu_AMATS[i_mos] = (db_cryst.UMATS_RXYZ[i_mos]*Amat_init).transpose();
}
if(db_flags.verbose>1)
printf("H2D Done copying Amats\n") ;
}
// BMATS
if(db_cu_flags.update_dB_mats || ALLOC || FORCE_COPY){
for (int i=0; i< db_cryst.dB_Mats.size(); i++)
cp.cu_dB_Mats[i] = db_cryst.dB_Mats[i];
for (int i=0; i< db_cryst.dB2_Mats.size(); i++)
cp.cu_dB2_Mats[i] = db_cryst.dB2_Mats[i];
if(db_flags.verbose>1)
printf("H2D Done copying dB_Mats\n") ;
}
// END BMATS
// ROT MATS
if(db_cu_flags.update_rotmats || ALLOC || FORCE_COPY){
for (int i=0; i<db_cryst.RotMats.size(); i++)
cp.cu_RotMats[i] = db_cryst.RotMats[i];
for (int i=0; i<db_cryst.dRotMats.size(); i++)
cp.cu_dRotMats[i] = db_cryst.dRotMats[i];
for (int i=0; i<db_cryst.d2RotMats.size(); i++)
cp.cu_d2RotMats[i] = db_cryst.d2RotMats[i];
if (db_flags.verbose>1)
printf("H2D Done copying rotmats\n");
}
// END ROT MATS
// DETECTOR VECTORS
if (db_cu_flags.update_detector || ALLOC || FORCE_COPY){
for (int i=0; i<db_det.fdet_vectors.size(); i++){
cp.cu_fdet_vectors[i] = db_det.fdet_vectors[i];
cp.cu_sdet_vectors[i] = db_det.sdet_vectors[i];
cp.cu_odet_vectors[i] = db_det.odet_vectors[i];
cp.cu_pix0_vectors[i] = db_det.pix0_vectors[i];
}
for(int i=0; i < db_det.close_distances.size();i++){
cp.cu_close_distances[i] = db_det.close_distances[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying detector vectors\n");
}
// END DETECTOR VECTORS
if ( ALLOC || FORCE_COPY){
for(int i=0; i< db_cryst.nominal_hkl.size(); i++){
cp.cu_nominal_hkl[i] = db_cryst.nominal_hkl[i];
}
for (int i=0; i< db_cryst.atom_data.size(); i++){
cp.cu_atom_data[i] = db_cryst.atom_data[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying atom data\n");
for(int i=0; i< db_cryst.fpfdp.size(); i++){
cp.cu_fpfdp[i] = db_cryst.fpfdp[i];
}
for(int i=0; i< db_cryst.fpfdp_derivs.size(); i++){
cp.cu_fpfdp_derivs[i] = db_cryst.fpfdp_derivs[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying fprime and fdblprime\n");
}
// BEGIN REFINEMENT FLAGS
if (db_cu_flags.update_refine_flags || ALLOC || FORCE_COPY){
for (int i=0; i<3; i++){
cp.cu_refine_Umat[i] = db_flags.refine_Umat[i];
cp.cu_refine_Ncells[i] = db_flags.refine_Ncells[i];
cp.cu_refine_panel_origin[i] = db_flags.refine_panel_origin[i];
cp.cu_refine_panel_rot[i] = db_flags.refine_panel_rot[i];
}
for(int i=0; i<2; i++)
cp.cu_refine_lambda[i] = db_flags.refine_lambda[i];
for(int i=0; i<6; i++)
cp.cu_refine_Bmat[i] = db_flags.refine_Bmat[i];
if (db_flags.verbose>1)
printf("H2D Done copying refinement flags\n");
}
// END REFINEMENT FLAGS
// BEGIN Fhkl
if (db_cu_flags.update_Fhkl || ALLOC || FORCE_COPY){
for(int i=0; i < db_cryst.FhklLinear.size(); i++){
cp.cu_Fhkl[i] = db_cryst.FhklLinear[i];
if (db_flags.complex_miller)
cp.cu_Fhkl2[i] = db_cryst.Fhkl2Linear[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying step Fhkl\n");
}
// END Fhkl
// BEGIN panel derivative vecs
if(db_cu_flags.update_panel_deriv_vecs || ALLOC || FORCE_COPY){
for (int i=0; i<db_det.dF_vecs.size(); i++){
cp.cu_dF_vecs[i] = db_det.dF_vecs[i];
cp.cu_dS_vecs[i] = db_det.dS_vecs[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying step panel derivative vectors\n");
}
// END panel derivative vecs
// BEGIN panels fasts slows
if (db_cu_flags.update_panels_fasts_slows || ALLOC || FORCE_COPY){
for (int i=0; i< panels_fasts_slows.size(); i++)
cp.cu_panels_fasts_slows[i] = panels_fasts_slows[i];
if (db_flags.verbose>1)
printf("H2D Done copying panels_fasts_slows\n");
}
// END panels fasts slows
gettimeofday(&t2, 0);
time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
if (TIMERS.recording) TIMERS.cuda_copy_to_dev += time;
if(db_flags.verbose>1)
printf("TIME SPENT COPYING DATA HOST->DEV: %3.10f ms \n", time);
cp.device_is_allocated = true;
error_msg(hipGetLastError(), "after copy to device");
gettimeofday(&t1, 0);
int Npanels = db_det.fdet_vectors.size()/3;
int num_atoms = db_cryst.atom_data.size()/5;
if (db_cryst.fpfdp.size() == 0){ // note cannot use atom data if fpfdp is 0, make this cleaner
num_atoms=0;
}
//int sm_size = number_of_sources*5*sizeof(CUDAREAL);
//gpu_sum_over_steps<<<numblocks, blocksize, sm_size >>>(
bool aniso_eta = db_cryst.UMATS_RXYZ.size() != db_cryst.UMATS_RXYZ_prime.size();
bool use_nominal_hkl = !db_cryst.nominal_hkl.empty();
hipLaunchKernelGGL(( gpu_sum_over_steps), dim3(numblocks), dim3(blocksize), 0, 0,
Npix_to_model, cp.cu_panels_fasts_slows,
cp.cu_floatimage,
cp.cu_wavelenimage,
cp.cu_d_Umat_images, cp.cu_d2_Umat_images,
cp.cu_d_Bmat_images, cp.cu_d2_Bmat_images,
cp.cu_d_Ncells_images, cp.cu_d2_Ncells_images,
cp.cu_d_fcell_images, cp.cu_d2_fcell_images,
cp.cu_d_eta_images, cp.cu_d2_eta_images,
cp.cu_d_lambda_images, cp.cu_d2_lambda_images,
cp.cu_d_panel_rot_images, cp.cu_d2_panel_rot_images,
cp.cu_d_panel_orig_images, cp.cu_d2_panel_orig_images,
cp.cu_d_fp_fdp_images,
db_steps.Nsteps, db_flags.printout_fpixel, db_flags.printout_spixel, db_flags.printout, db_cryst.default_F,
db_det.oversample, db_flags.oversample_omega, db_det.subpixel_size, db_det.pixel_size,
db_det.detector_thickstep, db_det.detector_thick, cp.cu_close_distances, db_det.detector_attnlen,
db_det.detector_thicksteps, db_beam.number_of_sources, db_cryst.phisteps, db_cryst.UMATS.size(),
db_flags.use_lambda_coefficients, db_beam.lambda0, db_beam.lambda1,
db_cryst.eig_U, db_cryst.eig_O, db_cryst.eig_B, db_cryst.RXYZ,
cp.cu_dF_vecs,
cp.cu_dS_vecs,
cp.cu_UMATS_RXYZ,
cp.cu_UMATS_RXYZ_prime,
cp.cu_UMATS_RXYZ_dbl_prime,
cp.cu_RotMats,
cp.cu_dRotMats,
cp.cu_d2RotMats,
cp.cu_UMATS,
cp.cu_dB_Mats,
cp.cu_dB2_Mats,
cp.cu_AMATS,
cp.cu_source_X, cp.cu_source_Y, cp.cu_source_Z, cp.cu_source_lambda, cp.cu_source_I,
db_beam.kahn_factor,
db_cryst.Na, db_cryst.Nb, db_cryst.Nc,
db_cryst.Nd, db_cryst.Ne, db_cryst.Nf,
db_cryst.phi0, db_cryst.phistep,
db_cryst.spindle_vec, db_beam.polarization_axis,
db_cryst.h_range, db_cryst.k_range, db_cryst.l_range,
db_cryst.h_max, db_cryst.h_min, db_cryst.k_max, db_cryst.k_min, db_cryst.l_max, db_cryst.l_min, db_cryst.dmin,
db_cryst.fudge, db_flags.complex_miller, db_flags.verbose, db_flags.only_save_omega_kahn,
db_flags.isotropic_ncells, db_flags.compute_curvatures,
cp.cu_Fhkl, cp.cu_Fhkl2,
cp.cu_refine_Bmat, cp.cu_refine_Ncells, db_flags.refine_Ncells_def, cp.cu_refine_panel_origin, cp.cu_refine_panel_rot,
db_flags.refine_fcell, cp.cu_refine_lambda, db_flags.refine_eta, cp.cu_refine_Umat,
cp.cu_fdet_vectors, cp.cu_sdet_vectors,
cp.cu_odet_vectors, cp.cu_pix0_vectors,
db_flags.nopolar, db_flags.point_pixel, db_beam.fluence, db_cryst.r_e_sqr, db_cryst.spot_scale, Npanels, aniso_eta, db_flags.no_Nabc_scale,
cp.cu_fpfdp, cp.cu_fpfdp_derivs, cp.cu_atom_data, num_atoms,
db_flags.refine_fp_fdp, cp.cu_nominal_hkl, use_nominal_hkl, db_cryst.anisoU, db_cryst.anisoG, db_flags.use_diffuse,
cp.cu_d_diffuse_gamma_images, cp.cu_d_diffuse_sigma_images,
db_flags.refine_diffuse, db_flags.gamma_miller_units, db_flags.refine_Icell,
db_flags.wavelength_img, db_cryst.laue_group_num, db_cryst.stencil_size);
error_msg(hipGetLastError(), "after kernel call");
hipDeviceSynchronize();
error_msg(hipGetLastError(), "after kernel completion");
if(db_flags.verbose>1)
printf("KERNEL_COMPLETE gpu_sum_over_steps\n");
gettimeofday(&t2, 0);
time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
if (TIMERS.recording) TIMERS.cuda_kernel += time;
if(db_flags.verbose>1)
printf("TIME SPENT(KERNEL): %3.10f ms \n", time);
gettimeofday(&t1, 0);
// COPY BACK FROM DEVICE
for (int i=0; i< Npix_to_model; i++){
floatimage[i] = cp.cu_floatimage[i];
}
if(db_flags.wavelength_img){
for (int i=0; i< Npix_to_model; i++){
d_image.wavelength[i] = cp.cu_wavelenimage[i];
}
}
if (db_flags.refine_fcell){
for (int i=0; i<Npix_to_model; i++){
d_image.fcell[i] = cp.cu_d_fcell_images[i];
d2_image.fcell[i] = cp.cu_d2_fcell_images[i];
}
}
if (std::count(db_flags.refine_Umat.begin(), db_flags.refine_Umat.end(), true) > 0){
for (int i=0; i<3*Npix_to_model; i++){
d_image.Umat[i] = cp.cu_d_Umat_images[i];
d2_image.Umat[i] = cp.cu_d2_Umat_images[i];
}
}
if (std::count(db_flags.refine_panel_rot.begin(), db_flags.refine_panel_rot.end(), true) > 0){
for (int i=0; i<3*Npix_to_model; i++)
d_image.panel_rot[i] = cp.cu_d_panel_rot_images[i];
}
if (std::count(db_flags.refine_panel_origin.begin(), db_flags.refine_panel_origin.end(), true) > 0){
for (int i=0; i<3*Npix_to_model; i++)
d_image.panel_orig[i] = cp.cu_d_panel_orig_images[i];
}
if (db_flags.refine_eta){
for (int i=0; i<3*Npix_to_model; i++){
d_image.eta[i] = cp.cu_d_eta_images[i];
d2_image.eta[i] = cp.cu_d2_eta_images[i];
}
}
if (std::count(db_flags.refine_Ncells.begin(), db_flags.refine_Ncells.end(), true) > 0 || db_flags.refine_Ncells_def){
for(int i=0; i<6*Npix_to_model; i++){
d_image.Ncells[i] = cp.cu_d_Ncells_images[i];
d2_image.Ncells[i] = cp.cu_d2_Ncells_images[i];
}
}
if (db_flags.refine_diffuse){
for(int i=0; i<3*Npix_to_model; i++){
d_image.diffuse_gamma[i] = cp.cu_d_diffuse_gamma_images[i];
d_image.diffuse_sigma[i] = cp.cu_d_diffuse_sigma_images[i];
}
}
if (std::count(db_flags.refine_Bmat.begin(), db_flags.refine_Bmat.end(), true) > 0){
for(int i=0; i<6*Npix_to_model; i++){
d_image.Bmat[i] = cp.cu_d_Bmat_images[i];
d2_image.Bmat[i] = cp.cu_d2_Bmat_images[i];
}
}
if (std::count(db_flags.refine_lambda.begin(), db_flags.refine_lambda.end(), true) > 0){
for(int i=0; i<2*Npix_to_model; i++)
d_image.lambda[i] = cp.cu_d_lambda_images[i];
}
if (db_flags.refine_fp_fdp){
for (int i=0; i< 2*Npix_to_model; i++)
d_image.fp_fdp[i] = cp.cu_d_fp_fdp_images[i];
}
gettimeofday(&t2, 0);
time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
if (TIMERS.recording) TIMERS.cuda_copy_from_dev += time;
if(db_flags.verbose>1)
printf("TIME SPENT COPYING BACK : %3.10f ms \n", time);
error_msg(hipGetLastError(), "After copy to host");
}
void freedom(diffBragg_cudaPointers& cp){
if (cp.device_is_allocated){
gpuErr(hipFree( cp.cu_floatimage));
gpuErr(hipFree( cp.cu_wavelenimage));
gpuErr(hipFree( cp.cu_d_Umat_images));
gpuErr(hipFree( cp.cu_d_Bmat_images));
gpuErr(hipFree( cp.cu_d_Ncells_images));
gpuErr(hipFree( cp.cu_d_diffuse_gamma_images));
gpuErr(hipFree( cp.cu_d_diffuse_sigma_images));
gpuErr(hipFree( cp.cu_d2_Umat_images));
gpuErr(hipFree( cp.cu_d2_Bmat_images));
gpuErr(hipFree( cp.cu_d2_Ncells_images));
gpuErr(hipFree( cp.cu_d_eta_images));
gpuErr(hipFree( cp.cu_d2_eta_images));
gpuErr(hipFree( cp.cu_d_fcell_images));
gpuErr(hipFree( cp.cu_d2_fcell_images));
gpuErr(hipFree( cp.cu_d_lambda_images));
gpuErr(hipFree( cp.cu_d_panel_rot_images));
gpuErr(hipFree( cp.cu_d_panel_orig_images));
gpuErr(hipFree( cp.cu_d_sausage_XYZ_scale_images));
gpuErr(hipFree( cp.cu_d_fp_fdp_images));
gpuErr(hipFree(cp.cu_Fhkl));
gpuErr(hipFree(cp.cu_Fhkl2));
gpuErr(hipFree(cp.cu_fdet_vectors));
gpuErr(hipFree(cp.cu_sdet_vectors));
gpuErr(hipFree(cp.cu_odet_vectors));
gpuErr(hipFree(cp.cu_pix0_vectors));
gpuErr(hipFree(cp.cu_close_distances));
gpuErr(hipFree(cp.cu_nominal_hkl));
gpuErr(hipFree(cp.cu_atom_data));
gpuErr(hipFree(cp.cu_fpfdp));
gpuErr(hipFree(cp.cu_fpfdp_derivs));
gpuErr(hipFree(cp.cu_source_X));
gpuErr(hipFree(cp.cu_source_Y));
gpuErr(hipFree(cp.cu_source_Z));
gpuErr(hipFree(cp.cu_source_I));
gpuErr(hipFree(cp.cu_source_lambda));
gpuErr(hipFree(cp.cu_UMATS));
gpuErr(hipFree(cp.cu_UMATS_RXYZ));
gpuErr(hipFree(cp.cu_AMATS));
gpuErr(hipFree(cp.cu_UMATS_RXYZ_prime));
gpuErr(hipFree(cp.cu_UMATS_RXYZ_dbl_prime));
gpuErr(hipFree(cp.cu_RotMats));
gpuErr(hipFree(cp.cu_dRotMats));
gpuErr(hipFree(cp.cu_d2RotMats));
gpuErr(hipFree(cp.cu_dB_Mats));
gpuErr(hipFree(cp.cu_dB2_Mats));
//gpuErr(hipFree(cp.cu_sausages_RXYZ));
//gpuErr(hipFree(cp.cu_d_sausages_RXYZ));
//gpuErr(hipFree(cp.cu_sausages_U));
//gpuErr(hipFree(cp.cu_sausages_scale));
gpuErr(hipFree(cp.cu_dF_vecs));
gpuErr(hipFree(cp.cu_dS_vecs));
gpuErr(hipFree(cp.cu_refine_Bmat));
gpuErr(hipFree(cp.cu_refine_Umat));
gpuErr(hipFree(cp.cu_refine_Ncells));
gpuErr(hipFree(cp.cu_refine_lambda));
gpuErr(hipFree(cp.cu_refine_panel_origin));
gpuErr(hipFree(cp.cu_refine_panel_rot));
gpuErr(hipFree(cp.cu_panels_fasts_slows));
cp.device_is_allocated = false;
cp.npix_allocated = 0;
}
}
| 9d9f766eec786385347333a32691f809937c30f7.cu | #include <sys/time.h>
#include "diffBraggCUDA.h"
#include "diffBragg_gpu_kernel.h"
#include <stdio.h>
//#define BLOCKSIZE 128
//#define NUMBLOCKS 128
//https://stackoverflow.com/a/14038590/2077270
#define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void error_msg(cudaError_t err, const char* msg){
if (err != cudaSuccess){
printf("%s: CUDA error message: %s\n", msg, cudaGetErrorString(err));
exit(err);
}
}
void diffBragg_sum_over_steps_cuda(
int Npix_to_model,
std::vector<unsigned int>& panels_fasts_slows,
image_type& floatimage,
images& d_image,
images& d2_image,
step_arrays& db_steps,
detector& db_det,
beam& db_beam,
crystal& db_cryst,
flags& db_flags,
cuda_flags& db_cu_flags,
diffBragg_cudaPointers& cp,
timer_variables& TIMERS){
if (db_cryst.phi0 != 0 || db_cryst.phisteps > 1){
printf("PHI (goniometer position) not supported in GPU code: phi0=%f phisteps=%d, phistep=%d\n", db_cryst.phi0, db_cryst.phisteps, db_cryst.phistep);
exit(-1);
}
int numblocks;
int blocksize;
char* diffBragg_blocks = getenv("DIFFBRAGG_NUM_BLOCKS");
char* diffBragg_threads = getenv("DIFFBRAGG_THREADS_PER_BLOCK");
if (diffBragg_threads==NULL)
blocksize=128;
else
blocksize=atoi(diffBragg_threads);
if (diffBragg_blocks==NULL)
numblocks = (Npix_to_model+blocksize-1)/blocksize;
else
numblocks = atoi(diffBragg_blocks);
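// Illustrative note (not part of the original source): the launch geometry above can be
// overridden at run time through the two environment variables read here, e.g.
//   export DIFFBRAGG_THREADS_PER_BLOCK=256
//   export DIFFBRAGG_NUM_BLOCKS=512
// When they are unset, blocksize falls back to 128 threads and numblocks to
// (Npix_to_model + blocksize - 1) / blocksize, i.e. enough blocks for one thread per modeled pixel.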
int cuda_devices;
cudaGetDeviceCount(&cuda_devices);
error_msg(cudaGetLastError(), "after device count");
if (db_flags.verbose > 1)
printf("Found %d CUDA-capable devices\n", cuda_devices);
//if (device_Id <= cuda_devices)
gpuErr(cudaSetDevice(db_cu_flags.device_Id));
double time;
struct timeval t1, t2;//, t3 ,t4;
gettimeofday(&t1, 0);
// determine if we need to allocate pixels, and how many.
// For best usage, one should use the diffBragg property (visible from Python) Npix_to_allocate
// in order to just allocate to the GPU - this is useful for ensemble refinement, where each shot
// can have a variable number of pixels being modeled, and one only needs to allocate the device once
// (with the largest expected number of pixels for a given shot)
// TODO clean up this logic a bit
if (cp.device_is_allocated && (cp.npix_allocated < Npix_to_model)){
printf("Need to re-allocate pixels, currently have %d allocated, but trying to model %d\n",
cp.npix_allocated, Npix_to_model);
exit(-1);
}
else if (db_cu_flags.Npix_to_allocate==-1){
db_cu_flags.Npix_to_allocate = Npix_to_model;
}
else if (Npix_to_model > db_cu_flags.Npix_to_allocate){
printf("Npix to model=%d is greater than the number of pixel requested for allocation (%d)!\n",
Npix_to_model, db_cu_flags.Npix_to_allocate);
exit(-1);
}
// support dynamic allocation for different numbers of sources
if ( cp.previous_nsource != 0 && cp.previous_nsource != db_beam.number_of_sources){
gpuErr(cudaFree(cp.cu_source_X));
gpuErr(cudaFree(cp.cu_source_Y));
gpuErr(cudaFree(cp.cu_source_Z));
gpuErr(cudaFree(cp.cu_source_I));
gpuErr(cudaFree(cp.cu_source_lambda));
printf("Reallocating for %d sources!:\n", db_beam.number_of_sources);
gpuErr(cudaMallocManaged(&cp.cu_source_X, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_source_Y, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_source_Z, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_source_I, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_source_lambda, db_beam.number_of_sources*sizeof(CUDAREAL)));
cp.previous_nsource = db_beam.number_of_sources;
}
if(cp.device_is_allocated){
if (db_flags.verbose){
printf("Will model %d pixels (GPU has %d pre-allocated pix)\n", Npix_to_model, cp.npix_allocated);
}
}
else{
if (db_flags.verbose){
printf("Will model %d pixels and allocate %d pix\n", Npix_to_model, db_cu_flags.Npix_to_allocate);
}
gpuErr(cudaMallocManaged(&cp.cu_source_X, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_source_Y, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_source_Z, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_source_I, db_beam.number_of_sources*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_source_lambda, db_beam.number_of_sources*sizeof(CUDAREAL)));
cp.previous_nsource = db_beam.number_of_sources;
gpuErr(cudaMallocManaged((void **)&cp.cu_UMATS, db_cryst.UMATS.size()*sizeof(MAT3)));
gpuErr(cudaMallocManaged((void **)&cp.cu_UMATS_RXYZ, db_cryst.UMATS_RXYZ.size()*sizeof(MAT3)));
gpuErr(cudaMallocManaged((void **)&cp.cu_AMATS, db_cryst.UMATS_RXYZ.size()*sizeof(MAT3)));
if (db_cryst.UMATS_RXYZ_prime.size()>0)
gpuErr(cudaMallocManaged((void **)&cp.cu_UMATS_RXYZ_prime, db_cryst.UMATS_RXYZ_prime.size()*sizeof(MAT3)));
if (db_cryst.UMATS_RXYZ_dbl_prime.size()>0)
gpuErr(cudaMallocManaged((void **)&cp.cu_UMATS_RXYZ_dbl_prime, db_cryst.UMATS_RXYZ_dbl_prime.size()*sizeof(MAT3)));
gpuErr(cudaMallocManaged((void **)&cp.cu_dB_Mats, db_cryst.dB_Mats.size()*sizeof(MAT3)));
gpuErr(cudaMallocManaged((void **)&cp.cu_dB2_Mats, db_cryst.dB2_Mats.size()*sizeof(MAT3)));
gpuErr(cudaMallocManaged((void **)&cp.cu_RotMats, db_cryst.RotMats.size()*sizeof(MAT3)));
gpuErr(cudaMallocManaged((void **)&cp.cu_dRotMats, db_cryst.dRotMats.size()*sizeof(MAT3)));
gpuErr(cudaMallocManaged((void **)&cp.cu_d2RotMats, db_cryst.d2RotMats.size()*sizeof(MAT3)));
gpuErr(cudaMallocManaged(&cp.cu_fdet_vectors, db_det.fdet_vectors.size()*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_sdet_vectors, db_det.fdet_vectors.size()*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_odet_vectors, db_det.fdet_vectors.size()*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_pix0_vectors, db_det.fdet_vectors.size()*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_close_distances, db_det.close_distances.size()*sizeof(CUDAREAL)));
if (db_cryst.fpfdp.size() > 0){
gpuErr(cudaMallocManaged(&cp.cu_fpfdp, db_cryst.fpfdp.size()*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_atom_data, db_cryst.atom_data.size()*sizeof(CUDAREAL)));
}
if(db_cryst.fpfdp_derivs.size() > 0)
gpuErr(cudaMallocManaged(&cp.cu_fpfdp_derivs, db_cryst.fpfdp_derivs.size()*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_refine_Bmat, 6*sizeof(bool)));
gpuErr(cudaMallocManaged(&cp.cu_refine_Umat, 3*sizeof(bool)));
gpuErr(cudaMallocManaged(&cp.cu_refine_Ncells, 3*sizeof(bool)));
gpuErr(cudaMallocManaged(&cp.cu_refine_panel_origin, 3*sizeof(bool)));
gpuErr(cudaMallocManaged(&cp.cu_refine_panel_rot, 3*sizeof(bool)));
gpuErr(cudaMallocManaged(&cp.cu_refine_lambda, 2*sizeof(bool)));
gpuErr(cudaMallocManaged(&cp.cu_Fhkl, db_cryst.FhklLinear.size()*sizeof(CUDAREAL)));
if (db_flags.complex_miller)
gpuErr(cudaMallocManaged(&cp.cu_Fhkl2, db_cryst.FhklLinear.size()*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged((void **)&cp.cu_dF_vecs, db_det.dF_vecs.size()*sizeof(VEC3)));
gpuErr(cudaMallocManaged((void **)&cp.cu_dS_vecs, db_det.dF_vecs.size()*sizeof(VEC3)));
//gettimeofday(&t3, 0);
gpuErr(cudaMallocManaged(&cp.cu_floatimage, db_cu_flags.Npix_to_allocate*sizeof(CUDAREAL) ));
if (db_flags.wavelength_img){
gpuErr(cudaMallocManaged(&cp.cu_wavelenimage, db_cu_flags.Npix_to_allocate*sizeof(CUDAREAL) ));
}
if (db_flags.refine_diffuse){
gpuErr(cudaMallocManaged(&cp.cu_d_diffuse_gamma_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_d_diffuse_sigma_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
}
if (db_flags.refine_fcell){
gpuErr(cudaMallocManaged(&cp.cu_d_fcell_images, db_cu_flags.Npix_to_allocate*1*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_d2_fcell_images, db_cu_flags.Npix_to_allocate*1*sizeof(CUDAREAL)));
}
if (db_flags.refine_eta){
gpuErr(cudaMallocManaged(&cp.cu_d_eta_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_d2_eta_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
}
if (std::count(db_flags.refine_Umat.begin(), db_flags.refine_Umat.end(), true) > 0){
gpuErr(cudaMallocManaged(&cp.cu_d_Umat_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL) ));
gpuErr(cudaMallocManaged(&cp.cu_d2_Umat_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL) ));
}
if (std::count(db_flags.refine_Ncells.begin(), db_flags.refine_Ncells.end(), true) > 0 || db_flags.refine_Ncells_def){
gpuErr(cudaMallocManaged(&cp.cu_d_Ncells_images, db_cu_flags.Npix_to_allocate*6*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_d2_Ncells_images, db_cu_flags.Npix_to_allocate*6*sizeof(CUDAREAL)));
}
if (std::count(db_flags.refine_panel_rot.begin(), db_flags.refine_panel_rot.end(), true) > 0)
gpuErr(cudaMallocManaged(&cp.cu_d_panel_rot_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
if (std::count(db_flags.refine_panel_origin.begin(), db_flags.refine_panel_origin.end(), true) > 0)
gpuErr(cudaMallocManaged(&cp.cu_d_panel_orig_images, db_cu_flags.Npix_to_allocate*3*sizeof(CUDAREAL)));
if (std::count(db_flags.refine_lambda.begin(), db_flags.refine_lambda.end(), true) > 0)
gpuErr(cudaMallocManaged(&cp.cu_d_lambda_images, db_cu_flags.Npix_to_allocate*2*sizeof(CUDAREAL)));
if (std::count(db_flags.refine_Bmat.begin(), db_flags.refine_Bmat.end(), true) > 0){
gpuErr(cudaMallocManaged(&cp.cu_d_Bmat_images, db_cu_flags.Npix_to_allocate*6*sizeof(CUDAREAL)));
gpuErr(cudaMallocManaged(&cp.cu_d2_Bmat_images, db_cu_flags.Npix_to_allocate*6*sizeof(CUDAREAL)));
}
if (db_flags.refine_fp_fdp)
gpuErr(cudaMallocManaged(&cp.cu_d_fp_fdp_images, db_cu_flags.Npix_to_allocate*2*sizeof(CUDAREAL)));
if(db_cryst.nominal_hkl.size() >0)
gpuErr(cudaMallocManaged(&cp.cu_nominal_hkl, db_cu_flags.Npix_to_allocate*3*sizeof(int)));
//gettimeofday(&t4, 0);
//time = (1000000.0*(t4.tv_sec-t3.tv_sec) + t4.tv_usec-t3.tv_usec)/1000.0;
//printf("TIME SPENT ALLOCATING (IMAGES ONLY): %3.10f ms \n", time);
gpuErr(cudaMallocManaged(&cp.cu_panels_fasts_slows, db_cu_flags.Npix_to_allocate*3*sizeof(panels_fasts_slows[0])));
cp.npix_allocated = db_cu_flags.Npix_to_allocate;
} // END of allocation
bool ALLOC = !cp.device_is_allocated; // shortcut variable
gettimeofday(&t2, 0);
time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
if (TIMERS.recording) TIMERS.cuda_alloc+= time;
if(db_flags.verbose>1)
printf("TIME SPENT ALLOCATING (TOTAL): %3.10f ms \n", time);
//ALLOC = false;
// BEGIN COPYING DATA
gettimeofday(&t1, 0);
bool FORCE_COPY=true;
// END step position
// BEGIN sources
if (db_cu_flags.update_sources || ALLOC || FORCE_COPY){
for (int i=0; i< db_beam.number_of_sources; i++){
VEC3 incident(db_beam.source_X[i], db_beam.source_Y[i], db_beam.source_Z[i]);
incident /= incident.norm();
cp.cu_source_X[i] = incident[0];
cp.cu_source_Y[i] = incident[1];
cp.cu_source_Z[i] = incident[2];
cp.cu_source_I[i] = db_beam.source_I[i];
cp.cu_source_lambda[i] = db_beam.source_lambda[i];
}
if(db_flags.verbose>1 )
printf("H2D sources\n");
}
// END sources
// UMATS
if (db_cu_flags.update_umats || ALLOC||FORCE_COPY){
for (int i=0; i< db_cryst.UMATS.size(); i++)
cp.cu_UMATS[i] = db_cryst.UMATS[i];
for (int i=0; i < db_cryst.UMATS_RXYZ.size(); i++)
cp.cu_UMATS_RXYZ[i] = db_cryst.UMATS_RXYZ[i];
for (int i=0; i < db_cryst.UMATS_RXYZ_prime.size(); i++)
cp.cu_UMATS_RXYZ_prime[i] = db_cryst.UMATS_RXYZ_prime[i];
for (int i=0; i < db_cryst.UMATS_RXYZ_dbl_prime.size(); i++)
cp.cu_UMATS_RXYZ_dbl_prime[i] = db_cryst.UMATS_RXYZ_dbl_prime[i];
if(db_flags.verbose>1)
printf("H2D Done copying Umats\n") ;
}
// END UMATS
if (db_cu_flags.update_umats || ALLOC||FORCE_COPY){
MAT3 Amat_init = db_cryst.eig_U*db_cryst.eig_B*1e10*(db_cryst.eig_O.transpose());
for(int i_mos =0; i_mos< db_cryst.UMATS_RXYZ.size(); i_mos++){
cp.cu_AMATS[i_mos] = (db_cryst.UMATS_RXYZ[i_mos]*Amat_init).transpose();
}
if(db_flags.verbose>1)
printf("H2D Done copying Amats\n") ;
}
// BMATS
if(db_cu_flags.update_dB_mats || ALLOC || FORCE_COPY){
for (int i=0; i< db_cryst.dB_Mats.size(); i++)
cp.cu_dB_Mats[i] = db_cryst.dB_Mats[i];
for (int i=0; i< db_cryst.dB2_Mats.size(); i++)
cp.cu_dB2_Mats[i] = db_cryst.dB2_Mats[i];
if(db_flags.verbose>1)
printf("H2D Done copying dB_Mats\n") ;
}
// END BMATS
// ROT MATS
if(db_cu_flags.update_rotmats || ALLOC || FORCE_COPY){
for (int i=0; i<db_cryst.RotMats.size(); i++)
cp.cu_RotMats[i] = db_cryst.RotMats[i];
for (int i=0; i<db_cryst.dRotMats.size(); i++)
cp.cu_dRotMats[i] = db_cryst.dRotMats[i];
for (int i=0; i<db_cryst.d2RotMats.size(); i++)
cp.cu_d2RotMats[i] = db_cryst.d2RotMats[i];
if (db_flags.verbose>1)
printf("H2D Done copying rotmats\n");
}
// END ROT MATS
// DETECTOR VECTORS
if (db_cu_flags.update_detector || ALLOC || FORCE_COPY){
for (int i=0; i<db_det.fdet_vectors.size(); i++){
cp.cu_fdet_vectors[i] = db_det.fdet_vectors[i];
cp.cu_sdet_vectors[i] = db_det.sdet_vectors[i];
cp.cu_odet_vectors[i] = db_det.odet_vectors[i];
cp.cu_pix0_vectors[i] = db_det.pix0_vectors[i];
}
for(int i=0; i < db_det.close_distances.size();i++){
cp.cu_close_distances[i] = db_det.close_distances[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying detector vectors\n");
}
// END DETECTOR VECTORS
if ( ALLOC || FORCE_COPY){
for(int i=0; i< db_cryst.nominal_hkl.size(); i++){
cp.cu_nominal_hkl[i] = db_cryst.nominal_hkl[i];
}
for (int i=0; i< db_cryst.atom_data.size(); i++){
cp.cu_atom_data[i] = db_cryst.atom_data[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying atom data\n");
for(int i=0; i< db_cryst.fpfdp.size(); i++){
cp.cu_fpfdp[i] = db_cryst.fpfdp[i];
}
for(int i=0; i< db_cryst.fpfdp_derivs.size(); i++){
cp.cu_fpfdp_derivs[i] = db_cryst.fpfdp_derivs[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying fprime and fdblprime\n");
}
// BEGIN REFINEMENT FLAGS
if (db_cu_flags.update_refine_flags || ALLOC || FORCE_COPY){
for (int i=0; i<3; i++){
cp.cu_refine_Umat[i] = db_flags.refine_Umat[i];
cp.cu_refine_Ncells[i] = db_flags.refine_Ncells[i];
cp.cu_refine_panel_origin[i] = db_flags.refine_panel_origin[i];
cp.cu_refine_panel_rot[i] = db_flags.refine_panel_rot[i];
}
for(int i=0; i<2; i++)
cp.cu_refine_lambda[i] = db_flags.refine_lambda[i];
for(int i=0; i<6; i++)
cp.cu_refine_Bmat[i] = db_flags.refine_Bmat[i];
if (db_flags.verbose>1)
printf("H2D Done copying refinement flags\n");
}
// END REFINEMENT FLAGS
// BEGIN Fhkl
if (db_cu_flags.update_Fhkl || ALLOC || FORCE_COPY){
for(int i=0; i < db_cryst.FhklLinear.size(); i++){
cp.cu_Fhkl[i] = db_cryst.FhklLinear[i];
if (db_flags.complex_miller)
cp.cu_Fhkl2[i] = db_cryst.Fhkl2Linear[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying step Fhkl\n");
}
// END Fhkl
// BEGIN panel derivative vecs
if(db_cu_flags.update_panel_deriv_vecs || ALLOC || FORCE_COPY){
for (int i=0; i<db_det.dF_vecs.size(); i++){
cp.cu_dF_vecs[i] = db_det.dF_vecs[i];
cp.cu_dS_vecs[i] = db_det.dS_vecs[i];
}
if (db_flags.verbose>1)
printf("H2D Done copying step panel derivative vectors\n");
}
// END panel derivative vecs
// BEGIN panels fasts slows
if (db_cu_flags.update_panels_fasts_slows || ALLOC || FORCE_COPY){
for (int i=0; i< panels_fasts_slows.size(); i++)
cp.cu_panels_fasts_slows[i] = panels_fasts_slows[i];
if (db_flags.verbose>1)
printf("H2D Done copying panels_fasts_slows\n");
}
// END panels fasts slows
gettimeofday(&t2, 0);
time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
if (TIMERS.recording) TIMERS.cuda_copy_to_dev += time;
if(db_flags.verbose>1)
printf("TIME SPENT COPYING DATA HOST->DEV: %3.10f ms \n", time);
cp.device_is_allocated = true;
error_msg(cudaGetLastError(), "after copy to device");
gettimeofday(&t1, 0);
int Npanels = db_det.fdet_vectors.size()/3;
int num_atoms = db_cryst.atom_data.size()/5;
if (db_cryst.fpfdp.size() == 0){ // note cannot use atom data if fpfdp is 0, make this cleaner
num_atoms=0;
}
//int sm_size = number_of_sources*5*sizeof(CUDAREAL);
//gpu_sum_over_steps<<<numblocks, blocksize, sm_size >>>(
bool aniso_eta = db_cryst.UMATS_RXYZ.size() != db_cryst.UMATS_RXYZ_prime.size();
bool use_nominal_hkl = !db_cryst.nominal_hkl.empty();
gpu_sum_over_steps<<<numblocks, blocksize>>>(
Npix_to_model, cp.cu_panels_fasts_slows,
cp.cu_floatimage,
cp.cu_wavelenimage,
cp.cu_d_Umat_images, cp.cu_d2_Umat_images,
cp.cu_d_Bmat_images, cp.cu_d2_Bmat_images,
cp.cu_d_Ncells_images, cp.cu_d2_Ncells_images,
cp.cu_d_fcell_images, cp.cu_d2_fcell_images,
cp.cu_d_eta_images, cp.cu_d2_eta_images,
cp.cu_d_lambda_images, cp.cu_d2_lambda_images,
cp.cu_d_panel_rot_images, cp.cu_d2_panel_rot_images,
cp.cu_d_panel_orig_images, cp.cu_d2_panel_orig_images,
cp.cu_d_fp_fdp_images,
db_steps.Nsteps, db_flags.printout_fpixel, db_flags.printout_spixel, db_flags.printout, db_cryst.default_F,
db_det.oversample, db_flags.oversample_omega, db_det.subpixel_size, db_det.pixel_size,
db_det.detector_thickstep, db_det.detector_thick, cp.cu_close_distances, db_det.detector_attnlen,
db_det.detector_thicksteps, db_beam.number_of_sources, db_cryst.phisteps, db_cryst.UMATS.size(),
db_flags.use_lambda_coefficients, db_beam.lambda0, db_beam.lambda1,
db_cryst.eig_U, db_cryst.eig_O, db_cryst.eig_B, db_cryst.RXYZ,
cp.cu_dF_vecs,
cp.cu_dS_vecs,
cp.cu_UMATS_RXYZ,
cp.cu_UMATS_RXYZ_prime,
cp.cu_UMATS_RXYZ_dbl_prime,
cp.cu_RotMats,
cp.cu_dRotMats,
cp.cu_d2RotMats,
cp.cu_UMATS,
cp.cu_dB_Mats,
cp.cu_dB2_Mats,
cp.cu_AMATS,
cp.cu_source_X, cp.cu_source_Y, cp.cu_source_Z, cp.cu_source_lambda, cp.cu_source_I,
db_beam.kahn_factor,
db_cryst.Na, db_cryst.Nb, db_cryst.Nc,
db_cryst.Nd, db_cryst.Ne, db_cryst.Nf,
db_cryst.phi0, db_cryst.phistep,
db_cryst.spindle_vec, db_beam.polarization_axis,
db_cryst.h_range, db_cryst.k_range, db_cryst.l_range,
db_cryst.h_max, db_cryst.h_min, db_cryst.k_max, db_cryst.k_min, db_cryst.l_max, db_cryst.l_min, db_cryst.dmin,
db_cryst.fudge, db_flags.complex_miller, db_flags.verbose, db_flags.only_save_omega_kahn,
db_flags.isotropic_ncells, db_flags.compute_curvatures,
cp.cu_Fhkl, cp.cu_Fhkl2,
cp.cu_refine_Bmat, cp.cu_refine_Ncells, db_flags.refine_Ncells_def, cp.cu_refine_panel_origin, cp.cu_refine_panel_rot,
db_flags.refine_fcell, cp.cu_refine_lambda, db_flags.refine_eta, cp.cu_refine_Umat,
cp.cu_fdet_vectors, cp.cu_sdet_vectors,
cp.cu_odet_vectors, cp.cu_pix0_vectors,
db_flags.nopolar, db_flags.point_pixel, db_beam.fluence, db_cryst.r_e_sqr, db_cryst.spot_scale, Npanels, aniso_eta, db_flags.no_Nabc_scale,
cp.cu_fpfdp, cp.cu_fpfdp_derivs, cp.cu_atom_data, num_atoms,
db_flags.refine_fp_fdp, cp.cu_nominal_hkl, use_nominal_hkl, db_cryst.anisoU, db_cryst.anisoG, db_flags.use_diffuse,
cp.cu_d_diffuse_gamma_images, cp.cu_d_diffuse_sigma_images,
db_flags.refine_diffuse, db_flags.gamma_miller_units, db_flags.refine_Icell,
db_flags.wavelength_img, db_cryst.laue_group_num, db_cryst.stencil_size);
error_msg(cudaGetLastError(), "after kernel call");
cudaDeviceSynchronize();
error_msg(cudaGetLastError(), "after kernel completion");
if(db_flags.verbose>1)
printf("KERNEL_COMPLETE gpu_sum_over_steps\n");
gettimeofday(&t2, 0);
time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
if (TIMERS.recording) TIMERS.cuda_kernel += time;
if(db_flags.verbose>1)
printf("TIME SPENT(KERNEL): %3.10f ms \n", time);
gettimeofday(&t1, 0);
// COPY BACK FROM DEVICE
for (int i=0; i< Npix_to_model; i++){
floatimage[i] = cp.cu_floatimage[i];
}
if(db_flags.wavelength_img){
for (int i=0; i< Npix_to_model; i++){
d_image.wavelength[i] = cp.cu_wavelenimage[i];
}
}
if (db_flags.refine_fcell){
for (int i=0; i<Npix_to_model; i++){
d_image.fcell[i] = cp.cu_d_fcell_images[i];
d2_image.fcell[i] = cp.cu_d2_fcell_images[i];
}
}
if (std::count(db_flags.refine_Umat.begin(), db_flags.refine_Umat.end(), true) > 0){
for (int i=0; i<3*Npix_to_model; i++){
d_image.Umat[i] = cp.cu_d_Umat_images[i];
d2_image.Umat[i] = cp.cu_d2_Umat_images[i];
}
}
if (std::count(db_flags.refine_panel_rot.begin(), db_flags.refine_panel_rot.end(), true) > 0){
for (int i=0; i<3*Npix_to_model; i++)
d_image.panel_rot[i] = cp.cu_d_panel_rot_images[i];
}
if (std::count(db_flags.refine_panel_origin.begin(), db_flags.refine_panel_origin.end(), true) > 0){
for (int i=0; i<3*Npix_to_model; i++)
d_image.panel_orig[i] = cp.cu_d_panel_orig_images[i];
}
if (db_flags.refine_eta){
for (int i=0; i<3*Npix_to_model; i++){
d_image.eta[i] = cp.cu_d_eta_images[i];
d2_image.eta[i] = cp.cu_d2_eta_images[i];
}
}
if (std::count(db_flags.refine_Ncells.begin(), db_flags.refine_Ncells.end(), true) > 0 || db_flags.refine_Ncells_def){
for(int i=0; i<6*Npix_to_model; i++){
d_image.Ncells[i] = cp.cu_d_Ncells_images[i];
d2_image.Ncells[i] = cp.cu_d2_Ncells_images[i];
}
}
if (db_flags.refine_diffuse){
for(int i=0; i<3*Npix_to_model; i++){
d_image.diffuse_gamma[i] = cp.cu_d_diffuse_gamma_images[i];
d_image.diffuse_sigma[i] = cp.cu_d_diffuse_sigma_images[i];
}
}
if (std::count(db_flags.refine_Bmat.begin(), db_flags.refine_Bmat.end(), true) > 0){
for(int i=0; i<6*Npix_to_model; i++){
d_image.Bmat[i] = cp.cu_d_Bmat_images[i];
d2_image.Bmat[i] = cp.cu_d2_Bmat_images[i];
}
}
if (std::count(db_flags.refine_lambda.begin(), db_flags.refine_lambda.end(), true) > 0){
for(int i=0; i<2*Npix_to_model; i++)
d_image.lambda[i] = cp.cu_d_lambda_images[i];
}
if (db_flags.refine_fp_fdp){
for (int i=0; i< 2*Npix_to_model; i++)
d_image.fp_fdp[i] = cp.cu_d_fp_fdp_images[i];
}
gettimeofday(&t2, 0);
time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
if (TIMERS.recording) TIMERS.cuda_copy_from_dev += time;
if(db_flags.verbose>1)
printf("TIME SPENT COPYING BACK : %3.10f ms \n", time);
error_msg(cudaGetLastError(), "After copy to host");
}
void freedom(diffBragg_cudaPointers& cp){
if (cp.device_is_allocated){
gpuErr(cudaFree( cp.cu_floatimage));
gpuErr(cudaFree( cp.cu_wavelenimage));
gpuErr(cudaFree( cp.cu_d_Umat_images));
gpuErr(cudaFree( cp.cu_d_Bmat_images));
gpuErr(cudaFree( cp.cu_d_Ncells_images));
gpuErr(cudaFree( cp.cu_d_diffuse_gamma_images));
gpuErr(cudaFree( cp.cu_d_diffuse_sigma_images));
gpuErr(cudaFree( cp.cu_d2_Umat_images));
gpuErr(cudaFree( cp.cu_d2_Bmat_images));
gpuErr(cudaFree( cp.cu_d2_Ncells_images));
gpuErr(cudaFree( cp.cu_d_eta_images));
gpuErr(cudaFree( cp.cu_d2_eta_images));
gpuErr(cudaFree( cp.cu_d_fcell_images));
gpuErr(cudaFree( cp.cu_d2_fcell_images));
gpuErr(cudaFree( cp.cu_d_lambda_images));
gpuErr(cudaFree( cp.cu_d_panel_rot_images));
gpuErr(cudaFree( cp.cu_d_panel_orig_images));
gpuErr(cudaFree( cp.cu_d_sausage_XYZ_scale_images));
gpuErr(cudaFree( cp.cu_d_fp_fdp_images));
gpuErr(cudaFree(cp.cu_Fhkl));
gpuErr(cudaFree(cp.cu_Fhkl2));
gpuErr(cudaFree(cp.cu_fdet_vectors));
gpuErr(cudaFree(cp.cu_sdet_vectors));
gpuErr(cudaFree(cp.cu_odet_vectors));
gpuErr(cudaFree(cp.cu_pix0_vectors));
gpuErr(cudaFree(cp.cu_close_distances));
gpuErr(cudaFree(cp.cu_nominal_hkl));
gpuErr(cudaFree(cp.cu_atom_data));
gpuErr(cudaFree(cp.cu_fpfdp));
gpuErr(cudaFree(cp.cu_fpfdp_derivs));
gpuErr(cudaFree(cp.cu_source_X));
gpuErr(cudaFree(cp.cu_source_Y));
gpuErr(cudaFree(cp.cu_source_Z));
gpuErr(cudaFree(cp.cu_source_I));
gpuErr(cudaFree(cp.cu_source_lambda));
gpuErr(cudaFree(cp.cu_UMATS));
gpuErr(cudaFree(cp.cu_UMATS_RXYZ));
gpuErr(cudaFree(cp.cu_AMATS));
gpuErr(cudaFree(cp.cu_UMATS_RXYZ_prime));
gpuErr(cudaFree(cp.cu_UMATS_RXYZ_dbl_prime));
gpuErr(cudaFree(cp.cu_RotMats));
gpuErr(cudaFree(cp.cu_dRotMats));
gpuErr(cudaFree(cp.cu_d2RotMats));
gpuErr(cudaFree(cp.cu_dB_Mats));
gpuErr(cudaFree(cp.cu_dB2_Mats));
//gpuErr(cudaFree(cp.cu_sausages_RXYZ));
//gpuErr(cudaFree(cp.cu_d_sausages_RXYZ));
//gpuErr(cudaFree(cp.cu_sausages_U));
//gpuErr(cudaFree(cp.cu_sausages_scale));
gpuErr(cudaFree(cp.cu_dF_vecs));
gpuErr(cudaFree(cp.cu_dS_vecs));
gpuErr(cudaFree(cp.cu_refine_Bmat));
gpuErr(cudaFree(cp.cu_refine_Umat));
gpuErr(cudaFree(cp.cu_refine_Ncells));
gpuErr(cudaFree(cp.cu_refine_lambda));
gpuErr(cudaFree(cp.cu_refine_panel_origin));
gpuErr(cudaFree(cp.cu_refine_panel_rot));
gpuErr(cudaFree(cp.cu_panels_fasts_slows));
cp.device_is_allocated = false;
cp.npix_allocated = 0;
}
}
|
8065622b3a3fbb7683661032954150aef93427a2.hip | // !!! This is a file automatically generated by hipify!!!
// This is a modification of Alex's convolution kernel, extending it from 2D to 3D.
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define LO8(x) ((x) & 0x000000FF)
#define MI16(x) (((x) & 0x0000FFFF) >> 8)
#define HI24(x) (((x) & 0x00FFFFFF) >> 16)
#define MUL24(x,y) ((x) * (y))
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
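// Illustrative note (not part of the original source): LO8/MI16/HI24 unpack three small
// coordinates that are packed into a single int further below (see pxDivs in kConvolve_weight),
// assuming each coordinate fits in 8 bits:
//   int packed = (z << 16) + (y << 8) + x;
//   HI24(packed) == z;  MI16(packed) == y;  LO8(packed) == x;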
// Draws one categorical sample per row: thread i scans the k probabilities of row i and
// writes a one-hot indicator into output[i*k .. i*k+k-1], picking the bin whose cumulative
// probability range contains random[i].
__global__ void kSampleMultinomial(int* output, float* distribution, float* random, int k, int n){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < n){
distribution += k * id;
random += id;
output += k * id;
float preSum = 0, nowSum = 0;
for(int i = 0; i < k; i++){
nowSum += distribution[i];
output[i] = random[0] >= preSum && random[0] < nowSum;
preSum = nowSum;
}
}
}
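// Hypothetical host-side launch sketch (d_output, d_distribution and d_random are assumed
// device buffers of k*n ints, k*n floats and n floats respectively; not part of the original file).
// One thread handles one of the n rows, so a 1-D launch covering n threads is sufficient:
//   int threads = 128;
//   int blocks = DIVUP(n, threads);
//   hipLaunchKernelGGL(kSampleMultinomial, dim3(blocks), dim3(threads), 0, 0,
//                      d_output, d_distribution, d_random, k, n);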
// Elementwise exponential, output[i] = __expf(input[i]), processed with a grid-stride loop.
__global__ void kExp(float* output, float* input, unsigned int numElements){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(int i = id; i < numElements; i += numThreads)
output[i] = __expf(input[i]);
}
// Elementwise division, output[i] = leftInput[i] / rightInput[i] (fast __fdividef), via a grid-stride loop.
__global__ void kDivide(float* output, float* leftInput, float* rightInput, unsigned int numElements){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(int i = id; i < numElements; i += numThreads)
output[i] = __fdividef(leftInput[i], rightInput[i]);
}
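// Illustrative note (not part of the original source): because kExp and kDivide use grid-stride
// loops, any launch configuration (numblocks x blocksize) covers all numElements; a fixed launch
// such as 128 blocks of 128 threads works regardless of the array size.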
__global__ void kConvolve_forward(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride) {
__shared__ float shFilters[4*1][4 * 4]; // pre-load 4 pixels from 4*4 filters
__shared__ float shImages[4*1][32 * 1]; // pre-load 4 pixels from 32*1 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int blocksPerModule = numFilters / (4*4);
const int moduleIdx = blockIdx.x / blocksPerModule;
const int blockFilterIdx = blockIdx.x % blocksPerModule;
const int tidx = threadIdx.x * 32 + threadIdx.y;
const int imgLoadModPosZ = (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = ((moduleIdx / numModulesX) % numModulesY )* moduleStride;
const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 4);
const int shFilterLoadX = tidx % (4 * 4);
const int myImgIdx = blockIdx.y * 32 * 1 + threadIdx.y;
images += myImgIdx;
filters += 4 * 4 * blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx * 4 * 4 + threadIdx.x) * numImages * numModulesZ * numModulesY * numModulesX
+ myImgIdx;
float prod[4][1];
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*4 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/4) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.x;
if (pixIdx < filterPixels) {
const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
const int y = paddingStart + imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = paddingStart + imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = images[imgStride * (c * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x) + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*1; i++) {
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.y] * shFilters[i][threadIdx.x + f * 4];
}
}
}
__syncthreads();
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 4; f++) {
targets[g * 32 + f * 4 * numImages * numModulesZ * numModulesY * numModulesX] = prod[f][g];
}
}
}
}
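// Illustrative note (inferred from the indexing above, not stated in the original file): the
// forward kernel appears to expect a launch of roughly the form
//   dim3 threads(4, 32);  // 4 filters x 32 images handled per thread block
//   dim3 blocks(numModulesZ*numModulesY*numModulesX * (numFilters/16), DIVUP(numImages, 32));
// so that each block produces a 16-filter by 32-image tile of one output module.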
__global__ void kConvolve_weight(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum, const float scaleOutputs) {
__shared__ float shImages[5 * 8 * 1][32]; // preload 32 cases of 8 * 5 pixels
__shared__ float shHidActs[16][32 + 1]; // preload 32 cases of 16 hidActs
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / 16;
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 16 * (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * 8 * 5;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * 1
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shImgLoad = &shImages[loadY][loadX];
float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[1][5];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int p = 0; p < 5; p++) {
prod[c][p] = 0;
}
}
__shared__ int pxDivs[8*5];
if (tidx < 8 * 5) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8 * 5) {
/*
* As long as 8 * 16 is divisible by 32 this will loop the right
* number of times.
*
* This will load some imgGrads from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8 * 5; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((8 * 5) % (16 * 8 / 32) == 0 || y + loadY < 8 * 5) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]); // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride;
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (16 % (16 * 8 / 32) == 0 || y + loadY < 16) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 5; p++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
prod[c][p] += shImages[threadIdx.y + p * 8 + c * 5 * 8][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
#pragma unroll
for (int p = 0; p < 5; p++) {
if (blockPixelOffset + p * 8 + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[p * 8 * numFilters + c * filterPixels * numFilters] = scaleOutputs * prod[c][p];
}
}
}
}
__global__ void kConvolve_backward(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int numImages, const int numFilters, const int filterSize,
const int imgSizeZ, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride) {
__shared__ float shFilters[1*16][16 + 1]; // load 16 filters at a time. See below.
__shared__ float shHidActs[16][16*2]; // each block deals with 16 * imgPerThread images.
const int blockCaseIdx = blockIdx.x * 16 * 2;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int numRegionsY = DIVUP(imgSizeY, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = (blockRegionIdx / numRegionsX) % numRegionsY;
const int blockRegionIdxZ = blockRegionIdx / (numRegionsX * numRegionsY);
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int blockRegionFront = blockRegionIdxZ;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxZ = blockRegionFront;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX;
const bool isPxInImg = pxZ < imgSizeZ && pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32; // load 32 cases at a time.
hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[1][2];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] = 0;
}
}
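// Determine, for each dimension, the range of modules whose filters can overlap
// this block's image region (only those modules contribute to its pixels).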
const int startZ = blockRegionFront - paddingStart < filterSize ? 0
: 1 + (blockRegionFront - paddingStart -filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockRegionFront + 3 - paddingStart) / moduleStride);
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int mz = startZ; mz < endZ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInModuleZ = pxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleZ >= 0 && pxInModuleZ < filterSize && pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleZ * filterSize * filterSize + pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 2 * 16; i += 32) { // IMAGES
if (!true || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = true ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * 1 * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
#pragma unroll
for (int i = 0; i < 2; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[c * imgPixels * numImages + i * 16] = prod[c][i];
}
}
}
}
}
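// Illustrative launch configuration for kConvolve_backward above, inferred from its
// indexing; the actual host-side launch is not part of this file and may differ:
//   dim3 threads(16, 16);   // threadIdx.x -> cases, threadIdx.y -> pixel within the 4x4 region
//   dim3 blocks(DIVUP(numImages, 16 * 2),
//               DIVUP(imgSizeX, 4) * DIVUP(imgSizeY, 4) * imgSizeZ);
//   hipLaunchKernelGGL((kConvolve_backward), blocks, threads, 0, 0,
//                      targets, hidActs, filters, numModulesZ, numModulesY, numModulesX,
//                      numImages, numFilters, filterSize, imgSizeZ, imgSizeY, imgSizeX,
//                      paddingStart, moduleStride);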
__global__ void kConvolve_forward_c(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups) {
__shared__ float shFilters[4*2][4 * 8]; // pre-load 4 pixels from 4*8 filters
__shared__ float shImages[4*2][32 * 1]; // pre-load 4 pixels from 32*1 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (4*8);
const int moduleIdx = blockIdx.x / blocksPerModule;
const int blockFilterIdx = 8 * 4 * (blockIdx.x % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY * numModulesZ;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.x * 32 + threadIdx.y;
const int imgLoadModPosZ = paddingStart + (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((moduleIdx / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 8);
const int shFilterLoadX = tidx % (4 * 8);
const int myImgIdx = blockIdx.y * 32 * 1 + threadIdx.y;
images += blockColorIdx * imgPixels * imgStride + myImgIdx;
filters +=blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.x) * numImages * numModules
+ myImgIdx;
float prod[8][1];
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
// __shared__ int imgPos[]
for (int oc = 0; oc < numFilterColors; oc += 2) { // oc stands for outer color (loop)
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*8 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/8) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.x;
if (pixIdx < filterPixels) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (oc * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x)];
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = m[c * imgStride * imgPixels + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*2; i++) {
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.y] * shFilters[i][threadIdx.x + f * 4];
}
}
}
__syncthreads();
}
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 8; f++) {
targets[g * 32 + f * 4 * numImages * numModules] = prod[f][g];
}
}
}
}
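// Illustrative launch configuration for kConvolve_forward_c above, inferred from its
// indexing (an assumption, not taken from the original host code):
//   dim3 threads(4, 32);    // threadIdx.x -> filter offset within a group of 4, threadIdx.y -> image
//   dim3 blocks(numModules * (numFilters / 32), DIVUP(numImages, 32));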
__global__ void kConvolve_weight_c(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleOutputs) {
__shared__ float shImages[8 * 8][32]; // preload 32 cases of 4 * pixelsPerThread pixels
__shared__ float shHidActs[2 * 16][32 + 1]; // preload 32 cases of 32 hidacts
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (16 * 2);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 2 * 16 * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesZ * numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/8)) * 8;
const int filterColorIdx = (blockIdx.y % (numFilterColors/8)) * 8;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride + loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[8][2];
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] = 0;
}
}
// This avoids doing a division in an inner loop
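// pxDivs packs each filter pixel's (z, y, x) offsets into a single int
// (z << 16 | y << 8 | x); they are unpacked below with HI24/MI16/LO8.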
__shared__ int pxDivs[8];
if (tidx < 8) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesY * numModulesX)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8) {
/*
* As long as 4 * 32 is divisible by 32 this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (8 % (16 * 8 / 32) == 0 || y + loadY < 8) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 * 2 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16 * 2; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((16 * 2) % (16 * 8 / 32) == 0 || y + loadY < 16 * 2) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] += shImages[threadIdx.y + c * 8][i] * shHidActs[threadIdx.x + f * 16][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
#pragma unroll
for (int f = 0; f < 2; f++) {
#pragma unroll
for (int c = 0; c < 8; c++) {
targets[c * filterPixels * numFilters + f * 16] = scaleOutputs * prod[c][f];
}
}
}
}
__global__ void kConvolve_backward_c(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups) {
__shared__ float shFilters[4*4][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][32*1];
const int numImgBlocks = DIVUP(numImages,32*1);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 32*1;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * 4*4; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = (blockPixelIdx / imgSizeX) % imgSizeY;
const int blockPixelIdxZ = blockPixelIdx / (imgSizeX * imgSizeY);
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesZ * numModulesY * numModulesX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[4][1];
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] = 0;
}
}
const int startZ = blockPixelIdxZ - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxZ - paddingStart - filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockPixelIdxZ - paddingStart) / moduleStride);
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int mz = startZ; mz < endZ ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInFilterZ = blockPixelIdxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterZ * filterSize * filterSize + pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 1 * 32; i += 32) {
if (!true || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // load 16 rows of 32*1 cols, 4 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // load 16 rows of 32*1 cols, 4 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = 0;
}
}
}
const float* fLoad = true ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < 4*4; i+= 32*4/16) {
if ((4*4) % (32*4/16) == 0 || i + filtersLoadY < 4*4) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] += shFilters[c * 4 + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * 32];
}
}
}
__syncthreads();
}
}
}
}
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 4; c++) {
targets[c * 4 * imgPixels * numImages + i * 32] = prod[c][i];
}
}
}
}
| 8065622b3a3fbb7683661032954150aef93427a2.cu | // This is modification of Alex's convolution kernel extending 2d to 3d.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define LO8(x) ((x) & 0x000000FF)
#define MI16(x) (((x) & 0x0000FFFF) >> 8)
#define HI24(x) (((x) & 0x00FFFFFF) >> 16)
#define MUL24(x,y) ((x) * (y))
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
__global__ void kSampleMultinomial(int* output, float* distribution, float* random, int k, int n){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
if(id < n){
distribution += k * id;
random += id;
output += k * id;
float preSum = 0, nowSum = 0;
for(int i = 0; i < k; i++){
nowSum += distribution[i];
output[i] = random[0] >= preSum && random[0] < nowSum;
preSum = nowSum;
}
}
}
__global__ void kExp(float* output, float* input, unsigned int numElements){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(int i = id; i < numElements; i += numThreads)
output[i] = __expf(input[i]);
}
__global__ void kDivide(float* output, float* leftInput, float* rightInput, unsigned int numElements){
const unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for(int i = id; i < numElements; i += numThreads)
output[i] = __fdividef(leftInput[i], rightInput[i]);
}
__global__ void kConvolve_forward(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride) {
__shared__ float shFilters[4*1][4 * 4]; // pre-load 4 pixels from 4*4 filters
__shared__ float shImages[4*1][32 * 1]; // pre-load 4 pixels from 32*2 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int blocksPerModule = numFilters / (4*4);
const int moduleIdx = blockIdx.x / blocksPerModule;
const int blockFilterIdx = blockIdx.x % blocksPerModule;
const int tidx = threadIdx.x * 32 + threadIdx.y;
const int imgLoadModPosZ = (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = ((moduleIdx / numModulesX) % numModulesY )* moduleStride;
const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 4);
const int shFilterLoadX = tidx % (4 * 4);
const int myImgIdx = blockIdx.y * 32 * 1 + threadIdx.y;
images += myImgIdx;
filters += 4 * 4 * blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx * 4 * 4 + threadIdx.x) * numImages * numModulesZ * numModulesY * numModulesX
+ myImgIdx;
float prod[4][1];
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*4 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/4) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.x;
if (pixIdx < filterPixels) {
const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
const int y = paddingStart + imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = paddingStart + imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = images[imgStride * (c * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x) + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*1; i++) {
#pragma unroll
for(int f = 0; f < 4; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.y] * shFilters[i][threadIdx.x + f * 4];
}
}
}
__syncthreads();
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 4; f++) {
targets[g * 32 + f * 4 * numImages * numModulesZ * numModulesY * numModulesX] = prod[f][g];
}
}
}
}
__global__ void kConvolve_weight(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int partialSum, const float scaleOutputs) {
__shared__ float shImages[5 * 8 * 1][32]; // preload 32 cases of 8 * 5 pixels
__shared__ float shHidActs[16][32 + 1]; // preload 32 cases of 16 hidActs
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterBlocksPerModule = numFilters / 16;
const int outputModuleIdx = blockIdx.x / filterBlocksPerModule;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 16 * (blockIdx.x % filterBlocksPerModule);
// const int moduleStride = (imgSize - filterSize + 1) / numModulesX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int blockPixelOffset = blockIdx.y * 8 * 5;
images += loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += (outputModuleIdx * numFilters) * filterPixels * 1
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shImgLoad = &shImages[loadY][loadX];
float* shHidActLoad = &shHidActs[loadY][loadX];
float prod[1][5];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int p = 0; p < 5; p++) {
prod[c][p] = 0;
}
}
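// pxDivs caches each filter pixel's (z, y, x) offsets packed into one int
// (z << 16 | y << 8 | x), decoded later with HI24/MI16/LO8 so the inner loop avoids divisions.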
__shared__ int pxDivs[8*5];
if (tidx < 8 * 5) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8 * 5) {
/*
* As long as 8 * 16 is divisible by 32 this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8 * 5; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((8 * 5) % (16 * 8 / 32) == 0 || y + loadY < 8 * 5) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]); // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride;
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 1; c++) {
shImgLoad[(y + c * 5 * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (16 % (16 * 8 / 32) == 0 || y + loadY < 16) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int p = 0; p < 5; p++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int c = 0; c < 1; c++) {
prod[c][p] += shImages[threadIdx.y + p * 8 + c * 5 * 8][i] * shHidActs[threadIdx.x][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
#pragma unroll
for (int p = 0; p < 5; p++) {
if (blockPixelOffset + p * 8 + threadIdx.y < filterPixels) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[p * 8 * numFilters + c * filterPixels * numFilters] = scaleOutputs * prod[c][p];
}
}
}
}
__global__ void kConvolve_backward(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int numImages, const int numFilters, const int filterSize,
const int imgSizeZ, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride) {
__shared__ float shFilters[1*16][16 + 1]; // load 16 filters at a time. See below.
__shared__ float shHidActs[16][16*2]; // each block deals with 16 * imgPerThread images.
const int blockCaseIdx = blockIdx.x * 16 * 2;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int numRegionsY = DIVUP(imgSizeY, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = (blockRegionIdx / numRegionsX) % numRegionsY;
const int blockRegionIdxZ = blockRegionIdx / (numRegionsX * numRegionsY);
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int blockRegionFront = blockRegionIdxZ;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxZ = blockRegionFront;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX;
const bool isPxInImg = pxZ < imgSizeZ && pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesZ * numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32; // load 32 cases at a time.
hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[1][2];
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] = 0;
}
}
const int startZ = blockRegionFront - paddingStart < filterSize ? 0
: 1 + (blockRegionFront - paddingStart -filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockRegionFront + 3 - paddingStart) / moduleStride);
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int mz = startZ; mz < endZ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInModuleZ = pxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleZ >= 0 && pxInModuleZ < filterSize && pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleZ * filterSize * filterSize + pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 2 * 16; i += 32) { // IMAGES
if (!true || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of 2*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * 2 + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = true ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * 1 * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < 1; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < 1; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 2; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
#pragma unroll
for (int i = 0; i < 2; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < 1; c++) {
targets[c * imgPixels * numImages + i * 16] = prod[c][i];
}
}
}
}
}
__global__ void kConvolve_forward_c(float* targets, float* images, float* filters,
const int numImages, const int numFilters,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesZ, const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups) {
__shared__ float shFilters[4*2][4 * 8]; // pre-load 4 pixels from 4*8 filters
__shared__ float shImages[4*2][32 * 1]; // pre-load 4 pixels from 32*1 images
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (4*8);
const int moduleIdx = blockIdx.x / blocksPerModule;
const int blockFilterIdx = 8 * 4 * (blockIdx.x % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY * numModulesZ;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.x * 32 + threadIdx.y;
const int imgLoadModPosZ = paddingStart + (moduleIdx / (numModulesX * numModulesY)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((moduleIdx / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (4 * 8);
const int shFilterLoadX = tidx % (4 * 8);
const int myImgIdx = blockIdx.y * 32 * 1 + threadIdx.y;
images += blockColorIdx * imgPixels * imgStride + myImgIdx;
filters +=blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.x) * numImages * numModules
+ myImgIdx;
float prod[8][1];
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] = 0;
}
}
// __shared__ int imgPos[]
for (int oc = 0; oc < numFilterColors; oc += 2) { // oc stands for outer color (loop)
for (int p = 0; p < filterPixels; p += 4) {
/*
* Load 4 pixels from 4*8 filters
*/
if (shFilterLoadY < 4) {
#pragma unroll
for (int p2 = 0; p2 < 4; p2 += 32/8) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shFilters[shFilterLoadY + p2 + c * 4][shFilterLoadX] = 0;
}
}
}
}
/*
* Load 4 pixels from 32*1 images
*/
const int pixIdx = p + threadIdx.x;
if (pixIdx < filterPixels) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + (pixIdx / filterSize) % filterSize;
const int z = imgLoadModPosZ + pixIdx / (filterSize * filterSize);
if (z >= 0 && z < imgSizeZ && y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (oc * imgPixels + z * imgSizeX * imgSizeY + y * imgSizeX + x)];
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || myImgIdx + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = m[c * imgStride * imgPixels + i * 32];
}
} else {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < 1; i++) {
#pragma unroll
for (int c = 0; c < 2; c++) {
shImages[threadIdx.x + c * 4][threadIdx.y + i * 32] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < 4*2; i++) {
#pragma unroll
for(int f = 0; f < 8; f++) {
#pragma unroll
for(int g = 0; g < 1; g++) {
prod[f][g] += shImages[i][g * 32 + threadIdx.y] * shFilters[i][threadIdx.x + f * 4];
}
}
}
__syncthreads();
}
}
#pragma unroll
for (int g = 0; g < 1; g++) {
if (!true || myImgIdx + g * 32 < numImages) {
#pragma unroll
for (int f = 0; f < 8; f++) {
targets[g * 32 + f * 4 * numImages * numModules] = prod[f][g];
}
}
}
}
__global__ void kConvolve_weight_c(float* targets, float* images, float* hidActs,
const int numImages, const int numFilters,
const int numModulesZ, const int numModulesY, const int numModulesX,
const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int filterSize,
const int paddingStart, const int moduleStride, const int imgStride,
const int numImgColors, const int numGroups, const int partialSum,
const float scaleOutputs) {
__shared__ float shImages[8 * 8][32]; // preload 32 cases of 4 * pixelsPerThread pixels
__shared__ float shHidActs[2 * 16][32 + 1]; // preload 32 cases of 32 hidacts
const int tidx = 16 * threadIdx.y + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int numFilterBlocks = numFilters / (16 * 2);
const int outputModuleIdx = blockIdx.x / numFilterBlocks;
const int moduleIdx = partialSum * outputModuleIdx;
const int blockFilterIdx = 2 * 16 * (blockIdx.x % numFilterBlocks);
const int numModules = numModulesZ * numModulesY * numModulesX;
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numFilterColors = numImgColors / numGroups;
const int blockPixelOffset = (blockIdx.y / (numFilterColors/8)) * 8;
const int filterColorIdx = (blockIdx.y % (numFilterColors/8)) * 8;
const int imgColorIdx = filterColorIdx + blockGroupIdx * numFilterColors;
images += imgColorIdx * imgPixels * imgStride + loadX;
hidActs += moduleIdx * numImages
+ blockFilterIdx * numImages * numModules
+ loadY * numImages * numModules
+ loadX;
targets += outputModuleIdx * numFilters * filterPixels * numFilterColors
+ filterColorIdx * filterPixels * numFilters
+ blockPixelOffset * numFilters
+ blockFilterIdx
+ threadIdx.y * numFilters + threadIdx.x;
float* shHidActLoad = &shHidActs[loadY][loadX];
float* shImgLoad = &shImages[loadY][loadX];
float prod[8][2];
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] = 0;
}
}
// This avoids doing a division in an inner loop
__shared__ int pxDivs[8];
if (tidx < 8) {
pxDivs[tidx] = (((blockPixelOffset + tidx) / (filterSize * filterSize)) << 16) + (((blockPixelOffset + tidx) / filterSize) % filterSize << 8) + ((blockPixelOffset + tidx) % filterSize);
}
__syncthreads();
for (int m = moduleIdx; m < moduleIdx + partialSum; m++) {
const int imgLoadModPosZ = paddingStart + (m / (numModulesY * numModulesX)) * moduleStride;
const int imgLoadModPosY = paddingStart + ((m / numModulesX) % numModulesY) * moduleStride;
const int imgLoadModPosX = paddingStart + (m % numModulesX) * moduleStride;
for (int caseIdx = 0; caseIdx < numImages; caseIdx += 32) {
if (loadY < 8) {
/*
* As long as 4 * 32 is divisible by 32 this will loop the right
* number of times.
*
* This will load some images from filter pixels that don't exist (it'll set those to 0),
* but the code does not produce any output for those pixels (see last lines).
*/
// #pragma unroll
for (int y = 0; y < 8; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if (8 % (16 * 8 / 32) == 0 || y + loadY < 8) {
const int pxIdx = loadY + y; // pixel idx in filter
if (pxIdx + blockPixelOffset < filterPixels && (!true || caseIdx + loadX < numImages)) {
const int pxZ = imgLoadModPosZ + HI24(pxDivs[pxIdx]);
const int pxY = imgLoadModPosY + MI16(pxDivs[pxIdx]);//pxIdx / filterSize; // pixel x,y coords in image
const int pxX = imgLoadModPosX + LO8(pxDivs[pxIdx]);
if (pxZ >= 0 && pxZ < imgSizeZ && pxY >= 0 && pxY < imgSizeY && pxX >= 0 && pxX < imgSizeX) {
const int pixIdx = (pxZ * imgSizeX * imgSizeY + pxY * imgSizeX + pxX) * imgStride; // pixel idx in image
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = images[caseIdx + c * imgPixels * imgStride + pixIdx];
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
} else {
#pragma unroll
for (int c = 0; c < 8; c++) {
shImgLoad[(y + c * 8) * 32] = 0;
}
}
}
}
}
if (loadY < 16 * 2 && (!true || caseIdx + loadX < numImages)) {
#pragma unroll
for (int y = 0; y < 16 * 2; y += (16 * 8) / 32) {
// Make sure number of rows in the array is divisible by number of rows filled per iteration
if ((16 * 2) % (16 * 8 / 32) == 0 || y + loadY < 16 * 2) {
shHidActLoad[y * (32 + 1)] = hidActs[caseIdx + y * numImages * numModules];
}
}
}
__syncthreads();
#pragma unroll
for (int c = 0; c < 8; c++) {
#pragma unroll
for (int i = 0; i < 32; i++) {
#pragma unroll
for (int f = 0; f < 2; f++) {
prod[c][f] += shImages[threadIdx.y + c * 8][i] * shHidActs[threadIdx.x + f * 16][i];
}
}
}
__syncthreads();
}
hidActs += numImages;
}
if (blockPixelOffset + threadIdx.y < filterPixels) {
#pragma unroll
for (int f = 0; f < 2; f++) {
#pragma unroll
for (int c = 0; c < 8; c++) {
targets[c * filterPixels * numFilters + f * 16] = scaleOutputs * prod[c][f];
}
}
}
}
__global__ void kConvolve_backward_c(float* targets, const float* hidActs, const float* filters,
const int numModulesZ, const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeZ, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups) {
__shared__ float shFilters[4*4][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][32*1];
const int numImgBlocks = DIVUP(numImages,32*1);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 32*1;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * 4*4; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = (blockPixelIdx / imgSizeX) % imgSizeY;
const int blockPixelIdxZ = blockPixelIdx / (imgSizeX * imgSizeY);
const int filterPixels = filterSize * filterSize * filterSize;
const int imgPixels = imgSizeZ * imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 32 + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesZ * numModulesY * numModulesX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[4][1];
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] = 0;
}
}
const int startZ = blockPixelIdxZ - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxZ - paddingStart - filterSize) / moduleStride;
const int endZ = MIN(numModulesZ, 1 + (blockPixelIdxZ - paddingStart) / moduleStride);
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int mz = startZ; mz < endZ ; mz++){
const int moduleFront = paddingStart + mz * moduleStride;
const int pxInFilterZ = blockPixelIdxZ - moduleFront;
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = mz * numModulesX * numModulesY + my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterZ * filterSize * filterSize + pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < 1 * 32; i += 32) {
if (!true || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // load 16 rows of 32*1 cols, 4 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 32*4/32) { // load 16 rows of 32*1 cols, 4 * 32 elements at a time.
shHidActLoad[j * 32 * 1 + i] = 0;
}
}
}
const float* fLoad = true ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < 4*4; i+= 32*4/16) {
if ((4*4) % (32*4/16) == 0 || i + filtersLoadY < 4*4) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < 4; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < 1; i++) {
prod[c][i] += shFilters[c * 4 + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * 32];
}
}
}
__syncthreads();
}
}
}
}
#pragma unroll
for (int i = 0; i < 1; i++) {
if (!true || blockCaseIdx + threadIdx.x + i * 32 < numImages) {
#pragma unroll
for (int c = 0; c < 4; c++) {
targets[c * 4 * imgPixels * numImages + i * 32] = prod[c][i];
}
}
}
}
|
b8f874d2b6aa5deba5879f40df70f038741aecd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/hashing.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/partitioning.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
namespace cudf {
namespace {
// Launch configuration for optimized hash partition
constexpr size_type OPTIMIZED_BLOCK_SIZE = 512;
constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8;
constexpr size_type ELEMENTS_PER_THREAD = 2;
constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024;
// Launch configuration for fallback hash partition
constexpr size_type FALLBACK_BLOCK_SIZE = 256;
constexpr size_type FALLBACK_ROWS_PER_THREAD = 1;
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
template <typename hash_value_t>
class modulo_partitioner {
public:
modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {}
__device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; }
private:
const size_type divisor;
};
template <typename T>
bool is_power_two(T number)
{
return (0 == (number & (number - 1)));
}
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently via
* a single bitwise AND as:
* n & (d - 1)
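 *   e.g. for d = 8: n % 8 == n & 7.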
*/
template <typename hash_value_t>
class bitwise_partitioner {
public:
bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
{
assert(is_power_two(num_partitions));
}
__device__ size_type operator()(hash_value_t hash_value) const
{
return hash_value & mask; // hash_value & (num_partitions - 1)
}
private:
const size_type mask;
};
/* --------------------------------------------------------------------------*/
/**
* @brief Computes which partition each row of a device_table will belong to based
on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the global
size of each partition across all thread blocks.
*
* @param[in] the_hasher The row hasher used to compute the hash value for each row
* @param[in] num_rows The number of rows in the table
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] the_partitioner The functor that maps a row's hash value to a partition number
* @param[out] row_partition_numbers Array that holds which partition each row belongs to
* @param[out] row_partition_offset Array that holds the offset of each row in its partition of
* the thread block
* @param[out] block_partition_sizes Array that holds the size of each partition for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
* @param[out] global_partition_sizes The number of rows in each partition.
*/
/* ----------------------------------------------------------------------------*/
template <class row_hasher_t, typename partitioner_type>
__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type* __restrict__ row_partition_numbers,
size_type* __restrict__ row_partition_offset,
size_type* __restrict__ block_partition_sizes,
size_type* __restrict__ global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while (row_number < num_rows) {
const hash_value_type row_hash_value = the_hasher(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
row_partition_offset[row_number] =
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while (partition_number < num_partitions) {
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
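// Illustrative launch sketch for compute_row_partition_numbers (names here are assumed
// placeholders; the real launch lives in the host code later in this file and may differ):
//   hipLaunchKernelGGL((compute_row_partition_numbers), dim3(grid_size), dim3(block_size),
//                      num_partitions * sizeof(size_type), stream,
//                      hasher, num_rows, num_partitions, partitioner,
//                      row_partition_numbers, row_partition_offset,
//                      block_partition_sizes, global_partition_sizes);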
/* --------------------------------------------------------------------------*/
/**
* @brief Given an array of partition numbers, computes the final output location
for each element in the output such that all rows with the same partition are
contiguous in memory.
*
* @param row_partition_numbers The array that records the partition number for each row
* @param num_rows The number of rows
* @param num_partitions The number of partitions
* @param[out] block_partition_offsets Array that holds the offset of each partition for each thread
block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1 partition(num_partitions -1) offset,
...} }
*/
/* ----------------------------------------------------------------------------*/
__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type* __restrict__ block_partition_offsets)
{
// Shared array that holds the offset of this block's partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
// Initialize the array of this block's offsets from the global array
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_offsets[partition_number] =
block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Get each row's partition number, then compute its output location by
// incrementing the block's offset counter for that partition number,
// and store the row's output location in-place
while (row_number < num_rows) {
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the corresponding
// partition offset for this block
const size_type row_output_location =
atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Move one column from the input table to the hashed table.
*
* @param[in] input_buf Data buffer of the column in the input table
* @param[out] output_buf Preallocated data buffer of the column in the output table
* @param[in] num_rows The number of rows in each column
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] row_partition_numbers Array that holds which partition each row belongs to
* @param[in] row_partition_offset Array that holds the offset of each row in its partition of
* the thread block.
* @param[in] block_partition_sizes Array that holds the size of each partition for each block
* @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
*/
/* ----------------------------------------------------------------------------*/
template <typename InputIter, typename DataType>
__global__ void copy_block_partitions(InputIter input_iter,
DataType* __restrict__ output_buf,
const size_type num_rows,
const size_type num_partitions,
size_type const* __restrict__ row_partition_numbers,
size_type const* __restrict__ row_partition_offset,
size_type const* __restrict__ block_partition_sizes,
size_type const* __restrict__ scanned_block_partition_sizes)
{
extern __shared__ char shared_memory[];
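// Dynamic shared memory layout (size supplied by copy_block_partitions_impl below):
//   [ DataType  block_output[OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD] ]
//   [ size_type partition_offset_shared[num_partitions + 1] ]
//   [ size_type partition_offset_global[num_partitions + 1] ]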
auto block_output = reinterpret_cast<DataType*>(shared_memory);
auto partition_offset_shared =
reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
auto partition_offset_global =
reinterpret_cast<size_type*>(partition_offset_shared + num_partitions + 1);
typedef hipcub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
// use ELEMENTS_PER_THREAD=2 to support up to 1024 partitions
size_type temp_histo[ELEMENTS_PER_THREAD];
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
temp_histo[i] =
block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
} else {
temp_histo[i] = 0;
}
}
__syncthreads();
BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);
__syncthreads();
if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }
// Calculate the offset in shared memory of each partition in this thread block
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
}
}
// Fetch the offset in the output buffer of each partition in this thread block
for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
partition_offset_global[ipartition] =
scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
}
__syncthreads();
// Fetch the input data to shared memory
for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows;
row_number += blockDim.x * gridDim.x) {
size_type const ipartition = row_partition_numbers[row_number];
block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
input_iter[row_number];
}
__syncthreads();
// Copy data from shared memory to output using 32 threads for each partition
constexpr int nthreads_partition = 32;
static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
"BLOCK_SIZE must be divisible by number of threads");
for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
size_type const nelements_partition =
partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];
for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
row_offset += nthreads_partition) {
output_buf[partition_offset_global[ipartition] + row_offset] =
block_output[partition_offset_shared[ipartition] + row_offset];
}
}
}
template <typename InputIter, typename OutputIter>
void copy_block_partitions_impl(InputIter const input,
OutputIter output,
size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
hipStream_t stream)
{
// We need 3 chunks of shared memory:
// 1. BLOCK_SIZE * ROWS_PER_THREAD elements of the output's data type for copying to output
// 2. num_partitions + 1 elements of size_type for per-block partition offsets
// 3. num_partitions + 1 elements of size_type for global partition offsets
int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) +
(num_partitions + 1) * sizeof(size_type) * 2;
hipLaunchKernelGGL(( copy_block_partitions), dim3(grid_size), dim3(OPTIMIZED_BLOCK_SIZE), smem, stream,
input,
output,
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes);
}
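// Shared-memory budget sketch for the launch above (assumed, illustrative values only):
// with a 4-byte output type and num_partitions == 64,
// smem = 512 * 8 * 4 // staging area for OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD rows
// + (64 + 1) * 4 * 2 // per-block and global partition offsets
// = 16904 bytes, comfortably below the 48 KB dynamic shared-memory limit assumed here.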
rmm::device_vector<size_type> compute_gather_map(size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
hipStream_t stream)
{
auto sequence = thrust::make_counting_iterator(0);
rmm::device_vector<size_type> gather_map(num_rows);
copy_block_partitions_impl(sequence,
gather_map.data().get(),
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return gather_map;
}
struct copy_block_partitions_dispatcher {
template <typename DataType, std::enable_if_t<is_fixed_width<DataType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr);
copy_block_partitions_impl(input.data<DataType>(),
static_cast<DataType*>(output.data()),
input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return std::make_unique<column>(input.type(), input.size(), std::move(output));
}
template <typename DataType, std::enable_if_t<not is_fixed_width<DataType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// Use copy_block_partitions (via compute_gather_map) to create an equivalent gather map
auto gather_map = compute_gather_map(input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
// Use gather instead for non-fixed width types
return type_dispatcher(input.type(),
detail::column_gatherer{},
input,
gather_map.begin(),
gather_map.end(),
false,
stream,
mr);
}
};
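// Informal sketch of how the two operator() overloads above are selected: a fixed-width
// column such as INT32 or FLOAT64 takes the direct device-buffer copy path, while a
// non-fixed-width column such as STRING is routed through compute_gather_map() and the
// column gatherer instead.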
// NOTE hash_has_nulls must be true if table_to_hash has nulls
template <bool hash_has_nulls>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition_table(
table_view const& input,
table_view const& table_to_hash,
size_type num_partitions,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto const num_rows = table_to_hash.num_rows();
bool const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL};
auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE;
auto const rows_per_thread =
use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD;
auto const rows_per_block = block_size * rows_per_thread;
// NOTE grid_size is non-const to workaround lambda capture bug in gcc 5.4
auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block);
// Allocate array to hold which partition each row belongs to
auto row_partition_numbers = rmm::device_vector<size_type>(num_rows);
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size,
// ...} }
auto block_partition_sizes = rmm::device_vector<size_type>(grid_size * num_partitions);
auto scanned_block_partition_sizes = rmm::device_vector<size_type>(grid_size * num_partitions);
// Holds the total number of rows in each partition
auto global_partition_sizes = rmm::device_vector<size_type>(num_partitions, size_type{0});
auto row_partition_offset = rmm::device_vector<size_type>(num_rows);
auto const device_input = table_device_view::create(table_to_hash, stream);
auto const hasher = row_hasher<MurmurHash3_32, hash_has_nulls>(*device_input);
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if (is_power_two(num_partitions)) {
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = bitwise_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers), dim3(grid_size),
dim3(block_size),
num_partitions * sizeof(size_type),
stream, hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data().get(),
row_partition_offset.data().get(),
block_partition_sizes.data().get(),
global_partition_sizes.data().get());
} else {
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = modulo_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers), dim3(grid_size),
dim3(block_size),
num_partitions * sizeof(size_type),
stream, hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data().get(),
row_partition_offset.data().get(),
block_partition_sizes.data().get(),
global_partition_sizes.data().get());
}
// Compute exclusive scan of all blocks' partition sizes in-place to determine
// the starting point for each block's portion of each partition in the output
thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream),
block_partition_sizes.begin(),
block_partition_sizes.end(),
scanned_block_partition_sizes.data().get());
// Compute exclusive scan of size of each partition to determine offset location
// of each partition in final output.
// TODO This can be done independently on a separate stream
size_type* scanned_global_partition_sizes{global_partition_sizes.data().get()};
thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream),
global_partition_sizes.begin(),
global_partition_sizes.end(),
scanned_global_partition_sizes);
// Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
std::vector<size_type> partition_offsets(num_partitions);
CUDA_TRY(hipMemcpyAsync(partition_offsets.data(),
scanned_global_partition_sizes,
num_partitions * sizeof(size_type),
hipMemcpyDeviceToHost,
stream));
// When the number of partitions is less than a threshold, we can apply an
// optimization using shared memory to copy values to the output buffer.
// Otherwise, fallback to using scatter.
if (use_optimization) {
std::vector<std::unique_ptr<column>> output_cols(input.num_columns());
// NOTE these pointers are non-const to workaround lambda capture bug in gcc 5.4
auto row_partition_numbers_ptr{row_partition_numbers.data().get()};
auto row_partition_offset_ptr{row_partition_offset.data().get()};
auto block_partition_sizes_ptr{block_partition_sizes.data().get()};
auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()};
// Copy input to output by partition per column
std::transform(input.begin(), input.end(), output_cols.begin(), [=](auto const& col) {
return cudf::type_dispatcher(col.type(),
copy_block_partitions_dispatcher{},
col,
num_partitions,
row_partition_numbers_ptr,
row_partition_offset_ptr,
block_partition_sizes_ptr,
scanned_block_partition_sizes_ptr,
grid_size,
mr,
stream);
});
if (has_nulls(input)) {
// Use copy_block_partitions to compute a gather map
auto gather_map = compute_gather_map(num_rows,
num_partitions,
row_partition_numbers_ptr,
row_partition_offset_ptr,
block_partition_sizes_ptr,
scanned_block_partition_sizes_ptr,
grid_size,
stream);
// Handle bitmask using gather to take advantage of ballot_sync
detail::gather_bitmask(
input, gather_map.begin(), output_cols, detail::gather_bitmask_op::DONT_CHECK, mr, stream);
}
auto output{std::make_unique<table>(std::move(output_cols))};
return std::make_pair(std::move(output), std::move(partition_offsets));
} else {
// Compute a scatter map from input to output such that the output rows are
// sorted by partition number
auto row_output_locations{row_partition_numbers.data().get()};
auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()};
hipLaunchKernelGGL(( compute_row_output_locations), dim3(grid_size),
dim3(block_size),
num_partitions * sizeof(size_type),
stream,
row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr);
// Use the resulting scatter map to materialize the output
auto output = detail::scatter(
input, row_partition_numbers.begin(), row_partition_numbers.end(), input, false, mr, stream);
return std::make_pair(std::move(output), std::move(partition_offsets));
}
}
// MD5 supported leaf data type check
bool md5_type_check(data_type dt)
{
return !is_chrono(dt) && (is_fixed_width(dt) || (dt.id() == type_id::STRING));
}
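// Informal examples of the predicate above (illustrative only):
// md5_type_check(data_type{type_id::INT32}) -> true (fixed width, not chrono)
// md5_type_check(data_type{type_id::STRING}) -> true (explicitly allowed)
// md5_type_check(data_type{type_id::TIMESTAMP_DAYS}) -> false (chrono types are rejected)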
} // namespace
namespace detail {
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUDF_FUNC_RANGE();
auto table_to_hash = input.select(columns_to_hash);
// Return empty result if there are no partitions or nothing to hash
if (num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0) {
return std::make_pair(empty_like(input), std::vector<size_type>{});
}
if (has_nulls(table_to_hash)) {
return hash_partition_table<true>(input, table_to_hash, num_partitions, mr, stream);
} else {
return hash_partition_table<false>(input, table_to_hash, num_partitions, mr, stream);
}
}
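// Hedged host-side usage sketch (tbl, mr and stream are assumed to be a valid table_view
// with at least three columns, a device_memory_resource* and a stream; names are illustrative):
// auto result = detail::hash_partition(tbl, {0, 2}, 4, mr, stream);
// // result.first : table with rows grouped by partition
// // result.second : 4 starting offsets, one per partition, into that table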
std::unique_ptr<column> hash(table_view const& input,
hash_id hash_function,
std::vector<uint32_t> const& initial_hash,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
switch (hash_function) {
case (hash_id::HASH_MURMUR3): return murmur_hash3_32(input, initial_hash, mr, stream);
case (hash_id::HASH_MD5): return md5_hash(input, mr, stream);
default: return nullptr;
}
}
std::unique_ptr<column> md5_hash(table_view const& input,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
if (input.num_columns() == 0 || input.num_rows() == 0) {
const string_scalar string_128bit("d41d8cd98f00b204e9800998ecf8427e");
auto output = make_column_from_scalar(string_128bit, input.num_rows(), mr, stream);
return output;
}
// Accepts string and fixed width columns, or single layer list columns holding those types
CUDF_EXPECTS(
std::all_of(input.begin(),
input.end(),
[](auto col) {
return md5_type_check(col.type()) ||
(col.type().id() == type_id::LIST && md5_type_check(col.child(1).type()));
}),
"MD5 unsupported column type");
// Result column allocation and creation
auto begin = thrust::make_constant_iterator(32);
auto offsets_column =
cudf::strings::detail::make_offsets_child_column(begin, begin + input.num_rows(), mr, stream);
auto offsets_view = offsets_column->view();
auto d_new_offsets = offsets_view.data<int32_t>();
auto chars_column = strings::detail::create_chars_child_column(
input.num_rows(), 0, input.num_rows() * 32, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
rmm::device_buffer null_mask{0, stream, mr};
auto const device_input = table_device_view::create(input, stream);
// Hash each row, hashing each element sequentially left to right
thrust::for_each(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(input.num_rows()),
[d_chars, device_input = *device_input] __device__(auto row_index) {
md5_intermediate_data hash_state;
MD5Hash hasher = MD5Hash{};
for (int col_index = 0; col_index < device_input.num_columns(); col_index++) {
if (device_input.column(col_index).is_valid(row_index)) {
cudf::type_dispatcher(device_input.column(col_index).type(),
hasher,
device_input.column(col_index),
row_index,
&hash_state);
}
}
hasher.finalize(&hash_state, d_chars + (row_index * 32));
});
return make_strings_column(input.num_rows(),
std::move(offsets_column),
std::move(chars_column),
0,
std::move(null_mask),
stream,
mr);
}
std::unique_ptr<column> murmur_hash3_32(table_view const& input,
std::vector<uint32_t> const& initial_hash,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
// TODO this should be UINT32
auto output = make_numeric_column(
data_type(type_id::INT32), input.num_rows(), mask_state::UNALLOCATED, stream, mr);
// Return early if there's nothing to hash
if (input.num_columns() == 0 || input.num_rows() == 0) { return output; }
bool const nullable = has_nulls(input);
auto const device_input = table_device_view::create(input, stream);
auto output_view = output->mutable_view();
// Compute the hash value for each row depending on the specified hash function
if (!initial_hash.empty()) {
CUDF_EXPECTS(initial_hash.size() == size_t(input.num_columns()),
"Expected same size of initial hash values as number of columns");
auto device_initial_hash = rmm::device_vector<uint32_t>(initial_hash);
if (nullable) {
thrust::tabulate(rmm::exec_policy(stream)->on(stream),
output_view.begin<int32_t>(),
output_view.end<int32_t>(),
row_hasher_initial_values<MurmurHash3_32, true>(
*device_input, device_initial_hash.data().get()));
} else {
thrust::tabulate(rmm::exec_policy(stream)->on(stream),
output_view.begin<int32_t>(),
output_view.end<int32_t>(),
row_hasher_initial_values<MurmurHash3_32, false>(
*device_input, device_initial_hash.data().get()));
}
} else {
if (nullable) {
thrust::tabulate(rmm::exec_policy(stream)->on(stream),
output_view.begin<int32_t>(),
output_view.end<int32_t>(),
row_hasher<MurmurHash3_32, true>(*device_input));
} else {
thrust::tabulate(rmm::exec_policy(stream)->on(stream),
output_view.begin<int32_t>(),
output_view.end<int32_t>(),
row_hasher<MurmurHash3_32, false>(*device_input));
}
}
return output;
}
} // namespace detail
std::unique_ptr<column> hash(table_view const& input,
hash_id hash_function,
std::vector<uint32_t> const& initial_hash,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::hash(input, hash_function, initial_hash, mr);
}
std::unique_ptr<column> murmur_hash3_32(table_view const& input,
std::vector<uint32_t> const& initial_hash,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::murmur_hash3_32(input, initial_hash, mr);
}
} // namespace cudf
| b8f874d2b6aa5deba5879f40df70f038741aecd4.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/gather.cuh>
#include <cudf/detail/hashing.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/detail/scatter.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/partitioning.hpp>
#include <cudf/table/row_operators.cuh>
#include <cudf/table/table_device_view.cuh>
#include <cudf/types.hpp>
namespace cudf {
namespace {
// Launch configuration for optimized hash partition
constexpr size_type OPTIMIZED_BLOCK_SIZE = 512;
constexpr size_type OPTIMIZED_ROWS_PER_THREAD = 8;
constexpr size_type ELEMENTS_PER_THREAD = 2;
constexpr size_type THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL = 1024;
// Launch configuration for fallback hash partition
constexpr size_type FALLBACK_BLOCK_SIZE = 256;
constexpr size_type FALLBACK_ROWS_PER_THREAD = 1;
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
template <typename hash_value_t>
class modulo_partitioner {
public:
modulo_partitioner(size_type num_partitions) : divisor{num_partitions} {}
__device__ size_type operator()(hash_value_t hash_value) const { return hash_value % divisor; }
private:
const size_type divisor;
};
template <typename T>
bool is_power_two(T number)
{
return (0 == (number & (number - 1)));
}
/**
* @brief Functor to map a hash value to a particular 'bin' or partition number
* that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently via
* a single bitwise AND as:
* n & (d - 1)
*/
template <typename hash_value_t>
class bitwise_partitioner {
public:
bitwise_partitioner(size_type num_partitions) : mask{(num_partitions - 1)}
{
assert(is_power_two(num_partitions));
}
__device__ size_type operator()(hash_value_t hash_value) const
{
return hash_value & mask; // hash_value & (num_partitions - 1)
}
private:
const size_type mask;
};
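// Worked example of the equivalence exploited above (illustrative values): with
// num_partitions = 8 the mask is 7, so a hash value of 27 maps to 27 & 7 == 3, matching
// 27 % 8 == 3. For a non-power-of-two count such as 6 this shortcut breaks down
// (27 & 5 == 1 but 27 % 6 == 3), which is why modulo_partitioner is used in that case.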
/* --------------------------------------------------------------------------*/
/**
* @brief Computes which partition each row of a device_table will belong to based
on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the global
size of each partition across all thread blocks.
*
* @param[in] the_table The table whose rows will be partitioned
* @param[in] num_rows The number of rows in the table
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] the_partitioner The functor that maps a row's hash value to a partition number
* @param[out] row_partition_numbers Array that holds which partition each row belongs to
* @param[out] row_partition_offset Array that holds the offset of each row in its partition of
* the thread block
* @param[out] block_partition_sizes Array that holds the size of each partition for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
* @param[out] global_partition_sizes The number of rows in each partition.
*/
/* ----------------------------------------------------------------------------*/
template <class row_hasher_t, typename partitioner_type>
__global__ void compute_row_partition_numbers(row_hasher_t the_hasher,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type* __restrict__ row_partition_numbers,
size_type* __restrict__ row_partition_offset,
size_type* __restrict__ block_partition_sizes,
size_type* __restrict__ global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, map it to a partition number, store that
// partition number for the row, and increment the shared memory counter for
// that partition
while (row_number < num_rows) {
const hash_value_type row_hash_value = the_hasher(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
row_partition_offset[row_number] =
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while (partition_number < num_partitions) {
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
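// Tiny numerical illustration of the histogram layout produced above (made-up sizes):
// with gridDim.x == 2 and num_partitions == 3, block_partition_sizes is written as
// { p0@block0, p0@block1, p1@block0, p1@block1, p2@block0, p2@block1 },
// e.g. { 4, 2, 1, 5, 3, 3 }, while global_partition_sizes accumulates { 6, 6, 6 }.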
/* --------------------------------------------------------------------------*/
/**
* @brief Given an array of partition numbers, computes the final output location
for each element in the output such that all rows with the same partition are
contiguous in memory.
*
* @param row_partition_numbers The array that records the partition number for each row
* @param num_rows The number of rows
* @param num_partitions The number of partitions
* @param[out] block_partition_offsets Array that holds the offset of each partition for each thread
block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1 partition(num_partitions -1) offset,
...} }
*/
/* ----------------------------------------------------------------------------*/
__global__ void compute_row_output_locations(size_type* __restrict__ row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type* __restrict__ block_partition_offsets)
{
// Shared array that holds the offset of this block's partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
// Initialize array of this block's offsets from the global array
size_type partition_number = threadIdx.x;
while (partition_number < num_partitions) {
shared_partition_offsets[partition_number] =
block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Get each row's partition number, compute its output location by
// incrementing this block's offset counter for that partition number,
// and store the row's output location in-place
while (row_number < num_rows) {
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the corresponding
// partition offset for this block
const size_type row_output_location =
atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Move one column from the input table to the hashed table.
*
* @param[in] input_buf Data buffer of the column in the input table
* @param[out] output_buf Preallocated data buffer of the column in the output table
* @param[in] num_rows The number of rows in each column
* @param[in] num_partitions The number of partitions to divide the rows into
* @param[in] row_partition_numbers Array that holds which partition each row belongs to
* @param[in] row_partition_offset Array that holds the offset of each row in its partition of
* the thread block.
* @param[in] block_partition_sizes Array that holds the size of each partition for each block
* @param[in] scanned_block_partition_sizes The scan of block_partition_sizes
*/
/* ----------------------------------------------------------------------------*/
template <typename InputIter, typename DataType>
__global__ void copy_block_partitions(InputIter input_iter,
DataType* __restrict__ output_buf,
const size_type num_rows,
const size_type num_partitions,
size_type const* __restrict__ row_partition_numbers,
size_type const* __restrict__ row_partition_offset,
size_type const* __restrict__ block_partition_sizes,
size_type const* __restrict__ scanned_block_partition_sizes)
{
extern __shared__ char shared_memory[];
auto block_output = reinterpret_cast<DataType*>(shared_memory);
auto partition_offset_shared =
reinterpret_cast<size_type*>(block_output + OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD);
auto partition_offset_global =
reinterpret_cast<size_type*>(partition_offset_shared + num_partitions + 1);
typedef cub::BlockScan<size_type, OPTIMIZED_BLOCK_SIZE> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
// use ELEMENTS_PER_THREAD=2 to support up to 1024 partitions
size_type temp_histo[ELEMENTS_PER_THREAD];
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
temp_histo[i] =
block_partition_sizes[blockIdx.x + (ELEMENTS_PER_THREAD * threadIdx.x + i) * gridDim.x];
} else {
temp_histo[i] = 0;
}
}
__syncthreads();
BlockScan(temp_storage).InclusiveSum(temp_histo, temp_histo);
__syncthreads();
if (threadIdx.x == 0) { partition_offset_shared[0] = 0; }
// Calculate the offset in shared memory of each partition in this thread block
for (int i = 0; i < ELEMENTS_PER_THREAD; ++i) {
if (ELEMENTS_PER_THREAD * threadIdx.x + i < num_partitions) {
partition_offset_shared[ELEMENTS_PER_THREAD * threadIdx.x + i + 1] = temp_histo[i];
}
}
// Fetch the offset in the output buffer of each partition in this thread block
for (size_type ipartition = threadIdx.x; ipartition < num_partitions; ipartition += blockDim.x) {
partition_offset_global[ipartition] =
scanned_block_partition_sizes[ipartition * gridDim.x + blockIdx.x];
}
__syncthreads();
// Fetch the input data to shared memory
for (size_type row_number = threadIdx.x + blockIdx.x * blockDim.x; row_number < num_rows;
row_number += blockDim.x * gridDim.x) {
size_type const ipartition = row_partition_numbers[row_number];
block_output[partition_offset_shared[ipartition] + row_partition_offset[row_number]] =
input_iter[row_number];
}
__syncthreads();
// Copy data from shared memory to output using 32 threads for each partition
constexpr int nthreads_partition = 32;
static_assert(OPTIMIZED_BLOCK_SIZE % nthreads_partition == 0,
"BLOCK_SIZE must be divisible by number of threads");
for (size_type ipartition = threadIdx.x / nthreads_partition; ipartition < num_partitions;
ipartition += OPTIMIZED_BLOCK_SIZE / nthreads_partition) {
size_type const nelements_partition =
partition_offset_shared[ipartition + 1] - partition_offset_shared[ipartition];
for (size_type row_offset = threadIdx.x % nthreads_partition; row_offset < nelements_partition;
row_offset += nthreads_partition) {
output_buf[partition_offset_global[ipartition] + row_offset] =
block_output[partition_offset_shared[ipartition] + row_offset];
}
}
}
template <typename InputIter, typename OutputIter>
void copy_block_partitions_impl(InputIter const input,
OutputIter output,
size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
cudaStream_t stream)
{
// We need 3 chunks of shared memory:
// 1. BLOCK_SIZE * ROWS_PER_THREAD elements of the output's data type for copying to output
// 2. num_partitions + 1 elements of size_type for per-block partition offsets
// 3. num_partitions + 1 elements of size_type for global partition offsets
int const smem = OPTIMIZED_BLOCK_SIZE * OPTIMIZED_ROWS_PER_THREAD * sizeof(*output) +
(num_partitions + 1) * sizeof(size_type) * 2;
copy_block_partitions<<<grid_size, OPTIMIZED_BLOCK_SIZE, smem, stream>>>(
input,
output,
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes);
}
rmm::device_vector<size_type> compute_gather_map(size_type num_rows,
size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
cudaStream_t stream)
{
auto sequence = thrust::make_counting_iterator(0);
rmm::device_vector<size_type> gather_map(num_rows);
copy_block_partitions_impl(sequence,
gather_map.data().get(),
num_rows,
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return gather_map;
}
struct copy_block_partitions_dispatcher {
template <typename DataType, std::enable_if_t<is_fixed_width<DataType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
rmm::device_buffer output(input.size() * sizeof(DataType), stream, mr);
copy_block_partitions_impl(input.data<DataType>(),
static_cast<DataType*>(output.data()),
input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
return std::make_unique<column>(input.type(), input.size(), std::move(output));
}
template <typename DataType, std::enable_if_t<not is_fixed_width<DataType>()>* = nullptr>
std::unique_ptr<column> operator()(column_view const& input,
const size_type num_partitions,
size_type const* row_partition_numbers,
size_type const* row_partition_offset,
size_type const* block_partition_sizes,
size_type const* scanned_block_partition_sizes,
size_type grid_size,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
// Use copy_block_partitions (via compute_gather_map) to create an equivalent gather map
auto gather_map = compute_gather_map(input.size(),
num_partitions,
row_partition_numbers,
row_partition_offset,
block_partition_sizes,
scanned_block_partition_sizes,
grid_size,
stream);
// Use gather instead for non-fixed width types
return type_dispatcher(input.type(),
detail::column_gatherer{},
input,
gather_map.begin(),
gather_map.end(),
false,
stream,
mr);
}
};
// NOTE hash_has_nulls must be true if table_to_hash has nulls
template <bool hash_has_nulls>
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition_table(
table_view const& input,
table_view const& table_to_hash,
size_type num_partitions,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto const num_rows = table_to_hash.num_rows();
bool const use_optimization{num_partitions <= THRESHOLD_FOR_OPTIMIZED_PARTITION_KERNEL};
auto const block_size = use_optimization ? OPTIMIZED_BLOCK_SIZE : FALLBACK_BLOCK_SIZE;
auto const rows_per_thread =
use_optimization ? OPTIMIZED_ROWS_PER_THREAD : FALLBACK_ROWS_PER_THREAD;
auto const rows_per_block = block_size * rows_per_thread;
// NOTE grid_size is non-const to workaround lambda capture bug in gcc 5.4
auto grid_size = util::div_rounding_up_safe(num_rows, rows_per_block);
// Allocate array to hold which partition each row belongs to
auto row_partition_numbers = rmm::device_vector<size_type>(num_rows);
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size,
// ...} }
auto block_partition_sizes = rmm::device_vector<size_type>(grid_size * num_partitions);
auto scanned_block_partition_sizes = rmm::device_vector<size_type>(grid_size * num_partitions);
// Holds the total number of rows in each partition
auto global_partition_sizes = rmm::device_vector<size_type>(num_partitions, size_type{0});
auto row_partition_offset = rmm::device_vector<size_type>(num_rows);
auto const device_input = table_device_view::create(table_to_hash, stream);
auto const hasher = row_hasher<MurmurHash3_32, hash_has_nulls>(*device_input);
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if (is_power_two(num_partitions)) {
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = bitwise_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
compute_row_partition_numbers<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream>>>(hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data().get(),
row_partition_offset.data().get(),
block_partition_sizes.data().get(),
global_partition_sizes.data().get());
} else {
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = modulo_partitioner<hash_value_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
compute_row_partition_numbers<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream>>>(hasher,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers.data().get(),
row_partition_offset.data().get(),
block_partition_sizes.data().get(),
global_partition_sizes.data().get());
}
// Compute exclusive scan of all blocks' partition sizes in-place to determine
// the starting point for each block's portion of each partition in the output
thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream),
block_partition_sizes.begin(),
block_partition_sizes.end(),
scanned_block_partition_sizes.data().get());
// Compute exclusive scan of size of each partition to determine offset location
// of each partition in final output.
// TODO This can be done independently on a separate stream
size_type* scanned_global_partition_sizes{global_partition_sizes.data().get()};
thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream),
global_partition_sizes.begin(),
global_partition_sizes.end(),
scanned_global_partition_sizes);
// Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
std::vector<size_type> partition_offsets(num_partitions);
CUDA_TRY(cudaMemcpyAsync(partition_offsets.data(),
scanned_global_partition_sizes,
num_partitions * sizeof(size_type),
cudaMemcpyDeviceToHost,
stream));
// When the number of partitions is less than a threshold, we can apply an
// optimization using shared memory to copy values to the output buffer.
// Otherwise, fallback to using scatter.
if (use_optimization) {
std::vector<std::unique_ptr<column>> output_cols(input.num_columns());
// NOTE these pointers are non-const to workaround lambda capture bug in gcc 5.4
auto row_partition_numbers_ptr{row_partition_numbers.data().get()};
auto row_partition_offset_ptr{row_partition_offset.data().get()};
auto block_partition_sizes_ptr{block_partition_sizes.data().get()};
auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()};
// Copy input to output by partition per column
std::transform(input.begin(), input.end(), output_cols.begin(), [=](auto const& col) {
return cudf::type_dispatcher(col.type(),
copy_block_partitions_dispatcher{},
col,
num_partitions,
row_partition_numbers_ptr,
row_partition_offset_ptr,
block_partition_sizes_ptr,
scanned_block_partition_sizes_ptr,
grid_size,
mr,
stream);
});
if (has_nulls(input)) {
// Use copy_block_partitions to compute a gather map
auto gather_map = compute_gather_map(num_rows,
num_partitions,
row_partition_numbers_ptr,
row_partition_offset_ptr,
block_partition_sizes_ptr,
scanned_block_partition_sizes_ptr,
grid_size,
stream);
// Handle bitmask using gather to take advantage of ballot_sync
detail::gather_bitmask(
input, gather_map.begin(), output_cols, detail::gather_bitmask_op::DONT_CHECK, mr, stream);
}
auto output{std::make_unique<table>(std::move(output_cols))};
return std::make_pair(std::move(output), std::move(partition_offsets));
} else {
// Compute a scatter map from input to output such that the output rows are
// sorted by partition number
auto row_output_locations{row_partition_numbers.data().get()};
auto scanned_block_partition_sizes_ptr{scanned_block_partition_sizes.data().get()};
compute_row_output_locations<<<grid_size,
block_size,
num_partitions * sizeof(size_type),
stream>>>(
row_output_locations, num_rows, num_partitions, scanned_block_partition_sizes_ptr);
// Use the resulting scatter map to materialize the output
auto output = detail::scatter(
input, row_partition_numbers.begin(), row_partition_numbers.end(), input, false, mr, stream);
return std::make_pair(std::move(output), std::move(partition_offsets));
}
}
// MD5 supported leaf data type check
bool md5_type_check(data_type dt)
{
return !is_chrono(dt) && (is_fixed_width(dt) || (dt.id() == type_id::STRING));
}
} // namespace
namespace detail {
std::pair<std::unique_ptr<table>, std::vector<size_type>> hash_partition(
table_view const& input,
std::vector<size_type> const& columns_to_hash,
int num_partitions,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUDF_FUNC_RANGE();
auto table_to_hash = input.select(columns_to_hash);
// Return empty result if there are no partitions or nothing to hash
if (num_partitions <= 0 || input.num_rows() == 0 || table_to_hash.num_columns() == 0) {
return std::make_pair(empty_like(input), std::vector<size_type>{});
}
if (has_nulls(table_to_hash)) {
return hash_partition_table<true>(input, table_to_hash, num_partitions, mr, stream);
} else {
return hash_partition_table<false>(input, table_to_hash, num_partitions, mr, stream);
}
}
std::unique_ptr<column> hash(table_view const& input,
hash_id hash_function,
std::vector<uint32_t> const& initial_hash,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
switch (hash_function) {
case (hash_id::HASH_MURMUR3): return murmur_hash3_32(input, initial_hash, mr, stream);
case (hash_id::HASH_MD5): return md5_hash(input, mr, stream);
default: return nullptr;
}
}
std::unique_ptr<column> md5_hash(table_view const& input,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
if (input.num_columns() == 0 || input.num_rows() == 0) {
const string_scalar string_128bit("d41d8cd98f00b204e9800998ecf8427e");
auto output = make_column_from_scalar(string_128bit, input.num_rows(), mr, stream);
return output;
}
// Accepts string and fixed width columns, or single layer list columns holding those types
CUDF_EXPECTS(
std::all_of(input.begin(),
input.end(),
[](auto col) {
return md5_type_check(col.type()) ||
(col.type().id() == type_id::LIST && md5_type_check(col.child(1).type()));
}),
"MD5 unsupported column type");
// Result column allocation and creation
auto begin = thrust::make_constant_iterator(32);
auto offsets_column =
cudf::strings::detail::make_offsets_child_column(begin, begin + input.num_rows(), mr, stream);
auto offsets_view = offsets_column->view();
auto d_new_offsets = offsets_view.data<int32_t>();
auto chars_column = strings::detail::create_chars_child_column(
input.num_rows(), 0, input.num_rows() * 32, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
rmm::device_buffer null_mask{0, stream, mr};
auto const device_input = table_device_view::create(input, stream);
// Hash each row, hashing each element sequentially left to right
thrust::for_each(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator(0),
thrust::make_counting_iterator(input.num_rows()),
[d_chars, device_input = *device_input] __device__(auto row_index) {
md5_intermediate_data hash_state;
MD5Hash hasher = MD5Hash{};
for (int col_index = 0; col_index < device_input.num_columns(); col_index++) {
if (device_input.column(col_index).is_valid(row_index)) {
cudf::type_dispatcher(device_input.column(col_index).type(),
hasher,
device_input.column(col_index),
row_index,
&hash_state);
}
}
hasher.finalize(&hash_state, d_chars + (row_index * 32));
});
return make_strings_column(input.num_rows(),
std::move(offsets_column),
std::move(chars_column),
0,
std::move(null_mask),
stream,
mr);
}
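// Layout sketch for the result built above (informal): every row yields a fixed
// 32-character hex digest, so for a 3-row input the offsets child is { 0, 32, 64, 96 }
// and the chars child holds 96 bytes, written at d_chars + row_index * 32.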
std::unique_ptr<column> murmur_hash3_32(table_view const& input,
std::vector<uint32_t> const& initial_hash,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
// TODO this should be UINT32
auto output = make_numeric_column(
data_type(type_id::INT32), input.num_rows(), mask_state::UNALLOCATED, stream, mr);
// Return early if there's nothing to hash
if (input.num_columns() == 0 || input.num_rows() == 0) { return output; }
bool const nullable = has_nulls(input);
auto const device_input = table_device_view::create(input, stream);
auto output_view = output->mutable_view();
// Compute the hash value for each row depending on the specified hash function
if (!initial_hash.empty()) {
CUDF_EXPECTS(initial_hash.size() == size_t(input.num_columns()),
"Expected same size of initial hash values as number of columns");
auto device_initial_hash = rmm::device_vector<uint32_t>(initial_hash);
if (nullable) {
thrust::tabulate(rmm::exec_policy(stream)->on(stream),
output_view.begin<int32_t>(),
output_view.end<int32_t>(),
row_hasher_initial_values<MurmurHash3_32, true>(
*device_input, device_initial_hash.data().get()));
} else {
thrust::tabulate(rmm::exec_policy(stream)->on(stream),
output_view.begin<int32_t>(),
output_view.end<int32_t>(),
row_hasher_initial_values<MurmurHash3_32, false>(
*device_input, device_initial_hash.data().get()));
}
} else {
if (nullable) {
thrust::tabulate(rmm::exec_policy(stream)->on(stream),
output_view.begin<int32_t>(),
output_view.end<int32_t>(),
row_hasher<MurmurHash3_32, true>(*device_input));
} else {
thrust::tabulate(rmm::exec_policy(stream)->on(stream),
output_view.begin<int32_t>(),
output_view.end<int32_t>(),
row_hasher<MurmurHash3_32, false>(*device_input));
}
}
return output;
}
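// Hedged usage sketch (tbl, mr and stream are assumed valid; seed values are illustrative):
// auto hashed = detail::murmur_hash3_32(tbl, /*initial_hash=*/{}, mr, stream);
// // or, with one seed per column (the vector size must equal tbl.num_columns()):
// auto seeded = detail::murmur_hash3_32(tbl, {1u, 2u, 3u}, mr, stream);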
} // namespace detail
std::unique_ptr<column> hash(table_view const& input,
hash_id hash_function,
std::vector<uint32_t> const& initial_hash,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::hash(input, hash_function, initial_hash, mr);
}
std::unique_ptr<column> murmur_hash3_32(table_view const& input,
std::vector<uint32_t> const& initial_hash,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::murmur_hash3_32(input, initial_hash, mr);
}
} // namespace cudf
|
b261aa3ab069a54c2343f6a132514d94b30c8fd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* Determine eigenvalues for large matrices for intervals that, after the
* first step, contained exactly one eigenvalue
*/
#ifndef _BISECT_KERNEL_LARGE_ONEI_H_
#define _BISECT_KERNEL_LARGE_ONEI_H_
// includes, project
#include "config.h"
#include "util.h"
// additional kernel
#include "bisect_util.cu"
////////////////////////////////////////////////////////////////////////////////
//! Determine eigenvalues for large matrices for intervals that after
//! the first step contained one eigenvalue
//! @param g_d diagonal elements of symmetric, tridiagonal matrix
//! @param g_s superdiagonal elements of symmetric, tridiagonal matrix
//! @param n matrix size
//! @param num_intervals total number of intervals containing one eigenvalue
//! after the first step
//! @param g_left left interval limits
//! @param g_right right interval limits
//! @param g_pos index of interval / number of intervals that are smaller than
//! right interval limit
//! @param precision desired precision of eigenvalues
////////////////////////////////////////////////////////////////////////////////
__global__
void
bisectKernelLarge_OneIntervals( float* g_d, float* g_s, const unsigned int n,
unsigned int num_intervals,
float* g_left, float* g_right,
unsigned int* g_pos,
float precision )
{
const unsigned int gtid = (blockDim.x * blockIdx.x) + threadIdx.x;
__shared__ float s_left_scratch[MAX_THREADS_BLOCK];
__shared__ float s_right_scratch[MAX_THREADS_BLOCK];
// active interval of thread
// left and right limit of current interval
float left, right;
// number of eigenvalues smaller than the right limit (also corresponds to the
// global index of the eigenvalue contained in the active interval)
unsigned int right_count;
// flag if current thread converged
unsigned int converged = 0;
// midpoint when current interval is subdivided
float mid = 0.0f;
// number of eigenvalues less than mid
unsigned int mid_count = 0;
// read data from global memory
if( gtid < num_intervals) {
left = g_left[gtid];
right = g_right[gtid];
right_count = g_pos[gtid];
}
// flag to determine if all threads converged to eigenvalue
__shared__ unsigned int converged_all_threads;
// initialize the shared flag
if( 0 == threadIdx.x) {
converged_all_threads = 0;
}
__syncthreads();
// process until all threads converged to an eigenvalue
// while( 0 == converged_all_threads) {
while( true) {
converged_all_threads = 1;
// update midpoint for all active threads
if((gtid < num_intervals) && ( 0 == converged)) {
mid = computeMidpoint( left, right);
}
// find number of eigenvalues that are smaller than midpoint
mid_count = computeNumSmallerEigenvalsLarge( g_d, g_s, n,
mid, gtid, num_intervals,
s_left_scratch,
s_right_scratch,
converged );
__syncthreads();
// for all active threads
if( (gtid < num_intervals) && ( 0 == converged)) {
// update intervals -- always one child interval survives
if( right_count == mid_count) {
right = mid;
}
else {
left = mid;
}
// check for convergence
float t0 = right - left;
float t1 = max( abs(right), abs(left)) * precision;
if( t0 < min( precision, t1)) {
float lambda = computeMidpoint( left, right);
left = lambda;
right = lambda;
converged = 1;
}
else {
converged_all_threads = 0;
}
}
__syncthreads();
if( 1 == converged_all_threads) {
break;
}
__syncthreads();
}
// write data back to global memory
__syncthreads();
if( gtid < num_intervals) {
// intervals converged so left and right interval limit are both identical
// and identical to the eigenvalue
g_left[gtid] = left;
}
}
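// Worked example of the convergence test above (illustrative single-precision values):
// with left = 1.0f, right = 1.000004f and precision = 1e-5f,
// t0 = right - left ~= 4e-6
// t1 = max(|right|, |left|) * precision ~= 1e-5
// so t0 < min(precision, t1) holds, the interval collapses to its midpoint, and both
// limits store that midpoint as the converged eigenvalue approximation.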
#endif // #ifndef _BISECT_KERNEL_LARGE_ONEI_H_
| b261aa3ab069a54c2343f6a132514d94b30c8fd7.cu | /*
* Copyright 1993-2007 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws. Users and possessors of this source code
* are hereby granted a nonexclusive, royalty-free license to use this code
* in individual and commercial software.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*
* Any use of this source code in individual and commercial software must
* include, in the user documentation and internal comments to the code,
* the above Disclaimer and U.S. Government End Users Notice.
*/
/* Determine eigenvalues for large matrices for intervals that, after the
* first step, contained exactly one eigenvalue
*/
#ifndef _BISECT_KERNEL_LARGE_ONEI_H_
#define _BISECT_KERNEL_LARGE_ONEI_H_
// includes, project
#include "config.h"
#include "util.h"
// additional kernel
#include "bisect_util.cu"
////////////////////////////////////////////////////////////////////////////////
//! Determine eigenvalues for large matrices for intervals that after
//! the first step contained one eigenvalue
//! @param g_d diagonal elements of symmetric, tridiagonal matrix
//! @param g_s superdiagonal elements of symmetric, tridiagonal matrix
//! @param n matrix size
//! @param num_intervals total number of intervals containing one eigenvalue
//! after the first step
//! @param g_left left interval limits
//! @param g_right right interval limits
//! @param g_pos index of interval / number of intervals that are smaller than
//! right interval limit
//! @param precision desired precision of eigenvalues
////////////////////////////////////////////////////////////////////////////////
__global__
void
bisectKernelLarge_OneIntervals( float* g_d, float* g_s, const unsigned int n,
unsigned int num_intervals,
float* g_left, float* g_right,
unsigned int* g_pos,
float precision )
{
const unsigned int gtid = (blockDim.x * blockIdx.x) + threadIdx.x;
__shared__ float s_left_scratch[MAX_THREADS_BLOCK];
__shared__ float s_right_scratch[MAX_THREADS_BLOCK];
// active interval of thread
// left and right limit of current interval
float left, right;
// number of eigenvalues smaller than the right limit (also corresponds to the
// global index of the eigenvalue contained in the active interval)
unsigned int right_count;
// flag if current thread converged
unsigned int converged = 0;
// midpoint when current interval is subdivided
float mid = 0.0f;
// number of eigenvalues less than mid
unsigned int mid_count = 0;
// read data from global memory
if( gtid < num_intervals) {
left = g_left[gtid];
right = g_right[gtid];
right_count = g_pos[gtid];
}
// flag to determine if all threads converged to eigenvalue
__shared__ unsigned int converged_all_threads;
// initialize the shared flag
if( 0 == threadIdx.x) {
converged_all_threads = 0;
}
__syncthreads();
// process until all threads converged to an eigenvalue
// while( 0 == converged_all_threads) {
while( true) {
converged_all_threads = 1;
// update midpoint for all active threads
if((gtid < num_intervals) && ( 0 == converged)) {
mid = computeMidpoint( left, right);
}
// find number of eigenvalues that are smaller than midpoint
mid_count = computeNumSmallerEigenvalsLarge( g_d, g_s, n,
mid, gtid, num_intervals,
s_left_scratch,
s_right_scratch,
converged );
__syncthreads();
// for all active threads
if( (gtid < num_intervals) && ( 0 == converged)) {
// update intervals -- always one child interval survives
if( right_count == mid_count) {
right = mid;
}
else {
left = mid;
}
// check for convergence
float t0 = right - left;
float t1 = max( abs(right), abs(left)) * precision;
if( t0 < min( precision, t1)) {
float lambda = computeMidpoint( left, right);
left = lambda;
right = lambda;
converged = 1;
}
else {
converged_all_threads = 0;
}
}
__syncthreads();
if( 1 == converged_all_threads) {
break;
}
__syncthreads();
}
// write data back to global memory
__syncthreads();
if( gtid < num_intervals) {
// intervals converged so left and right interval limit are both identical
// and identical to the eigenvalue
g_left[gtid] = left;
}
}
#endif // #ifndef _BISECT_KERNEL_LARGE_ONEI_H_
|
9ef4d42b9bf35a57b8035d7291963df17774ab11.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2021 Facebook
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// For an input tensor, computes the top k entries in each row
// (resp. vector along the last dimension). Thus,
// values.shape = indices.shape = input.shape[:-1] + [k]
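// Rough usage sketch (illustrative only; tensor creation and model setup are
// elided, and the tensor names below are made up):
//   Tensor scores = ...;                  // e.g. shape [batch, num_classes]
//   Tensor topk_out[2];
//   ff.top_k(scores, topk_out, 5 /*k*/, true /*sorted*/, "top5");
//   // topk_out[0] holds the top-5 values (DT_FLOAT),
//   // topk_out[1] the corresponding indices (DT_INT32).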
void FFModel::top_k(const Tensor& input,
Tensor* outputs,
int k,
bool sorted,
const char *name)
{
TopK* topk = new TopK(*this, input, k, sorted, name);
layers.push_back(topk);
assert(topk->numOutputs == 2);
outputs[0] = topk->outputs[0];
outputs[1] = topk->outputs[1];
}
TopK::TopK(FFModel& model,
const Tensor& _input,
int _k, bool _sorted,
const char* name)
: Op(model, OP_TOPK, name, _input),
k(_k), sorted(_sorted)
{
numOutputs = 2;
outputs[0].numDim = inputs[0].numDim;
outputs[1].numDim = inputs[0].numDim;
outputs[0].adim[0] = k;
outputs[1].adim[0] = k;
for (int i = 1; i < inputs[0].numDim; i++) {
outputs[0].adim[i] = outputs[1].adim[i] = inputs[0].adim[i];
}
numWeights = 0;
}
void TopK::create_weights(FFModel& model)
{
// Do nothing
}
void TopK::create_output_and_partition(FFModel& model)
{
int dim = inputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
create_output_and_partition_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
      // Unsupported dim for TopK operator
assert(false);
}
}
}
template<int NDIM>
void TopK::create_output_and_partition_with_dim(FFModel& model)
{
  // Retrieve the task index space for the op
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = inputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
outputs[1] = model.create_tensor<NDIM>(dims, DT_INT32, this);
outputs[1].owner_op = this;
outputs[1].owner_idx = 1;
Rect<NDIM> input_rect;
input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]);
}
}
OpMeta* TopK::init_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
TopK* topk = (TopK*) task->args;
FFHandler handle = *((FFHandler*)task->local_args);
TopKMeta* m = new TopKMeta(handle);
m->profiling = topk->profiling;
m->sorted = topk->sorted;
return m;
}
void TopK::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
ParallelConfig pc; \
std::string pcname = name; \
ff.config.find_parallel_config(DIM, pcname, pc); \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
  IndexLauncher launcher(TOPK_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(TopK)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[1].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[1].region));
launcher.add_field(2, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
meta[idx++] = fm.get_result<OpMeta*>(*it); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
enum class HeapType { kMinHeap, kMaxHeap };
enum class PreferIndices { kLower, kHigher };
template <typename T>
struct Entry {
int index;
T value;
};
template <typename T>
struct LinearData {
typedef Entry<T> Entry;
__device__ Entry& operator[](std::size_t index) const { return data[index]; }
__device__ int get_index(int i) const { return data[i].index; }
__device__ T get_value(int i) const { return data[i].value; }
Entry* const data;
};
template <typename T>
struct IndirectLinearData {
typedef Entry<T> Entry;
__device__ Entry& operator[](std::size_t index) const { return data[index]; }
__device__ int get_index(int i) const {
return backing_data[data[i].index].index;
}
__device__ T get_value(int i) const { return data[i].value; }
Entry* const data;
Entry* const backing_data;
};
template <typename T>
struct StridedData {
typedef Entry<T> Entry;
__device__ Entry& operator[](std::size_t index) const {
return data[index * blockDim.x + threadIdx.x];
}
__device__ int get_index(int i) const { return (*this)[i].index; }
__device__ T get_value(int i) const { return (*this)[i].value; }
Entry* const data;
};
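// Layout note for StridedData (illustrative): with blockDim.x = 4, thread t's
// i-th heap entry lives at data[i * 4 + t], i.e. the per-thread heaps are
// interleaved as |t0:e0|t1:e0|t2:e0|t3:e0|t0:e1|... in shared memory. This is
// the same "shard s, element j at entries[j * num_shards + s]" layout that
// mergeShards assumes further below.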
// A heap of Entry<T> that can either work as a min-heap or as a max-heap.
template <HeapType heapType, PreferIndices preferIndices,
template <typename> class Data, typename T>
struct IndexedHeap {
typedef typename Data<T>::Entry Entry;
const Data<T> data;
__device__ IndexedHeap(const Data<T>& d) : data(d) {}
__device__ bool is_above(int left, int right) {
T left_value = data.get_value(left);
T right_value = data.get_value(right);
if (left_value == right_value) {
if (preferIndices == PreferIndices::kLower) {
return data.get_index(left) < data.get_index(right);
} else {
return data.get_index(left) > data.get_index(right);
}
}
if (heapType == HeapType::kMinHeap) {
return left_value < right_value;
} else {
return left_value > right_value;
}
}
__device__ void assign(int i, const Entry& entry) { data[i] = entry; }
__device__ void push_up(int i) {
int child = i;
int parent;
for (; child > 0; child = parent) {
parent = (child - 1) / 2;
if (!is_above(child, parent)) {
// Heap property satisfied.
break;
}
swap(child, parent);
}
}
__device__ void swap(int a, int b) {
auto tmp = data[b];
data[b] = data[a];
data[a] = tmp;
}
__device__ void push_root_down(int k) { push_down(0, k); }
// MAX-HEAPIFY in Cormen
__device__ void push_down(int node, int k) {
while (true) {
const int left = 2 * node + 1;
const int right = left + 1;
int smallest = node;
if (left < k && is_above(left, smallest)) {
smallest = left;
}
if (right < k && is_above(right, smallest)) {
smallest = right;
}
if (smallest == node) {
break;
}
swap(smallest, node);
node = smallest;
}
}
// BUILD-MAX-HEAPIFY in Cormen
__device__ void build(int k) {
for (int node = (k - 1) / 2; node >= 0; node--) {
push_down(node, k);
}
}
// HEAP-EXTRACT-MAX in Cormen
__device__ void remove_root(int k) {
data[0] = data[k - 1];
push_root_down(k - 1);
}
// in-place HEAPSORT in Cormen
// This method destroys the heap property.
__device__ void sort(int k) {
for (int slot = k - 1; slot > 0; slot--) {
// This is like remove_root but we insert the element at the end.
swap(slot, 0);
// Heap is now an element smaller.
push_root_down(/*k=*/slot);
}
}
__device__ void replace_root(const Entry& entry, int k) {
data[0] = entry;
push_root_down(k);
}
__device__ const Entry& root() { return data[0]; }
};
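// Illustrative trace (values made up): building a 3-element max-heap over
// {1, 5, 3} sifts 5 up to the root, giving {5, 1, 3}; replace_root({., 2}, 3)
// then overwrites the root with 2 and sifts it down, leaving 3 at the root.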
template <HeapType heapType, PreferIndices preferIndices,
template <typename> class Data, typename T>
__device__ IndexedHeap<heapType, preferIndices, Data, T> make_indexed_heap(
typename Data<T>::Entry* data) {
return IndexedHeap<heapType, preferIndices, Data, T>{Data<T>{data}};
}
// heapTopK walks over [input, input+length) with `step_size` stride starting at
// `start_index`.
// It builds a top-`k` heap that is stored in `heap_entries` using `Accessor` to
// access elements in `heap_entries`. If sorted=true, the elements will be
// sorted at the end.
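// Worked example (illustrative): input = {3, 9, 1, 7, 5}, length = 5, k = 2
// with the default LinearData accessor and stride 1. The first two entries
// seed the min-heap {3, 9}; 1 is skipped, 7 evicts the root 3 (heap {7, 9}),
// 5 is skipped. With sorted = true the final heapsort pass leaves the entries
// in descending order: values {9, 7} with indices {1, 3}.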
template <typename T, template <typename> class Data = LinearData>
__device__ void heapTopK(const T* __restrict__ input, int length, int k,
Entry<T>* __restrict__ heap_entries,
bool sorted = false, int start_index = 0,
int step_size = 1)
{
assert(k <= length);
auto heap =
make_indexed_heap<HeapType::kMinHeap, PreferIndices::kHigher, Data, T>(
heap_entries);
int heap_end_index = start_index + k * step_size;
if (heap_end_index > length) {
heap_end_index = length;
}
// Initialize the min-heap.
for (int index = start_index, slot = 0; index < heap_end_index;
index += step_size, slot++) {
heap.assign(slot, {index, input[index]});
}
heap.build(k);
// Now iterate over the remaining items.
// If an item is smaller than the min element, it is not amongst the top k.
// Otherwise, replace the min element with it and push upwards.
for (int index = heap_end_index; index < length; index += step_size) {
// We prefer elements with lower indices. This is given here.
// Later elements automatically have higher indices, so can be discarded.
if (input[index] > heap.root().value) {
// This element should replace the min.
heap.replace_root({index, input[index]}, k);
}
}
// Sort if wanted.
if (sorted) {
heap.sort(k);
}
}
// mergeShards performs a top-k merge on `num_shards` many sorted streams that
// are sorted and stored in `entries` in a strided way:
// |s_1 1st|s_2 1st|...s_{num_shards} 1st|s_1 2nd|s_2 2nd|...
// The overall top k elements are written to `top_k_values` and their indices
// to top_k_indices.
// `top_k_heap` is used as temporary storage for the merge heap.
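// Worked example (illustrative): num_shards = 2, k = 2, shard 0 sorted to
// {9, 7} and shard 1 to {8, 2}, so entries = |9|8|7|2|. The merge takes 9
// from shard 0, draws that shard's next element (7) as its replacement, then
// takes 8 from shard 1, producing top_k_values = {9, 8}.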
template <typename T> __device__
void mergeShards(int num_shards, int k,
Entry<T>* __restrict__ entries,
Entry<T>* __restrict__ top_k_heap, T* top_k_values,
int* top_k_indices)
{
// If k < num_shards, we can use a min-heap with k elements to get the top k
// of the sorted blocks.
// If k > num_shards, we can initialize a min-heap with the top element from
// each sorted block.
const int heap_size = k < num_shards ? k : num_shards;
// Min-heap part.
{
auto min_heap = IndexedHeap<HeapType::kMinHeap, PreferIndices::kHigher,
IndirectLinearData, T>{
IndirectLinearData<T>{top_k_heap, entries}};
// Initialize the heap as a min-heap.
for (int slot = 0; slot < heap_size; slot++) {
min_heap.assign(slot, {slot, entries[slot].value});
}
min_heap.build(heap_size);
// Now perform top k with the remaining shards (if num_shards > heap_size).
for (int shard = heap_size; shard < num_shards; shard++) {
const auto entry = entries[shard];
const auto root = min_heap.root();
if (entry.value < root.value) {
continue;
}
if (entry.value == root.value &&
entry.index > entries[root.index].index) {
continue;
}
// This element should replace the min.
min_heap.replace_root({shard, entry.value}, heap_size);
}
}
// Max-part.
{
// Turn the min-heap into a max-heap in-place.
auto max_heap = IndexedHeap<HeapType::kMaxHeap, PreferIndices::kLower,
IndirectLinearData, T>{
IndirectLinearData<T>{top_k_heap, entries}};
// Heapify into a max heap.
max_heap.build(heap_size);
// Now extract the minimum k-1 times.
// k is treated specially.
const int last_k = k - 1;
for (int rank = 0; rank < last_k; rank++) {
const Entry<T>& max_element = max_heap.root();
top_k_values[rank] = max_element.value;
int shard_index = max_element.index;
top_k_indices[rank] = entries[shard_index].index;
int next_shard_index = shard_index + num_shards;
// For rank < k-1, each top k heap still contains at least 1 element,
// so we can draw a replacement.
max_heap.replace_root({next_shard_index, entries[next_shard_index].value},
heap_size);
}
// rank == last_k.
const Entry<T>& max_element = max_heap.root();
top_k_values[last_k] = max_element.value;
int shard_index = max_element.index;
top_k_indices[last_k] = entries[shard_index].index;
}
}
template <typename T>
__global__ void
topk_forward_kernel(const T* __restrict__ input,
size_t shared_memory_size,
int length, int k, bool sorted,
T* __restrict__ output,
int* __restrict__ indices)
{
__shared__ char shared_memory[48 << 10];
const int batch_index = blockIdx.x;
const T* batch_input = input + batch_index * length;
const int thread_index = threadIdx.x;
const int thread_count = blockDim.x;
Entry<T>* shared_entries = (Entry<T>*)shared_memory;
heapTopK<T, StridedData>(batch_input, length, k, shared_entries, true,
thread_index, thread_count);
__syncthreads();
if (thread_index == 0) {
const int offset = batch_index * k;
auto batch_output = output + offset;
auto batch_indices = indices + offset;
Entry<T>* top_k_heap = shared_entries + thread_count * k;
mergeShards(thread_count, k, shared_entries, top_k_heap, batch_output,
batch_indices);
}
}
/*static*/
void TopK::forward_kernel(const TopKMeta* m,
const float* input_ptr,
float* output_ptr,
int* indices_ptr,
size_t batch_size, int length, int k,
bool sorted)
{
// Adopted from TensorFlow's TopK implementation
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/topk_op_gpu.h
int num_shards = 0;
{
constexpr auto shared_memory_size = 48 << 10;
const auto heap_size = k * sizeof(Entry<float>);
// shared_memory_size = (num_shards + 1) * heap_size <=>
num_shards = shared_memory_size / heap_size - 1;
assert(num_shards > 0);
if (num_shards > CUDA_NUM_THREADS)
num_shards = CUDA_NUM_THREADS;
}
// We are limited by the amount of shared memory we have per block.
size_t shared_memory_size = (num_shards + 1) * k * sizeof(Entry<float>);
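  // Worked sizing example (illustrative numbers): with k = 512 and
  // sizeof(Entry<float>) = 8 bytes, heap_size = 4096 bytes, so
  // num_shards = 49152 / 4096 - 1 = 11 threads per block and
  // shared_memory_size = (11 + 1) * 512 * 8 = 49152 bytes -- one k-entry
  // heap per thread plus one spare heap used by mergeShards.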
size_t num_blocks = (batch_size + num_shards - 1) / num_shards;
hipLaunchKernelGGL(( topk_forward_kernel), dim3(num_blocks), dim3(num_shards), 0, 0,
input_ptr, shared_memory_size, length, k, sorted,
output_ptr, indices_ptr);
}
void TopK::forward_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
//const TopK* topk = (const TopK*) task->args;
const TopKMeta* m = *((TopKMeta**)task->local_args);
Domain in1_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain out1_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain out2_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(out1_domain == out2_domain);
for (int i = 1; i < in1_domain.get_dim(); i++) {
assert(in1_domain.lo()[i] == out1_domain.lo()[i]);
assert(in1_domain.hi()[i] == out1_domain.hi()[i]);
}
const float* in_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
float* value_ptr = helperGetTensorPointerWO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
int* index_ptr = helperGetTensorPointerWO<int>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
hipEvent_t t_start, t_end;
if (m->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
int length = in1_domain.hi()[0] - in1_domain.lo()[0] + 1;
int k = out1_domain.hi()[0] - out1_domain.lo()[0] + 1;
size_t batch_size = in1_domain.get_volume() / length;
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
forward_kernel(m, in_ptr, value_ptr, index_ptr,
batch_size, length, k, m->sorted);
if (m->profiling) {
hipEventRecord(t_end);
checkCUDA(hipEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end));
hipEventDestroy(t_start);
hipEventDestroy(t_end);
}
}
void TopK::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(TOPK_FWD_TASK_ID, task_is,
TaskArgument(NULL, 0), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[1].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[1].region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
template<typename T>
__global__ void
topk_backward_kernel(const T* __restrict__ value_grad_ptr,
const int* __restrict__ indices_ptr,
T* __restrict__ in_grad_ptr,
size_t batch_size, int length, int k)
{
coord_t size = (coord_t)batch_size * k;
CUDA_KERNEL_LOOP(i, size)
{
coord_t batch_idx = i / k;
coord_t src_offset = batch_idx * length + indices_ptr[i];
in_grad_ptr[src_offset] += value_grad_ptr[i];
}
}
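// Example of the gradient scatter above (illustrative): with k = 2,
// length = 4 and indices = {3, 0} for batch row 0, the two incoming value
// gradients are accumulated into in_grad[0 * 4 + 3] and in_grad[0 * 4 + 0];
// every other input position keeps the zero written by assign_kernel in
// backward_kernel below.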
/*static*/
void TopK::backward_kernel(const TopKMeta* m,
const float* value_grad_ptr,
const int* indices_ptr,
float* in_grad_ptr,
size_t batch_size, int length, int k)
{
hipLaunchKernelGGL(( assign_kernel), dim3(GET_BLOCKS(batch_size*length)), dim3(CUDA_NUM_THREADS), 0, 0,
in_grad_ptr, batch_size * length, 0.0f);
hipLaunchKernelGGL(( topk_backward_kernel), dim3(GET_BLOCKS(batch_size*k)), dim3(CUDA_NUM_THREADS), 0, 0,
value_grad_ptr, indices_ptr, in_grad_ptr, batch_size, length, k);
}
/*
regions[0](I): out1_grad
regions[1](I): out2
  regions[2](I/O): in_grad
*/
void TopK::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
//const TopK* topk = (const TopK*) task->args;
const TopKMeta* m = *((TopKMeta**) task->local_args);
assert(regions.size() == 3);
Domain out1_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain out2_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(out1_domain == out2_domain);
for (int i = 1; i < in_domain.get_dim(); i++) {
assert(in_domain.lo()[i] == out1_domain.lo()[i]);
assert(in_domain.hi()[i] == out1_domain.hi()[i]);
}
const float* value_grad_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
const int* indices_ptr = helperGetTensorPointerRO<int>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
float* in_grad_ptr = helperGetTensorPointerRW<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
hipEvent_t t_start, t_end;
if (m->profiling) {
hipEventCreate(&t_start);
hipEventCreate(&t_end);
hipEventRecord(t_start);
}
int length = in_domain.hi()[0] - in_domain.lo()[0] + 1;
int k = out1_domain.hi()[0] - out1_domain.lo()[0] + 1;
size_t batch_size = in_domain.get_volume() / length;
#ifndef DISABLE_LEGION_CUDA_HIJACK
hipStream_t stream;
checkCUDA(hipStreamCreate(&stream));
checkCUDA(hipblasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
backward_kernel(m, value_grad_ptr, indices_ptr, in_grad_ptr,
batch_size, length, k);
}
void TopK::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(TOPK_BWD_TASK_ID, task_is,
TaskArgument(NULL, 0), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): value_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(0, FID_DATA);
// regions[1](I): indices
launcher.add_region_requirement(
RegionRequirement(outputs[1].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[1].region));
launcher.add_field(1, FID_DATA);
// regions[2](I/O): input_grad
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
TopKMeta::TopKMeta(FFHandler handler)
: OpMeta(handler)
{
}
bool TopK::measure_operator_cost(Simulator* sim,
const ParallelConfig& pc,
CostMetrics& cost_metrics)
{
// To be implemented
assert(false);
return false;
}
| 9ef4d42b9bf35a57b8035d7291963df17774ab11.cu | /* Copyright 2021 Facebook
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "model.h"
#include "cuda_helper.h"
// For an input tensor, computes the top k entries in each row
// (resp. vector along the last dimension). Thus,
// values.shape = indices.shape = input.shape[:-1] + [k]
void FFModel::top_k(const Tensor& input,
Tensor* outputs,
int k,
bool sorted,
const char *name)
{
TopK* topk = new TopK(*this, input, k, sorted, name);
layers.push_back(topk);
assert(topk->numOutputs == 2);
outputs[0] = topk->outputs[0];
outputs[1] = topk->outputs[1];
}
TopK::TopK(FFModel& model,
const Tensor& _input,
int _k, bool _sorted,
const char* name)
: Op(model, OP_TOPK, name, _input),
k(_k), sorted(_sorted)
{
numOutputs = 2;
outputs[0].numDim = inputs[0].numDim;
outputs[1].numDim = inputs[0].numDim;
outputs[0].adim[0] = k;
outputs[1].adim[0] = k;
for (int i = 1; i < inputs[0].numDim; i++) {
outputs[0].adim[i] = outputs[1].adim[i] = inputs[0].adim[i];
}
numWeights = 0;
}
void TopK::create_weights(FFModel& model)
{
// Do nothing
}
void TopK::create_output_and_partition(FFModel& model)
{
int dim = inputs[0].numDim;
switch (dim) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
create_output_and_partition_with_dim<DIM>(model); \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
{
      // Unsupported dim for TopK operator
assert(false);
}
}
}
template<int NDIM>
void TopK::create_output_and_partition_with_dim(FFModel& model)
{
  // Retrieve the task index space for the op
task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name));
Context ctx = model.config.lg_ctx;
Runtime* runtime = model.config.lg_hlr;
Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is);
int dims[NDIM];
for (int i = 0; i < NDIM; i++)
dims[i] = inputs[0].adim[NDIM-1-i];
outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this);
outputs[0].owner_op = this;
outputs[0].owner_idx = 0;
outputs[1] = model.create_tensor<NDIM>(dims, DT_INT32, this);
outputs[1].owner_op = this;
outputs[1].owner_idx = 1;
Rect<NDIM> input_rect;
input_rect = runtime->get_index_partition_color_space(
ctx, inputs[0].part.get_index_partition());
if (input_rect == part_rect) {
input_lps[0] = inputs[0].part;
input_grad_lps[0] = inputs[0].part_grad;
} else {
model.create_disjoint_partition<NDIM>(
inputs[0], IndexSpaceT<NDIM>(task_is), input_lps[0], input_grad_lps[0]);
}
}
OpMeta* TopK::init_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
TopK* topk = (TopK*) task->args;
FFHandler handle = *((FFHandler*)task->local_args);
TopKMeta* m = new TopKMeta(handle);
m->profiling = topk->profiling;
m->sorted = topk->sorted;
return m;
}
void TopK::init(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
ParallelConfig pc; \
std::string pcname = name; \
ff.config.find_parallel_config(DIM, pcname, pc); \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
FFHandler handle = ff.handlers[pc.device_ids[idx++]]; \
argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
  IndexLauncher launcher(TOPK_INIT_TASK_ID, task_is,
TaskArgument(this, sizeof(TopK)), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[1].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[1].region));
launcher.add_field(2, FID_DATA);
FutureMap fm = runtime->execute_index_space(ctx, launcher);
fm.wait_all_results();
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
meta[idx++] = fm.get_result<OpMeta*>(*it); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
}
enum class HeapType { kMinHeap, kMaxHeap };
enum class PreferIndices { kLower, kHigher };
template <typename T>
struct Entry {
int index;
T value;
};
template <typename T>
struct LinearData {
typedef Entry<T> Entry;
__device__ Entry& operator[](std::size_t index) const { return data[index]; }
__device__ int get_index(int i) const { return data[i].index; }
__device__ T get_value(int i) const { return data[i].value; }
Entry* const data;
};
template <typename T>
struct IndirectLinearData {
typedef Entry<T> Entry;
__device__ Entry& operator[](std::size_t index) const { return data[index]; }
__device__ int get_index(int i) const {
return backing_data[data[i].index].index;
}
__device__ T get_value(int i) const { return data[i].value; }
Entry* const data;
Entry* const backing_data;
};
template <typename T>
struct StridedData {
typedef Entry<T> Entry;
__device__ Entry& operator[](std::size_t index) const {
return data[index * blockDim.x + threadIdx.x];
}
__device__ int get_index(int i) const { return (*this)[i].index; }
__device__ T get_value(int i) const { return (*this)[i].value; }
Entry* const data;
};
// A heap of Entry<T> that can either work as a min-heap or as a max-heap.
template <HeapType heapType, PreferIndices preferIndices,
template <typename> class Data, typename T>
struct IndexedHeap {
typedef typename Data<T>::Entry Entry;
const Data<T> data;
__device__ IndexedHeap(const Data<T>& d) : data(d) {}
__device__ bool is_above(int left, int right) {
T left_value = data.get_value(left);
T right_value = data.get_value(right);
if (left_value == right_value) {
if (preferIndices == PreferIndices::kLower) {
return data.get_index(left) < data.get_index(right);
} else {
return data.get_index(left) > data.get_index(right);
}
}
if (heapType == HeapType::kMinHeap) {
return left_value < right_value;
} else {
return left_value > right_value;
}
}
__device__ void assign(int i, const Entry& entry) { data[i] = entry; }
__device__ void push_up(int i) {
int child = i;
int parent;
for (; child > 0; child = parent) {
parent = (child - 1) / 2;
if (!is_above(child, parent)) {
// Heap property satisfied.
break;
}
swap(child, parent);
}
}
__device__ void swap(int a, int b) {
auto tmp = data[b];
data[b] = data[a];
data[a] = tmp;
}
__device__ void push_root_down(int k) { push_down(0, k); }
// MAX-HEAPIFY in Cormen
__device__ void push_down(int node, int k) {
while (true) {
const int left = 2 * node + 1;
const int right = left + 1;
int smallest = node;
if (left < k && is_above(left, smallest)) {
smallest = left;
}
if (right < k && is_above(right, smallest)) {
smallest = right;
}
if (smallest == node) {
break;
}
swap(smallest, node);
node = smallest;
}
}
// BUILD-MAX-HEAPIFY in Cormen
__device__ void build(int k) {
for (int node = (k - 1) / 2; node >= 0; node--) {
push_down(node, k);
}
}
// HEAP-EXTRACT-MAX in Cormen
__device__ void remove_root(int k) {
data[0] = data[k - 1];
push_root_down(k - 1);
}
// in-place HEAPSORT in Cormen
// This method destroys the heap property.
__device__ void sort(int k) {
for (int slot = k - 1; slot > 0; slot--) {
// This is like remove_root but we insert the element at the end.
swap(slot, 0);
// Heap is now an element smaller.
push_root_down(/*k=*/slot);
}
}
__device__ void replace_root(const Entry& entry, int k) {
data[0] = entry;
push_root_down(k);
}
__device__ const Entry& root() { return data[0]; }
};
template <HeapType heapType, PreferIndices preferIndices,
template <typename> class Data, typename T>
__device__ IndexedHeap<heapType, preferIndices, Data, T> make_indexed_heap(
typename Data<T>::Entry* data) {
return IndexedHeap<heapType, preferIndices, Data, T>{Data<T>{data}};
}
// heapTopK walks over [input, input+length) with `step_size` stride starting at
// `start_index`.
// It builds a top-`k` heap that is stored in `heap_entries` using `Accessor` to
// access elements in `heap_entries`. If sorted=true, the elements will be
// sorted at the end.
template <typename T, template <typename> class Data = LinearData>
__device__ void heapTopK(const T* __restrict__ input, int length, int k,
Entry<T>* __restrict__ heap_entries,
bool sorted = false, int start_index = 0,
int step_size = 1)
{
assert(k <= length);
auto heap =
make_indexed_heap<HeapType::kMinHeap, PreferIndices::kHigher, Data, T>(
heap_entries);
int heap_end_index = start_index + k * step_size;
if (heap_end_index > length) {
heap_end_index = length;
}
// Initialize the min-heap.
for (int index = start_index, slot = 0; index < heap_end_index;
index += step_size, slot++) {
heap.assign(slot, {index, input[index]});
}
heap.build(k);
// Now iterate over the remaining items.
// If an item is smaller than the min element, it is not amongst the top k.
// Otherwise, replace the min element with it and push upwards.
for (int index = heap_end_index; index < length; index += step_size) {
// We prefer elements with lower indices. This is given here.
// Later elements automatically have higher indices, so can be discarded.
if (input[index] > heap.root().value) {
// This element should replace the min.
heap.replace_root({index, input[index]}, k);
}
}
// Sort if wanted.
if (sorted) {
heap.sort(k);
}
}
// mergeShards performs a top-k merge on `num_shards` many sorted streams that
// are sorted and stored in `entries` in a strided way:
// |s_1 1st|s_2 1st|...s_{num_shards} 1st|s_1 2nd|s_2 2nd|...
// The overall top k elements are written to `top_k_values` and their indices
// to top_k_indices.
// `top_k_heap` is used as temporary storage for the merge heap.
template <typename T> __device__
void mergeShards(int num_shards, int k,
Entry<T>* __restrict__ entries,
Entry<T>* __restrict__ top_k_heap, T* top_k_values,
int* top_k_indices)
{
// If k < num_shards, we can use a min-heap with k elements to get the top k
// of the sorted blocks.
// If k > num_shards, we can initialize a min-heap with the top element from
// each sorted block.
const int heap_size = k < num_shards ? k : num_shards;
// Min-heap part.
{
auto min_heap = IndexedHeap<HeapType::kMinHeap, PreferIndices::kHigher,
IndirectLinearData, T>{
IndirectLinearData<T>{top_k_heap, entries}};
// Initialize the heap as a min-heap.
for (int slot = 0; slot < heap_size; slot++) {
min_heap.assign(slot, {slot, entries[slot].value});
}
min_heap.build(heap_size);
// Now perform top k with the remaining shards (if num_shards > heap_size).
for (int shard = heap_size; shard < num_shards; shard++) {
const auto entry = entries[shard];
const auto root = min_heap.root();
if (entry.value < root.value) {
continue;
}
if (entry.value == root.value &&
entry.index > entries[root.index].index) {
continue;
}
// This element should replace the min.
min_heap.replace_root({shard, entry.value}, heap_size);
}
}
// Max-part.
{
// Turn the min-heap into a max-heap in-place.
auto max_heap = IndexedHeap<HeapType::kMaxHeap, PreferIndices::kLower,
IndirectLinearData, T>{
IndirectLinearData<T>{top_k_heap, entries}};
// Heapify into a max heap.
max_heap.build(heap_size);
// Now extract the minimum k-1 times.
// k is treated specially.
const int last_k = k - 1;
for (int rank = 0; rank < last_k; rank++) {
const Entry<T>& max_element = max_heap.root();
top_k_values[rank] = max_element.value;
int shard_index = max_element.index;
top_k_indices[rank] = entries[shard_index].index;
int next_shard_index = shard_index + num_shards;
// For rank < k-1, each top k heap still contains at least 1 element,
// so we can draw a replacement.
max_heap.replace_root({next_shard_index, entries[next_shard_index].value},
heap_size);
}
// rank == last_k.
const Entry<T>& max_element = max_heap.root();
top_k_values[last_k] = max_element.value;
int shard_index = max_element.index;
top_k_indices[last_k] = entries[shard_index].index;
}
}
template <typename T>
__global__ void
topk_forward_kernel(const T* __restrict__ input,
size_t shared_memory_size,
int length, int k, bool sorted,
T* __restrict__ output,
int* __restrict__ indices)
{
__shared__ char shared_memory[48 << 10];
const int batch_index = blockIdx.x;
const T* batch_input = input + batch_index * length;
const int thread_index = threadIdx.x;
const int thread_count = blockDim.x;
Entry<T>* shared_entries = (Entry<T>*)shared_memory;
heapTopK<T, StridedData>(batch_input, length, k, shared_entries, true,
thread_index, thread_count);
__syncthreads();
if (thread_index == 0) {
const int offset = batch_index * k;
auto batch_output = output + offset;
auto batch_indices = indices + offset;
Entry<T>* top_k_heap = shared_entries + thread_count * k;
mergeShards(thread_count, k, shared_entries, top_k_heap, batch_output,
batch_indices);
}
}
/*static*/
void TopK::forward_kernel(const TopKMeta* m,
const float* input_ptr,
float* output_ptr,
int* indices_ptr,
size_t batch_size, int length, int k,
bool sorted)
{
// Adopted from TensorFlow's TopK implementation
// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/topk_op_gpu.h
int num_shards = 0;
{
constexpr auto shared_memory_size = 48 << 10;
const auto heap_size = k * sizeof(Entry<float>);
// shared_memory_size = (num_shards + 1) * heap_size <=>
num_shards = shared_memory_size / heap_size - 1;
assert(num_shards > 0);
if (num_shards > CUDA_NUM_THREADS)
num_shards = CUDA_NUM_THREADS;
}
// We are limited by the amount of shared memory we have per block.
size_t shared_memory_size = (num_shards + 1) * k * sizeof(Entry<float>);
size_t num_blocks = (batch_size + num_shards - 1) / num_shards;
topk_forward_kernel<<<num_blocks, num_shards>>>(
input_ptr, shared_memory_size, length, k, sorted,
output_ptr, indices_ptr);
}
void TopK::forward_task(const Task* task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
assert(regions.size() == 3);
assert(task->regions.size() == 3);
//const TopK* topk = (const TopK*) task->args;
const TopKMeta* m = *((TopKMeta**)task->local_args);
Domain in1_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain out1_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain out2_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(out1_domain == out2_domain);
for (int i = 1; i < in1_domain.get_dim(); i++) {
assert(in1_domain.lo()[i] == out1_domain.lo()[i]);
assert(in1_domain.hi()[i] == out1_domain.hi()[i]);
}
const float* in_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
float* value_ptr = helperGetTensorPointerWO<float>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
int* index_ptr = helperGetTensorPointerWO<int>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
cudaEvent_t t_start, t_end;
if (m->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
int length = in1_domain.hi()[0] - in1_domain.lo()[0] + 1;
int k = out1_domain.hi()[0] - out1_domain.lo()[0] + 1;
size_t batch_size = in1_domain.get_volume() / length;
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
forward_kernel(m, in_ptr, value_ptr, index_ptr,
batch_size, length, k, m->sorted);
if (m->profiling) {
cudaEventRecord(t_end);
checkCUDA(cudaEventSynchronize(t_end));
float elapsed = 0;
checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end));
cudaEventDestroy(t_start);
cudaEventDestroy(t_end);
}
}
void TopK::forward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(TOPK_FWD_TASK_ID, task_is,
TaskArgument(NULL, 0), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
launcher.add_region_requirement(
RegionRequirement(input_lps[0], 0/*projection id*/,
READ_ONLY, EXCLUSIVE, inputs[0].region));
launcher.add_field(0, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[0].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[0].region));
launcher.add_field(1, FID_DATA);
launcher.add_region_requirement(
RegionRequirement(outputs[1].part, 0/*projection id*/,
WRITE_ONLY, EXCLUSIVE, outputs[1].region));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
template<typename T>
__global__ void
topk_backward_kernel(const T* __restrict__ value_grad_ptr,
const int* __restrict__ indices_ptr,
T* __restrict__ in_grad_ptr,
size_t batch_size, int length, int k)
{
coord_t size = (coord_t)batch_size * k;
CUDA_KERNEL_LOOP(i, size)
{
coord_t batch_idx = i / k;
coord_t src_offset = batch_idx * length + indices_ptr[i];
in_grad_ptr[src_offset] += value_grad_ptr[i];
}
}
/*static*/
void TopK::backward_kernel(const TopKMeta* m,
const float* value_grad_ptr,
const int* indices_ptr,
float* in_grad_ptr,
size_t batch_size, int length, int k)
{
assign_kernel<<<GET_BLOCKS(batch_size*length), CUDA_NUM_THREADS>>>(
in_grad_ptr, batch_size * length, 0.0f);
topk_backward_kernel<<<GET_BLOCKS(batch_size*k), CUDA_NUM_THREADS>>>(
value_grad_ptr, indices_ptr, in_grad_ptr, batch_size, length, k);
}
/*
regions[0](I): out1_grad
regions[1](I): out2
  regions[2](I/O): in_grad
*/
void TopK::backward_task(const Task *task,
const std::vector<PhysicalRegion> ®ions,
Context ctx, Runtime* runtime)
{
//const TopK* topk = (const TopK*) task->args;
const TopKMeta* m = *((TopKMeta**) task->local_args);
assert(regions.size() == 3);
Domain out1_domain = runtime->get_index_space_domain(
ctx, task->regions[0].region.get_index_space());
Domain out2_domain = runtime->get_index_space_domain(
ctx, task->regions[1].region.get_index_space());
Domain in_domain = runtime->get_index_space_domain(
ctx, task->regions[2].region.get_index_space());
assert(out1_domain == out2_domain);
for (int i = 1; i < in_domain.get_dim(); i++) {
assert(in_domain.lo()[i] == out1_domain.lo()[i]);
assert(in_domain.hi()[i] == out1_domain.hi()[i]);
}
const float* value_grad_ptr = helperGetTensorPointerRO<float>(
regions[0], task->regions[0], FID_DATA, ctx, runtime);
const int* indices_ptr = helperGetTensorPointerRO<int>(
regions[1], task->regions[1], FID_DATA, ctx, runtime);
float* in_grad_ptr = helperGetTensorPointerRW<float>(
regions[2], task->regions[2], FID_DATA, ctx, runtime);
cudaEvent_t t_start, t_end;
if (m->profiling) {
cudaEventCreate(&t_start);
cudaEventCreate(&t_end);
cudaEventRecord(t_start);
}
int length = in_domain.hi()[0] - in_domain.lo()[0] + 1;
int k = out1_domain.hi()[0] - out1_domain.lo()[0] + 1;
size_t batch_size = in_domain.get_volume() / length;
#ifndef DISABLE_LEGION_CUDA_HIJACK
cudaStream_t stream;
checkCUDA(cudaStreamCreate(&stream));
checkCUDA(cublasSetStream(m->handle.blas, stream));
checkCUDNN(cudnnSetStream(m->handle.dnn, stream));
#endif
backward_kernel(m, value_grad_ptr, indices_ptr, in_grad_ptr,
batch_size, length, k);
}
void TopK::backward(const FFModel& ff)
{
ArgumentMap argmap;
Context ctx = ff.config.lg_ctx;
Runtime* runtime = ff.config.lg_hlr;
Domain domain = runtime->get_index_space_domain(ctx, task_is);
switch (domain.get_dim()) {
#define DIMFUNC(DIM) \
case DIM: \
{ \
Rect<DIM> rect = domain; \
int idx = 0; \
for (PointInRectIterator<DIM> it(rect); it(); it++) { \
OpMeta* mp = meta[idx++]; \
argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \
} \
break; \
}
LEGION_FOREACH_N(DIMFUNC)
#undef DIMFUNC
default:
assert(false);
}
IndexLauncher launcher(TOPK_BWD_TASK_ID, task_is,
TaskArgument(NULL, 0), argmap,
Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/,
FFConfig::get_hash_id(std::string(name)));
// regions[0](I): value_grad
launcher.add_region_requirement(
RegionRequirement(outputs[0].part_grad, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[0].region_grad));
launcher.add_field(0, FID_DATA);
// regions[1](I): indices
launcher.add_region_requirement(
RegionRequirement(outputs[1].part, 0/*projection id*/,
READ_ONLY, EXCLUSIVE, outputs[1].region));
launcher.add_field(1, FID_DATA);
// regions[2](I/O): input_grad
launcher.add_region_requirement(
RegionRequirement(input_grad_lps[0], 0/*projection id*/,
READ_WRITE, EXCLUSIVE, inputs[0].region_grad));
launcher.add_field(2, FID_DATA);
runtime->execute_index_space(ctx, launcher);
}
TopKMeta::TopKMeta(FFHandler handler)
: OpMeta(handler)
{
}
bool TopK::measure_operator_cost(Simulator* sim,
const ParallelConfig& pc,
CostMetrics& cost_metrics)
{
// To be implemented
assert(false);
return false;
}
|
c7666a2259794a24333b9395029a8bc47bc88d7c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/rand_index.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct randIndexParam {
uint64_t nElements;
int lowerLabelRange;
int upperLabelRange;
double tolerance;
};
//test fixture class
template <typename T>
class randIndexTest : public ::testing::TestWithParam<randIndexParam> {
protected:
  //test set-up (runs before each test)
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<randIndexParam>::GetParam();
size = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
//generating random value test input
std::vector<int> arr1(size, 0);
std::vector<int> arr2(size, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
//generating the golden output
int64_t a_truth = 0, b_truth = 0, iter = 0, jiter;
for (; iter < size; ++iter) {
for (jiter = 0; jiter < iter; ++jiter) {
if (arr1[iter] == arr1[jiter] && arr2[iter] == arr2[jiter]) {
++a_truth;
} else if (arr1[iter] != arr1[jiter] && arr2[iter] != arr2[jiter]) {
++b_truth;
}
}
}
uint64_t nChooseTwo = (size * (size - 1)) / 2;
truthRandIndex =
(double)(((double)(a_truth + b_truth)) / (double)nChooseTwo);
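    //for reference, this is the Rand index R = (a + b) / C(n, 2); e.g. two
    //identical labelings of 4 points give a = 2, b = 4 and R = 6 / 6 = 1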
//allocating and initializing memory to the GPU
CUDA_CHECK(hipStreamCreate(&stream));
raft::allocate(firstClusterArray, size, true);
raft::allocate(secondClusterArray, size, true);
raft::update_device(firstClusterArray, &arr1[0], (int)size, stream);
raft::update_device(secondClusterArray, &arr2[0], (int)size, stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new raft::mr::device::default_allocator);
//calling the rand_index CUDA implementation
computedRandIndex = MLCommon::Metrics::compute_rand_index(
firstClusterArray, secondClusterArray, size, allocator, stream);
}
  //test tear-down (runs after each test)
void TearDown() override {
CUDA_CHECK(hipFree(firstClusterArray));
CUDA_CHECK(hipFree(secondClusterArray));
CUDA_CHECK(hipStreamDestroy(stream));
}
//declaring the data values
randIndexParam params;
int lowerLabelRange = 0, upperLabelRange = 2;
T* firstClusterArray = nullptr;
T* secondClusterArray = nullptr;
uint64_t size = 0;
double truthRandIndex = 0;
double computedRandIndex = 0;
hipStream_t stream;
};
//setting test parameter values
const std::vector<randIndexParam> inputs = {
{199, 1, 10, 0.000001}, {200, 1, 100, 0.000001}, {10, 1, 1200, 0.000001},
{100, 1, 10000, 0.000001}, {198, 1, 100, 0.000001}, {300, 3, 99, 0.000001},
{2, 0, 0, 0.00001}};
//writing the test suite
typedef randIndexTest<int> randIndexTestClass;
TEST_P(randIndexTestClass, Result) {
ASSERT_NEAR(computedRandIndex, truthRandIndex, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(randIndex, randIndexTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
| c7666a2259794a24333b9395029a8bc47bc88d7c.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/rand_index.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
//parameter structure definition
struct randIndexParam {
uint64_t nElements;
int lowerLabelRange;
int upperLabelRange;
double tolerance;
};
//test fixture class
template <typename T>
class randIndexTest : public ::testing::TestWithParam<randIndexParam> {
protected:
  //test set-up (runs before each test)
void SetUp() override {
//getting the parameters
params = ::testing::TestWithParam<randIndexParam>::GetParam();
size = params.nElements;
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
//generating random value test input
std::vector<int> arr1(size, 0);
std::vector<int> arr2(size, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
//generating the golden output
int64_t a_truth = 0, b_truth = 0, iter = 0, jiter;
for (; iter < size; ++iter) {
for (jiter = 0; jiter < iter; ++jiter) {
if (arr1[iter] == arr1[jiter] && arr2[iter] == arr2[jiter]) {
++a_truth;
} else if (arr1[iter] != arr1[jiter] && arr2[iter] != arr2[jiter]) {
++b_truth;
}
}
}
uint64_t nChooseTwo = (size * (size - 1)) / 2;
truthRandIndex =
(double)(((double)(a_truth + b_truth)) / (double)nChooseTwo);
//allocating and initializing memory to the GPU
CUDA_CHECK(cudaStreamCreate(&stream));
raft::allocate(firstClusterArray, size, true);
raft::allocate(secondClusterArray, size, true);
raft::update_device(firstClusterArray, &arr1[0], (int)size, stream);
raft::update_device(secondClusterArray, &arr2[0], (int)size, stream);
std::shared_ptr<MLCommon::deviceAllocator> allocator(
new raft::mr::device::default_allocator);
//calling the rand_index CUDA implementation
computedRandIndex = MLCommon::Metrics::compute_rand_index(
firstClusterArray, secondClusterArray, size, allocator, stream);
}
  //test tear-down (runs after each test)
void TearDown() override {
CUDA_CHECK(cudaFree(firstClusterArray));
CUDA_CHECK(cudaFree(secondClusterArray));
CUDA_CHECK(cudaStreamDestroy(stream));
}
//declaring the data values
randIndexParam params;
int lowerLabelRange = 0, upperLabelRange = 2;
T* firstClusterArray = nullptr;
T* secondClusterArray = nullptr;
uint64_t size = 0;
double truthRandIndex = 0;
double computedRandIndex = 0;
cudaStream_t stream;
};
//setting test parameter values
const std::vector<randIndexParam> inputs = {
{199, 1, 10, 0.000001}, {200, 1, 100, 0.000001}, {10, 1, 1200, 0.000001},
{100, 1, 10000, 0.000001}, {198, 1, 100, 0.000001}, {300, 3, 99, 0.000001},
{2, 0, 0, 0.00001}};
//writing the test suite
typedef randIndexTest<int> randIndexTestClass;
TEST_P(randIndexTestClass, Result) {
ASSERT_NEAR(computedRandIndex, truthRandIndex, params.tolerance);
}
INSTANTIATE_TEST_CASE_P(randIndex, randIndexTestClass,
::testing::ValuesIn(inputs));
} //end namespace Metrics
} //end namespace MLCommon
|
6789133f72fc2f0a91f28c422a4db10d7c3152b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
void cVecAdd(float *A, float *B, float *C)
{
for(long long i=0; i < (4096 * 16); ++i)
{
C[i] = A[i] + B[i];
}
}
__global__ void VecAdd(float *A, float *B, float *C)
{
long long i = threadIdx.x + blockIdx.x * blockDim.x;
C[i] = A[i] + B[i];
}
int main()
{
const long long N = 4096 * 16;
    dim3 NumberOfThreadsPerBlock (256, 1, 1);
    dim3 NumberOfBlocks ((N / NumberOfThreadsPerBlock.x), 1, 1);
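    // VecAdd only uses threadIdx.x and blockIdx.x, so a 1D launch suffices:
    // with N = 65536 this is 256 blocks of 256 threads, covering indices
    // 0 .. N-1 exactly while staying under the 1024-threads-per-block limit.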
//printf("Number of blocks %d ", NumberOfBlocks);
float A[N];
float B[N];
float C[N];
float *D_A, *D_B, *D_C;
clock_t start, end, cstart, cend;
double elapsed, celapsed;
size_t memSize = N * sizeof(float);
hipMalloc( (void**) &D_A, memSize);
hipMalloc( (void**) &D_B, memSize);
hipMalloc( (void**) &D_C, memSize);
for(long long i=0; i < N; ++i)
{
A[i] = i;
B[i] = i * 2.0;
C[i] = 0;
}
hipMemcpy(D_A, A, memSize, hipMemcpyHostToDevice);
hipMemcpy(D_B, B, memSize, hipMemcpyHostToDevice);
hipMemcpy(D_C, C, memSize, hipMemcpyHostToDevice);
hipDeviceSynchronize();
start = clock();
hipLaunchKernelGGL(( VecAdd), dim3(NumberOfBlocks), dim3(NumberOfThreadsPerBlock), 0, 0, D_A, D_B, D_C);
hipDeviceSynchronize();
hipMemcpy(C, D_C, memSize, hipMemcpyDeviceToHost);
end = clock();
elapsed = ((double)(end-start)) / CLOCKS_PER_SEC;
cstart = clock();
cVecAdd(A, B, C);
cend = clock();
celapsed = ((double)(cend - cstart)) / CLOCKS_PER_SEC;
printf("Time elapsed %f \n", elapsed);
printf("Time celapsed %f \n", celapsed);
hipFree(D_A);
hipFree(D_B);
hipFree(D_C);
}
| 6789133f72fc2f0a91f28c422a4db10d7c3152b5.cu | #include <stdio.h>
#include <time.h>
void cVecAdd(float *A, float *B, float *C)
{
for(long long i=0; i < (4096 * 16); ++i)
{
C[i] = A[i] + B[i];
}
}
__global__ void VecAdd(float *A, float *B, float *C)
{
long long i = threadIdx.x + blockIdx.x * blockDim.x;
C[i] = A[i] + B[i];
}
int main()
{
const long long N = 4096 * 16;
    dim3 NumberOfThreadsPerBlock (256, 1, 1);
    dim3 NumberOfBlocks ((N / NumberOfThreadsPerBlock.x), 1, 1);
//printf("Number of blocks %d ", NumberOfBlocks);
float A[N];
float B[N];
float C[N];
float *D_A, *D_B, *D_C;
clock_t start, end, cstart, cend;
double elapsed, celapsed;
size_t memSize = N * sizeof(float);
cudaMalloc( (void**) &D_A, memSize);
cudaMalloc( (void**) &D_B, memSize);
cudaMalloc( (void**) &D_C, memSize);
for(long long i=0; i < N; ++i)
{
A[i] = i;
B[i] = i * 2.0;
C[i] = 0;
}
cudaMemcpy(D_A, A, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(D_B, B, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(D_C, C, memSize, cudaMemcpyHostToDevice);
cudaThreadSynchronize();
start = clock();
VecAdd<<<NumberOfBlocks, NumberOfThreadsPerBlock>>>(D_A, D_B, D_C);
cudaThreadSynchronize();
cudaMemcpy(C, D_C, memSize, cudaMemcpyDeviceToHost);
end = clock();
elapsed = ((double)(end-start)) / CLOCKS_PER_SEC;
cstart = clock();
cVecAdd(A, B, C);
cend = clock();
celapsed = ((double)(cend - cstart)) / CLOCKS_PER_SEC;
printf("Time elapsed %f \n", elapsed);
printf("Time celapsed %f \n", celapsed);
cudaFree(D_A);
cudaFree(D_B);
cudaFree(D_C);
}
|
9c3e24047f498cdb92aa6de0e6731a45b9c59e90.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// System includes.
#include <stdio.h>
#include <iostream>
// STL.
#include <vector>
// CUDA runtime.
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA.
#include <helper_functions.h>
#include <helper_cuda.h>
// Device library includes.
#include "simpleDeviceLibrary.cuh"
using std::cout;
using std::endl;
using std::vector;
#define EPS 1e-5
typedef unsigned int uint;
typedef float(*deviceFunc)(float);
const char *sampleName = "simpleSeparateCompilation";
////////////////////////////////////////////////////////////////////////////////
// Auto-Verification Code
bool testResult = true;
////////////////////////////////////////////////////////////////////////////////
// Static device pointers to __device__ functions.
__device__ deviceFunc dMultiplyByTwoPtr = multiplyByTwo;
__device__ deviceFunc dDivideByTwoPtr = divideByTwo;
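// The address of a __device__ function cannot be taken directly from host
// code, so the sample stashes the pointers in the __device__ variables above
// and the host later copies one back with hipMemcpyFromSymbol() before
// passing it to transformVector() as an ordinary kernel argument.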
////////////////////////////////////////////////////////////////////////////////
// Kernels
////////////////////////////////////////////////////////////////////////////////
//! Transforms vector.
//! Applies the __device__ function "f" to each element of the vector "v".
////////////////////////////////////////////////////////////////////////////////
__global__ void transformVector(float *v, deviceFunc f, uint size)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size)
{
v[tid] = (*f)(v[tid]);
}
}
////////////////////////////////////////////////////////////////////////////////
// Declaration, forward
void runTest(int argc, const char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
cout << sampleName << " starting..." << endl;
runTest(argc, (const char **)argv);
cout << sampleName << " completed, returned "
<< (testResult ? "OK" : "ERROR") << endl;
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
void runTest(int argc, const char **argv)
{
try
{
int devID;
//hipError_t error;
hipDeviceProp_t deviceProp;
// This will pick the best possible CUDA capable device.
devID = findCudaDevice(argc, (const char **) argv);
checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID));
if (deviceProp.major < 2)
{
cout << sampleName
<< " requires a GPU with compute capability "
<< "2.0 or later, exiting..." << endl;
hipDeviceReset();
exit(EXIT_SUCCESS);
}
// Create host vector.
const uint kVectorSize = 1000;
vector<float> hVector(kVectorSize);
for (uint i = 0; i < kVectorSize; ++i)
{
hVector[i] = rand() / static_cast<float>(RAND_MAX);
}
// Create and populate device vector.
float *dVector;
checkCudaErrors(hipMalloc(&dVector, kVectorSize * sizeof(float)));
checkCudaErrors(hipMemcpy(dVector,
&hVector[0],
kVectorSize * sizeof(float),
hipMemcpyHostToDevice));
// Kernel configuration, where a one-dimensional
// grid and one-dimensional blocks are configured.
const int nThreads = 1024;
const int nBlocks = 1;
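// A single block is enough here: kVectorSize (1000) does not exceed nThreads (1024).
// For larger vectors one would use nBlocks = (kVectorSize + nThreads - 1) / nThreads.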
dim3 dimGrid(nBlocks);
dim3 dimBlock(nThreads);
// Test library functions.
deviceFunc hFunctionPtr;
hipMemcpyFromSymbol(&hFunctionPtr,
dMultiplyByTwoPtr,
sizeof(deviceFunc));
hipLaunchKernelGGL(( transformVector), dim3(dimGrid), dim3(dimBlock), 0, 0,
dVector, hFunctionPtr, kVectorSize);
checkCudaErrors(hipGetLastError());
hipMemcpyFromSymbol(&hFunctionPtr,
dDivideByTwoPtr,
sizeof(deviceFunc));
hipLaunchKernelGGL(( transformVector), dim3(dimGrid), dim3(dimBlock), 0, 0,
dVector, hFunctionPtr, kVectorSize);
checkCudaErrors(hipGetLastError());
// Download results.
vector<float> hResultVector(kVectorSize);
checkCudaErrors(hipMemcpy(&hResultVector[0],
dVector,
kVectorSize *sizeof(float),
hipMemcpyDeviceToHost));
// Check results.
for (int i = 0; i < kVectorSize; ++i)
{
if (fabs(hVector[i] - hResultVector[i]) > EPS)
{
cout << "Computations were incorrect..." << endl;
testResult = false;
break;
}
}
// Free resources.
if (dVector) checkCudaErrors(hipFree(dVector));
checkCudaErrors(hipDeviceReset());
}
catch (...)
{
cout << "Error occured, exiting..." << endl;
hipDeviceReset();
exit(EXIT_FAILURE);
}
}
| 9c3e24047f498cdb92aa6de0e6731a45b9c59e90.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// System includes.
#include <stdio.h>
#include <iostream>
// STL.
#include <vector>
// CUDA runtime.
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA.
#include <helper_functions.h>
#include <helper_cuda.h>
// Device library includes.
#include "simpleDeviceLibrary.cuh"
using std::cout;
using std::endl;
using std::vector;
#define EPS 1e-5
typedef unsigned int uint;
typedef float(*deviceFunc)(float);
const char *sampleName = "simpleSeparateCompilation";
////////////////////////////////////////////////////////////////////////////////
// Auto-Verification Code
bool testResult = true;
////////////////////////////////////////////////////////////////////////////////
// Static device pointers to __device__ functions.
__device__ deviceFunc dMultiplyByTwoPtr = multiplyByTwo;
__device__ deviceFunc dDivideByTwoPtr = divideByTwo;
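// Taking the address of a __device__ function is only valid in device code, so the
// pointers are initialized here and later copied back to the host with
// cudaMemcpyFromSymbol before being passed to the kernel.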
////////////////////////////////////////////////////////////////////////////////
// Kernels
////////////////////////////////////////////////////////////////////////////////
//! Transforms vector.
//! Applies the __device__ function "f" to each element of the vector "v".
////////////////////////////////////////////////////////////////////////////////
__global__ void transformVector(float *v, deviceFunc f, uint size)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < size)
{
v[tid] = (*f)(v[tid]);
}
}
////////////////////////////////////////////////////////////////////////////////
// Declaration, forward
void runTest(int argc, const char **argv);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
cout << sampleName << " starting..." << endl;
runTest(argc, (const char **)argv);
cout << sampleName << " completed, returned "
<< (testResult ? "OK" : "ERROR") << endl;
exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE);
}
void runTest(int argc, const char **argv)
{
try
{
int devID;
//cudaError_t error;
cudaDeviceProp deviceProp;
// This will pick the best possible CUDA capable device.
devID = findCudaDevice(argc, (const char **) argv);
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID));
if (deviceProp.major < 2)
{
cout << sampleName
<< " requires a GPU with compute capability "
<< "2.0 or later, exiting..." << endl;
cudaDeviceReset();
exit(EXIT_SUCCESS);
}
// Create host vector.
const uint kVectorSize = 1000;
vector<float> hVector(kVectorSize);
for (uint i = 0; i < kVectorSize; ++i)
{
hVector[i] = rand() / static_cast<float>(RAND_MAX);
}
// Create and populate device vector.
float *dVector;
checkCudaErrors(cudaMalloc(&dVector, kVectorSize * sizeof(float)));
checkCudaErrors(cudaMemcpy(dVector,
&hVector[0],
kVectorSize * sizeof(float),
cudaMemcpyHostToDevice));
// Kernel configuration, where a one-dimensional
// grid and one-dimensional blocks are configured.
const int nThreads = 1024;
const int nBlocks = 1;
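// A single block is enough here: kVectorSize (1000) does not exceed nThreads (1024).
// For larger vectors one would use nBlocks = (kVectorSize + nThreads - 1) / nThreads.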
dim3 dimGrid(nBlocks);
dim3 dimBlock(nThreads);
// Test library functions.
deviceFunc hFunctionPtr;
cudaMemcpyFromSymbol(&hFunctionPtr,
dMultiplyByTwoPtr,
sizeof(deviceFunc));
transformVector<<<dimGrid, dimBlock>>>
(dVector, hFunctionPtr, kVectorSize);
checkCudaErrors(cudaGetLastError());
cudaMemcpyFromSymbol(&hFunctionPtr,
dDivideByTwoPtr,
sizeof(deviceFunc));
transformVector<<<dimGrid, dimBlock>>>
(dVector, hFunctionPtr, kVectorSize);
checkCudaErrors(cudaGetLastError());
// Download results.
vector<float> hResultVector(kVectorSize);
checkCudaErrors(cudaMemcpy(&hResultVector[0],
dVector,
kVectorSize *sizeof(float),
cudaMemcpyDeviceToHost));
// Check results.
for (int i = 0; i < kVectorSize; ++i)
{
if (fabs(hVector[i] - hResultVector[i]) > EPS)
{
cout << "Computations were incorrect..." << endl;
testResult = false;
break;
}
}
// Free resources.
if (dVector) checkCudaErrors(cudaFree(dVector));
checkCudaErrors(cudaDeviceReset());
}
catch (...)
{
cout << "Error occured, exiting..." << endl;
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
|
119efd88f70c6281f77254b18bff0fb48d076737.hip | // !!! This is a file automatically generated by hipify!!!
#define TILE_WIDTH 30 // was 100: a 100x100 block is 10000 threads, above the 1024-per-block limit; 30x30 = 900 threads and 30 still divides 3000 evenly
#include <iostream>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
//cuda kernel
__global__ void sharedMem_transpose_pad(float* M, float* R, int dim1, int dim2)
{
// fill data into shared memory
__shared__ float M_Shared[TILE_WIDTH][TILE_WIDTH + 1];
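// The extra "+ 1" column pads each row of the tile so that threads reading a column
// of M_Shared hit different shared-memory banks (avoids bank conflicts).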
int ix, iy, index_in;
int i_row, i_col, _id_index, out_ix, out_iy, index_out;
ix = blockDim.x * blockIdx.x + threadIdx.x;
iy = blockDim.y * blockIdx.y + threadIdx.y;
index_in = iy * dim1 + ix;
_id_index = threadIdx.y * blockDim.x + threadIdx.x;
i_row = _id_index / blockDim.y;
i_col = _id_index % blockDim.y;
out_ix = blockIdx.y * blockDim.y + i_col;
out_iy = blockIdx.x * blockDim.x + i_row;
index_out = out_iy * dim2 + out_ix;
if (ix < dim1 && iy < dim2)
{
M_Shared[threadIdx.y][threadIdx.x] = M[index_in];
}
// __syncthreads() is the block-wide barrier needed here; hipDeviceSynchronize()
// is a host-side call and does not synchronize threads inside a kernel.
__syncthreads();
if (ix < dim1 && iy < dim2)
{
R[index_out] = M_Shared[i_col][i_row];
}
}
//host code
int main()
{
int const tile_size = TILE_WIDTH; // keep the launch tile in sync with the kernel's shared-memory tile
int const dim1 = 3000;
int const dim2 = 3000;
float* M_h;
float* R_h;
float* M_d;
float* R_d;
size_t size = dim1 * dim2 * sizeof(float);
hipHostMalloc((float**)& M_h, size); //page locked host mem allocation
R_h = (float*)malloc(size);
hipMalloc((float**)& M_d, size);
// init matrix
for (int i = 0; i < dim1 * dim2; ++i)
{
M_h[i] = i;
}
hipMemcpyAsync(M_d, M_h, size, hipMemcpyHostToDevice);
hipMalloc((float**)& R_d, size);
hipMemset(R_d, 0, size);
int threadNumX = tile_size;
int threadNumY = tile_size;
int blockNumX = dim1 / tile_size + (dim1 % tile_size == 0 ? 0 : 1);
int blockNumY = dim2 / tile_size + (dim2 % tile_size == 0 ? 0 : 1);
dim3 blockSize(threadNumX, threadNumY);
dim3 gridSize(blockNumX, blockNumY);
hipLaunchKernelGGL(( sharedMem_transpose_pad), dim3(gridSize), dim3(blockSize), 0, 0, M_d, R_d, dim1, dim2);
hipMemcpy(R_h, R_d, size, hipMemcpyDeviceToHost);
for (int i = 0; i < dim1; ++i)
{
for (int j = 0; j < dim2; ++j)
{
float num = R_h[i*dim2 + j];
cout << num << " ";
}
cout << endl;
}
hipHostFree(M_h); // pinned memory from hipHostMalloc must be released with hipHostFree, not free()
free(R_h);
hipFree(R_d);
hipFree(M_d);
return 0;
}
| 119efd88f70c6281f77254b18bff0fb48d076737.cu | #define TILE_WIDTH 30 // was 100: a 100x100 block is 10000 threads, above the 1024-per-block limit; 30x30 = 900 threads and 30 still divides 3000 evenly
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
//cuda kernel
__global__ void sharedMem_transpose_pad(float* M, float* R, int dim1, int dim2)
{
// fill data into shared memory
__shared__ float M_Shared[TILE_WIDTH][TILE_WIDTH + 1];
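// The extra "+ 1" column pads each row of the tile so that threads reading a column
// of M_Shared hit different shared-memory banks (avoids bank conflicts).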
int ix, iy, index_in;
int i_row, i_col, _id_index, out_ix, out_iy, index_out;
ix = blockDim.x * blockIdx.x + threadIdx.x;
iy = blockDim.y * blockIdx.y + threadIdx.y;
index_in = iy * dim1 + ix;
_id_index = threadIdx.y * blockDim.x + threadIdx.x;
i_row = _id_index / blockDim.y;
i_col = _id_index % blockDim.y;
out_ix = blockIdx.y * blockDim.y + i_col;
out_iy = blockIdx.x * blockDim.x + i_row;
index_out = out_iy * dim2 + out_ix;
if (ix < dim1 && iy < dim2)
{
M_Shared[threadIdx.y][threadIdx.x] = M[index_in];
}
// __syncthreads() is the block-wide barrier needed here; cudaDeviceSynchronize()
// is a host-side call and does not synchronize threads inside a kernel.
__syncthreads();
if (ix < dim1 && iy < dim2)
{
R[index_out] = M_Shared[i_col][i_row];
}
}
//host code
int main()
{
int const tile_size = TILE_WIDTH; // keep the launch tile in sync with the kernel's shared-memory tile
int const dim1 = 3000;
int const dim2 = 3000;
float* M_h;
float* R_h;
float* M_d;
float* R_d;
size_t size = dim1 * dim2 * sizeof(float);
cudaMallocHost((float**)& M_h, size); //page locked host mem allocation
R_h = (float*)malloc(size);
cudaMalloc((float**)& M_d, size);
// init matrix
for (int i = 0; i < dim1 * dim2; ++i)
{
M_h[i] = i;
}
cudaMemcpyAsync(M_d, M_h, size, cudaMemcpyHostToDevice);
cudaMalloc((float**)& R_d, size);
cudaMemset(R_d, 0, size);
int threadNumX = tile_size;
int threadNumY = tile_size;
int blockNumX = dim1 / tile_size + (dim1 % tile_size == 0 ? 0 : 1);
int blockNumY = dim2 / tile_size + (dim2 % tile_size == 0 ? 0 : 1);
dim3 blockSize(threadNumX, threadNumY);
dim3 gridSize(blockNumX, blockNumY);
sharedMem_transpose_pad<<<gridSize, blockSize>>>(M_d, R_d, dim1, dim2);
cudaMemcpy(R_h, R_d, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < dim1; ++i)
{
for (int j = 0; j < dim2; ++j)
{
float num = R_h[i*dim2 + j];
cout << num << " ";
}
cout << endl;
}
cudaFreeHost(M_h); // pinned memory from cudaMallocHost must be released with cudaFreeHost, not free()
free(R_h);
cudaFree(R_d);
cudaFree(M_d);
return 0;
}
|
0ea7805945659064e890fdb2e6d92c191092f16a.hip | // !!! This is a file automatically generated by hipify!!!
#include "head.h"
float *h_a;
float *h_b;
float *d_a;
void CPU_malloc(){
size_t size = N*sizeof(float);
h_a = (float *)malloc(size);
h_b = (float *)malloc(size);
}
void GPU_malloc(){
size_t size = N*sizeof(float);
hipError_t Error;
Error = hipMalloc((void**)&d_a,size);
printf("CUDA error(malloc d_a) = %s\n", hipGetErrorString(Error));
}
void Free(){
free(h_a);
free(h_b);
hipFree(d_a);
}
| 0ea7805945659064e890fdb2e6d92c191092f16a.cu | #include "head.h"
float *h_a;
float *h_b;
float *d_a;
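/*
 * Hedged sketch (editor's addition, not part of the original file): the manual
 * "capture the error code, print cudaGetErrorString" pattern used in GPU_malloc()
 * below can be wrapped in a macro so every runtime call is checked in one line.
 * The macro name is invented here for illustration only.
 */
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            printf("CUDA error = %s at %s:%d\n",                      \
                   cudaGetErrorString(err_), __FILE__, __LINE__);     \
        }                                                             \
    } while (0)
/* Example use: CUDA_CHECK(cudaMalloc((void**)&d_a, size)); */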
void CPU_malloc(){
size_t size = N*sizeof(float);
h_a = (float *)malloc(size);
h_b = (float *)malloc(size);
}
void GPU_malloc(){
size_t size = N*sizeof(float);
cudaError_t Error;
Error = cudaMalloc((void**)&d_a,size);
printf("CUDA error(malloc d_a) = %s\n", cudaGetErrorString(Error));
}
void Free(){
free(h_a);
free(h_b);
cudaFree(d_a);
}
|
e93f3755e3600e947fc4fd97bb056a92ff49f70d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <fstream>
#include <iomanip>
#include <iostream>
#include "matrix.hh"
#include "utils.hh"
namespace gpu_1::utils
{
Matrix::Matrix(std::size_t rows, std::size_t cols, value_t value)
: rows_(rows)
, cols_(cols)
, pitch_(0)
, data_(nullptr)
{
hipError_t rc = hipSuccess;
rc = hipMallocPitch(&this->data_, &this->pitch_, cols * sizeof(value_t), rows);
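// hipMallocPitch pads each row so rows start at aligned addresses; pitch_ receives
// the padded row width in bytes and must be used when indexing into data_.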
if (rc)
{
abortError("Fail buffer allocation");
}
}
Matrix::~Matrix()
{
hipError_t rc = hipSuccess;
rc = hipFree(this->data_);
if (rc)
{
abortError("Unable to free memory");
}
}
void Matrix::sub_matrix(std::size_t starting_row,
std::size_t starting_col,
std::size_t row_count,
std::size_t col_count,
matrix_device_t& result) const
{
hipLaunchKernelGGL(( sub_matrix_cuda), dim3(1), dim3(1), 0, 0,
this->data_, this->pitch_, starting_row, starting_col, row_count, col_count, result.data_, result.pitch_);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::matrix_transpose(matrix_device_t& result) const
{
hipLaunchKernelGGL(( matrix_transpose_cuda), dim3(1), dim3(1), 0, 0,
this->data_, this->pitch_, this->rows_, this->cols_, result.data_, result.pitch_);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
}
float Matrix::matrix_norm_2() const
{
float *norm_device;
hipError_t rc = hipSuccess;
rc = hipMalloc(&norm_device, sizeof(float));
if (rc)
{
abortError("Fail buffer allocation");
}
hipLaunchKernelGGL(( matrix_norm_2_cuda), dim3(1), dim3(1), 0, 0, this->data_, this->pitch_, this->rows_, this->cols_, norm_device);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
float norm_host;
rc = hipMemcpy(&norm_host, norm_device, sizeof(float), hipMemcpyDeviceToHost);
if (rc)
{
abortError("Fail buffer copy");
}
rc = hipFree(norm_device);
if (rc)
{
abortError("Fail buffer free");
}
return norm_host;
}
void Matrix::matrix_subtract_vector(const matrix_device_t& vector, matrix_device_t& result) const
{
hipLaunchKernelGGL(( matrix_subtract_vector_cuda), dim3(1), dim3(1), 0, 0, this->data_,
this->pitch_,
this->rows_,
this->cols_,
vector.data_,
vector.pitch_,
result.data_,
result.pitch_);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::matrix_add_vector(const matrix_device_t& vector, matrix_device_t& result) const
{
hipLaunchKernelGGL(( matrix_add_vector_cuda), dim3(1), dim3(1), 0, 0, this->data_,
this->pitch_,
this->rows_,
this->cols_,
vector.data_,
vector.pitch_,
result.data_,
result.pitch_);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::matrix_centroid(matrix_device_t& result) const
{
hipLaunchKernelGGL(( matrix_centroid_cuda), dim3(1), dim3(1), 0, 0,
this->data_, this->pitch_, this->rows_, this->cols_, result.data_, result.pitch_);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::multiply_by_scalar(float val, matrix_device_t& result) const
{
hipLaunchKernelGGL(( multiply_by_scalar_cuda), dim3(1), dim3(1), 0, 0,
this->data_, this->pitch_, this->rows_, this->cols_, val, result.data_, result.pitch_);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
}
float Matrix::matrix_diag_sum() const
{
float *sum_device;
hipError_t rc = hipSuccess;
rc = hipMalloc(&sum_device, sizeof(float));
if (rc)
{
abortError("Fail buffer allocation");
}
hipLaunchKernelGGL(( matrix_diag_sum_cuda), dim3(1), dim3(1), 0, 0, this->data_, this->pitch_, this->rows_, sum_device);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
float sum_host;
rc = hipMemcpy(&sum_host, sum_device, sizeof(float), hipMemcpyDeviceToHost);
if (rc)
{
abortError("Fail buffer copy");
}
rc = hipFree(sum_device);
if (rc)
{
abortError("Fail buffer free");
}
return sum_host;
}
void Matrix::set_val(std::size_t row, std::size_t col, value_t val)
{
hipLaunchKernelGGL(( set_val_cuda), dim3(1), dim3(1), 0, 0, this->data_, this->pitch_, row, col, val);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::set_val_ptr(std::size_t row, std::size_t col, value_t* val)
{
hipLaunchKernelGGL(( set_val_ptr_cuda), dim3(1), dim3(1), 0, 0, this->data_, this->pitch_, row, col, val);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
}
value_t Matrix::get_val(std::size_t row, std::size_t col) const
{
value_t *val_device;
hipError_t rc = hipSuccess;
rc = hipMalloc(&val_device, sizeof(value_t));
if (rc)
{
abortError("Fail buffer allocation");
}
hipLaunchKernelGGL(( get_val_cuda), dim3(1), dim3(1), 0, 0, this->data_, this->pitch_, row, col, val_device);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
value_t val_host; // declare as value_t so the sizeof(value_t) copy below matches the destination type
rc = hipMemcpy(&val_host, val_device, sizeof(value_t), hipMemcpyDeviceToHost);
if (rc)
{
abortError("Fail buffer copy");
}
rc = hipFree(val_device);
if (rc)
{
abortError("Fail buffer free");
}
return val_host;
}
void Matrix::print_matrix() const
{
std::cout << "rows: " << this->rows_ << " cols: " << this->cols_ << std::endl;
hipLaunchKernelGGL(( print_matrix_cuda), dim3(1), dim3(1), 0, 0, this->data_, this->pitch_, this->rows_, this->cols_);
hipDeviceSynchronize();
if (hipPeekAtLastError())
{
abortError("Computation Error");
}
}
} // namespace utils | e93f3755e3600e947fc4fd97bb056a92ff49f70d.cu | #include <fstream>
#include <iomanip>
#include <iostream>
#include "matrix.hh"
#include "utils.hh"
namespace gpu_1::utils
{
Matrix::Matrix(std::size_t rows, std::size_t cols, value_t value)
: rows_(rows)
, cols_(cols)
, pitch_(0)
, data_(nullptr)
{
cudaError_t rc = cudaSuccess;
rc = cudaMallocPitch(&this->data_, &this->pitch_, cols * sizeof(value_t), rows);
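// cudaMallocPitch pads each row so rows start at aligned addresses; pitch_ receives
// the padded row width in bytes and must be used when indexing into data_.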
if (rc)
{
abortError("Fail buffer allocation");
}
}
Matrix::~Matrix()
{
cudaError_t rc = cudaSuccess;
rc = cudaFree(this->data_);
if (rc)
{
abortError("Unable to free memory");
}
}
void Matrix::sub_matrix(std::size_t starting_row,
std::size_t starting_col,
std::size_t row_count,
std::size_t col_count,
matrix_device_t& result) const
{
sub_matrix_cuda<<<1, 1>>>(
this->data_, this->pitch_, starting_row, starting_col, row_count, col_count, result.data_, result.pitch_);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::matrix_transpose(matrix_device_t& result) const
{
matrix_transpose_cuda<<<1, 1>>>(
this->data_, this->pitch_, this->rows_, this->cols_, result.data_, result.pitch_);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
}
float Matrix::matrix_norm_2() const
{
float *norm_device;
cudaError_t rc = cudaSuccess;
rc = cudaMalloc(&norm_device, sizeof(float));
if (rc)
{
abortError("Fail buffer allocation");
}
matrix_norm_2_cuda<<<1, 1>>>(this->data_, this->pitch_, this->rows_, this->cols_, norm_device);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
float norm_host;
rc = cudaMemcpy(&norm_host, norm_device, sizeof(float), cudaMemcpyDeviceToHost);
if (rc)
{
abortError("Fail buffer copy");
}
rc = cudaFree(norm_device);
if (rc)
{
abortError("Fail buffer free");
}
return norm_host;
}
void Matrix::matrix_subtract_vector(const matrix_device_t& vector, matrix_device_t& result) const
{
matrix_subtract_vector_cuda<<<1, 1>>>(this->data_,
this->pitch_,
this->rows_,
this->cols_,
vector.data_,
vector.pitch_,
result.data_,
result.pitch_);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::matrix_add_vector(const matrix_device_t& vector, matrix_device_t& result) const
{
matrix_add_vector_cuda<<<1, 1>>>(this->data_,
this->pitch_,
this->rows_,
this->cols_,
vector.data_,
vector.pitch_,
result.data_,
result.pitch_);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::matrix_centroid(matrix_device_t& result) const
{
matrix_centroid_cuda<<<1, 1>>>(
this->data_, this->pitch_, this->rows_, this->cols_, result.data_, result.pitch_);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::multiply_by_scalar(float val, matrix_device_t& result) const
{
multiply_by_scalar_cuda<<<1, 1>>>(
this->data_, this->pitch_, this->rows_, this->cols_, val, result.data_, result.pitch_);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
}
float Matrix::matrix_diag_sum() const
{
float *sum_device;
cudaError_t rc = cudaSuccess;
rc = cudaMalloc(&sum_device, sizeof(float));
if (rc)
{
abortError("Fail buffer allocation");
}
matrix_diag_sum_cuda<<<1, 1>>>(this->data_, this->pitch_, this->rows_, sum_device);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
float sum_host;
rc = cudaMemcpy(&sum_host, sum_device, sizeof(float), cudaMemcpyDeviceToHost);
if (rc)
{
abortError("Fail buffer copy");
}
rc = cudaFree(sum_device);
if (rc)
{
abortError("Fail buffer free");
}
return sum_host;
}
void Matrix::set_val(std::size_t row, std::size_t col, value_t val)
{
set_val_cuda<<<1, 1>>>(this->data_, this->pitch_, row, col, val);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
}
void Matrix::set_val_ptr(std::size_t row, std::size_t col, value_t* val)
{
set_val_ptr_cuda<<<1, 1>>>(this->data_, this->pitch_, row, col, val);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
}
value_t Matrix::get_val(std::size_t row, std::size_t col) const
{
value_t *val_device;
cudaError_t rc = cudaSuccess;
rc = cudaMalloc(&val_device, sizeof(value_t));
if (rc)
{
abortError("Fail buffer allocation");
}
get_val_cuda<<<1, 1>>>(this->data_, this->pitch_, row, col, val_device);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
value_t val_host; // declare as value_t so the sizeof(value_t) copy below matches the destination type
rc = cudaMemcpy(&val_host, val_device, sizeof(value_t), cudaMemcpyDeviceToHost);
if (rc)
{
abortError("Fail buffer copy");
}
rc = cudaFree(val_device);
if (rc)
{
abortError("Fail buffer free");
}
return val_host;
}
void Matrix::print_matrix() const
{
std::cout << "rows: " << this->rows_ << " cols: " << this->cols_ << std::endl;
print_matrix_cuda<<<1, 1>>>(this->data_, this->pitch_, this->rows_, this->cols_);
cudaDeviceSynchronize();
if (cudaPeekAtLastError())
{
abortError("Computation Error");
}
}
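/*
 * Hedged sketch (editor's addition, not part of the original class): matrix_norm_2,
 * matrix_diag_sum and get_val above all repeat the same cudaMalloc / cudaMemcpy /
 * cudaFree dance for a single scalar. A small RAII holder like this could factor
 * that out; the name DeviceScalar is invented here for illustration only.
 */
template <typename T>
struct DeviceScalar
{
    T* ptr = nullptr;
    DeviceScalar()
    {
        // allocate room for exactly one T on the device
        if (cudaMalloc(reinterpret_cast<void**>(&ptr), sizeof(T)))
            abortError("Fail buffer allocation");
    }
    ~DeviceScalar() { cudaFree(ptr); }
    T to_host() const
    {
        // copy the single value back to the host
        T value{};
        if (cudaMemcpy(&value, ptr, sizeof(T), cudaMemcpyDeviceToHost))
            abortError("Fail buffer copy");
        return value;
    }
};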
} // namespace utils |
00f0403e588e303d6ca3f6b2da54a667e850c141.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaUtility.h"
// gpuPreImageNetRGB
__global__ void gpuPreImageNetRGB( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.x, px.y, px.z);
output[n * 0 + m] = bgr.x;
output[n * 1 + m] = bgr.y;
output[n * 2 + m] = bgr.z;
}
// cudaPreImageNetRGB
hipError_t cudaPreImageNetRGB( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNetRGB), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight);
return CUDA(hipGetLastError());
}
// gpuPreImageNetBGR
__global__ void gpuPreImageNetBGR( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z, px.y, px.x);
output[n * 0 + m] = bgr.x;
output[n * 1 + m] = bgr.y;
output[n * 2 + m] = bgr.z;
}
// cudaPreImageNetBGR
hipError_t cudaPreImageNetBGR( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNetBGR), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight);
return CUDA(hipGetLastError());
}
// gpuPreImageNetMeanRGB
__global__ void gpuPreImageNetMeanRGB( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.x - mean_value.x, px.y - mean_value.y, px.z - mean_value.z);
output[n * 0 + m] = bgr.x;
output[n * 1 + m] = bgr.y;
output[n * 2 + m] = bgr.z;
}
// cudaPreImageNetMeanRGB
hipError_t cudaPreImageNetMeanRGB( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
const float3& mean_value, hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNetMeanRGB), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight, mean_value);
return CUDA(hipGetLastError());
}
// gpuPreImageNetMeanBGR
__global__ void gpuPreImageNetMeanBGR( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z - mean_value.x, px.y - mean_value.y, px.x - mean_value.z);
output[n * 0 + m] = bgr.x;
output[n * 1 + m] = bgr.y;
output[n * 2 + m] = bgr.z;
}
// cudaPreImageNetMeanBGR
hipError_t cudaPreImageNetMeanBGR( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
const float3& mean_value, hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNetMeanBGR), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight, mean_value);
return CUDA(hipGetLastError());
}
// gpuPreImageNetNormRGB
__global__ void gpuPreImageNetNormRGB( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float multiplier, float min_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.x, px.y, px.z);
output[n * 0 + m] = bgr.x * multiplier + min_value;
output[n * 1 + m] = bgr.y * multiplier + min_value;
output[n * 2 + m] = bgr.z * multiplier + min_value;
}
// cudaPreImageNetNormRGB
hipError_t cudaPreImageNetNormRGB( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
const float2& range, hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float multiplier = (range.y - range.x) / 255.0f;
//printf("cudaPreImageNetNorm([%f, %f]) multiplier=%f\n", range.x, range.y, multiplier);
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNetNormRGB), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight, multiplier, range.x);
return CUDA(hipGetLastError());
}
// gpuPreImageNetNormBGR
__global__ void gpuPreImageNetNormBGR( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float multiplier, float min_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z, px.y, px.x);
output[n * 0 + m] = bgr.x * multiplier + min_value;
output[n * 1 + m] = bgr.y * multiplier + min_value;
output[n * 2 + m] = bgr.z * multiplier + min_value;
}
// cudaPreImageNetNorm
hipError_t cudaPreImageNetNormBGR( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
const float2& range, hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float multiplier = (range.y - range.x) / 255.0f;
//printf("cudaPreImageNetNorm([%f, %f]) multiplier=%f\n", range.x, range.y, multiplier);
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNetNormBGR), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight, multiplier, range.x);
return CUDA(hipGetLastError());
}
// gpuPreImageNetNormMeanRGB
__global__ void gpuPreImageNetNormMeanRGB( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float multiplier, float min_value, const float3 mean, const float3 stdDev )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.x * multiplier + min_value, px.y * multiplier + min_value, px.z * multiplier + min_value);
output[n * 0 + m] = (bgr.x - mean.x) / stdDev.x;
output[n * 1 + m] = (bgr.y - mean.y) / stdDev.y;
output[n * 2 + m] = (bgr.z - mean.z) / stdDev.z;
}
// cudaPreImageNetNormMeanRGB
hipError_t cudaPreImageNetNormMeanRGB( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, const float2& range, const float3& mean, const float3& stdDev, hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float multiplier = (range.y - range.x) / 255.0f;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNetNormMeanRGB), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight, multiplier, range.x, mean, stdDev);
return CUDA(hipGetLastError());
}
// gpuPreImageNetNormMeanBGR
__global__ void gpuPreImageNetNormMeanBGR( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float multiplier, float min_value, const float3 mean, const float3 stdDev )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z * multiplier + min_value, px.y * multiplier + min_value, px.x * multiplier + min_value);
output[n * 0 + m] = (bgr.x - mean.x) / stdDev.x;
output[n * 1 + m] = (bgr.y - mean.y) / stdDev.y;
output[n * 2 + m] = (bgr.z - mean.z) / stdDev.z;
}
// cudaPreImageNetNormMeanBGR
hipError_t cudaPreImageNetNormMeanBGR( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, const float2& range, const float3& mean, const float3& stdDev, hipStream_t stream )
{
if( !input || !output )
return hipErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return hipErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float multiplier = (range.y - range.x) / 255.0f;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
hipLaunchKernelGGL(( gpuPreImageNetNormMeanBGR), dim3(gridDim), dim3(blockDim), 0, stream, scale, input, inputWidth, output, outputWidth, outputHeight, multiplier, range.x, mean, stdDev);
return CUDA(hipGetLastError());
}
| 00f0403e588e303d6ca3f6b2da54a667e850c141.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaUtility.h"
// gpuPreImageNetRGB
__global__ void gpuPreImageNetRGB( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.x, px.y, px.z);
output[n * 0 + m] = bgr.x;
output[n * 1 + m] = bgr.y;
output[n * 2 + m] = bgr.z;
}
// cudaPreImageNetRGB
cudaError_t cudaPreImageNetRGB( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNetRGB<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight);
return CUDA(cudaGetLastError());
}
// gpuPreImageNetBGR
__global__ void gpuPreImageNetBGR( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z, px.y, px.x);
output[n * 0 + m] = bgr.x;
output[n * 1 + m] = bgr.y;
output[n * 2 + m] = bgr.z;
}
// cudaPreImageNetBGR
cudaError_t cudaPreImageNetBGR( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNetBGR<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight);
return CUDA(cudaGetLastError());
}
// gpuPreImageNetMeanRGB
__global__ void gpuPreImageNetMeanRGB( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.x - mean_value.x, px.y - mean_value.y, px.z - mean_value.z);
output[n * 0 + m] = bgr.x;
output[n * 1 + m] = bgr.y;
output[n * 2 + m] = bgr.z;
}
// cudaPreImageNetMeanRGB
cudaError_t cudaPreImageNetMeanRGB( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
const float3& mean_value, cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNetMeanRGB<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight, mean_value);
return CUDA(cudaGetLastError());
}
// gpuPreImageNetMeanBGR
__global__ void gpuPreImageNetMeanBGR( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float3 mean_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z - mean_value.x, px.y - mean_value.y, px.x - mean_value.z);
output[n * 0 + m] = bgr.x;
output[n * 1 + m] = bgr.y;
output[n * 2 + m] = bgr.z;
}
// cudaPreImageNetMeanBGR
cudaError_t cudaPreImageNetMeanBGR( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
const float3& mean_value, cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNetMeanBGR<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight, mean_value);
return CUDA(cudaGetLastError());
}
// gpuPreImageNetNormRGB
__global__ void gpuPreImageNetNormRGB( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float multiplier, float min_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.x, px.y, px.z);
output[n * 0 + m] = bgr.x * multiplier + min_value;
output[n * 1 + m] = bgr.y * multiplier + min_value;
output[n * 2 + m] = bgr.z * multiplier + min_value;
}
// cudaPreImageNetNormRGB
cudaError_t cudaPreImageNetNormRGB( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
const float2& range, cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float multiplier = (range.y - range.x) / 255.0f;
//printf("cudaPreImageNetNorm([%f, %f]) multiplier=%f\n", range.x, range.y, multiplier);
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNetNormRGB<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight, multiplier, range.x);
return CUDA(cudaGetLastError());
}
// gpuPreImageNetNormBGR
__global__ void gpuPreImageNetNormBGR( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float multiplier, float min_value )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z, px.y, px.x);
output[n * 0 + m] = bgr.x * multiplier + min_value;
output[n * 1 + m] = bgr.y * multiplier + min_value;
output[n * 2 + m] = bgr.z * multiplier + min_value;
}
// cudaPreImageNetNorm
cudaError_t cudaPreImageNetNormBGR( float4* input, size_t inputWidth, size_t inputHeight,
float* output, size_t outputWidth, size_t outputHeight,
const float2& range, cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float multiplier = (range.y - range.x) / 255.0f;
//printf("cudaPreImageNetNorm([%f, %f]) multiplier=%f\n", range.x, range.y, multiplier);
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNetNormBGR<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight, multiplier, range.x);
return CUDA(cudaGetLastError());
}
// gpuPreImageNetNormMeanRGB
__global__ void gpuPreImageNetNormMeanRGB( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float multiplier, float min_value, const float3 mean, const float3 stdDev )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.x * multiplier + min_value, px.y * multiplier + min_value, px.z * multiplier + min_value);
output[n * 0 + m] = (bgr.x - mean.x) / stdDev.x;
output[n * 1 + m] = (bgr.y - mean.y) / stdDev.y;
output[n * 2 + m] = (bgr.z - mean.z) / stdDev.z;
}
// cudaPreImageNetNormMeanRGB
cudaError_t cudaPreImageNetNormMeanRGB( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, const float2& range, const float3& mean, const float3& stdDev, cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float multiplier = (range.y - range.x) / 255.0f;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNetNormMeanRGB<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight, multiplier, range.x, mean, stdDev);
return CUDA(cudaGetLastError());
}
// gpuPreImageNetNormMeanBGR
__global__ void gpuPreImageNetNormMeanBGR( float2 scale, float4* input, int iWidth, float* output, int oWidth, int oHeight, float multiplier, float min_value, const float3 mean, const float3 stdDev )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= oWidth || y >= oHeight )
return;
const int n = oWidth * oHeight;
const int m = y * oWidth + x;
const int dx = ((float)x * scale.x);
const int dy = ((float)y * scale.y);
const float4 px = input[ dy * iWidth + dx ];
const float3 bgr = make_float3(px.z * multiplier + min_value, px.y * multiplier + min_value, px.x * multiplier + min_value);
output[n * 0 + m] = (bgr.x - mean.x) / stdDev.x;
output[n * 1 + m] = (bgr.y - mean.y) / stdDev.y;
output[n * 2 + m] = (bgr.z - mean.z) / stdDev.z;
}
// cudaPreImageNetNormMeanBGR
cudaError_t cudaPreImageNetNormMeanBGR( float4* input, size_t inputWidth, size_t inputHeight, float* output, size_t outputWidth, size_t outputHeight, const float2& range, const float3& mean, const float3& stdDev, cudaStream_t stream )
{
if( !input || !output )
return cudaErrorInvalidDevicePointer;
if( inputWidth == 0 || outputWidth == 0 || inputHeight == 0 || outputHeight == 0 )
return cudaErrorInvalidValue;
const float2 scale = make_float2( float(inputWidth) / float(outputWidth),
float(inputHeight) / float(outputHeight) );
const float multiplier = (range.y - range.x) / 255.0f;
// launch kernel
const dim3 blockDim(8, 8);
const dim3 gridDim(iDivUp(outputWidth,blockDim.x), iDivUp(outputHeight,blockDim.y));
gpuPreImageNetNormMeanBGR<<<gridDim, blockDim, 0, stream>>>(scale, input, inputWidth, output, outputWidth, outputHeight, multiplier, range.x, mean, stdDev);
return CUDA(cudaGetLastError());
}
|
a2225f0b701982eccc6359f2d1747ff072615aab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Python.h>
#include <emc_cuda.h>
#include "cuda_tools.h"
using namespace std;
__global__ void kernel_set_to_value(float *const array,
const int size,
const float value)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
array[index] = value;
}
}
void set_to_value(float *const array,
const int size,
const float value)
{
const int nthreads = NTHREADS;
const int nblocks = (size-1) / nthreads + 1;
hipLaunchKernelGGL(( kernel_set_to_value), dim3(nblocks), dim3(nthreads), 0, 0, array, size, value);
cudaErrorCheck(hipPeekAtLastError());
cudaErrorCheck(hipDeviceSynchronize());
}
__global__ void kernel_masked_set(float *const array,
const int *const mask,
const int size,
const float value)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size && mask[index] > 0) {
array[index] = value;
}
}
void masked_set(float *const array,
const int *const mask,
const int size,
const float value)
{
const int nthreads = NTHREADS;
const int nblocks = (size-1) / nthreads + 1;
hipLaunchKernelGGL(( kernel_masked_set), dim3(nblocks), dim3(nthreads), 0, 0, array, mask, size, value);
cudaErrorCheck(hipPeekAtLastError());
cudaErrorCheck(hipDeviceSynchronize());
}
float *int_to_float_pointer(const unsigned long long pointer_int)
{
float *pointer = (float *)pointer_int;
return pointer;
}
int *int_to_int_pointer(const unsigned long long pointer_int)
{
int *pointer = (int *)pointer_int;
return pointer;
}
__device__ void quaternion_multiply(float *const res, const float *const quat1, const float *const quat2)
{
res[0] = quat1[0]*quat2[0] - quat1[1]*quat2[1] - quat1[2]*quat2[2] - quat1[3]*quat2[3];
res[1] = quat1[0]*quat2[1] + quat1[1]*quat2[0] + quat1[2]*quat2[3] - quat1[3]*quat2[2];
res[2] = quat1[0]*quat2[2] - quat1[1]*quat2[3] + quat1[2]*quat2[0] + quat1[3]*quat2[1];
res[3] = quat1[0]*quat2[3] + quat1[1]*quat2[2] - quat1[2]*quat2[1] + quat1[3]*quat2[0];
}
__device__ void device_interpolate_get_coordinate_weight(const float coordinate,
const int side,
int *low_coordinate,
float *low_weight,
float *high_weight,
int *out_of_range)
{
*low_coordinate = (int)ceil(coordinate) - 1;
*low_weight = ceil(coordinate) - coordinate;
*high_weight = 1.-*low_weight;
if (*low_coordinate < -1) {
*out_of_range = 1;
} else if (*low_coordinate == -1) {
*low_weight = 0.;
} else if (*low_coordinate == side-1) {
*high_weight = 0.;
} else if (*low_coordinate > side-1) {
*out_of_range = 1;
}
}
__device__ float device_model_get(const float *const model,
const int model_x,
const int model_y,
const int model_z,
const float coordinate_x,
const float coordinate_y,
const float coordinate_z)
{
int low_x, low_y, low_z;
float low_weight_x, low_weight_y, low_weight_z;
float high_weight_x, high_weight_y, high_weight_z;
int out_of_range = 0;
device_interpolate_get_coordinate_weight(coordinate_x, model_x,
&low_x, &low_weight_x,
&high_weight_x, &out_of_range);
device_interpolate_get_coordinate_weight(coordinate_y, model_y,
&low_y, &low_weight_y,
&high_weight_y, &out_of_range);
device_interpolate_get_coordinate_weight(coordinate_z, model_z,
&low_z, &low_weight_z,
&high_weight_z, &out_of_range);
if (out_of_range != 0) {
return -1.f;
} else {
float interp_sum = 0.;
float interp_weight = 0.;
int index_x, index_y, index_z;
float weight_x, weight_y, weight_z;
for (index_x = low_x; index_x <= low_x+1; index_x += 1) {
if (index_x == low_x && low_weight_x == 0.) continue;
if (index_x == (low_x+1) && high_weight_x == 0.) continue;
if (index_x == low_x) weight_x = low_weight_x;
else weight_x = high_weight_x;
for (index_y = low_y; index_y <= low_y+1; index_y += 1) {
if (index_y == low_y && low_weight_y == 0.) continue;
if (index_y == (low_y+1) && high_weight_y == 0.) continue;
if (index_y == low_y) weight_y = low_weight_y;
else weight_y = high_weight_y;
for (index_z = low_z; index_z <= low_z+1; index_z += 1) {
if (index_z == low_z && low_weight_z == 0.) continue;
if (index_z == (low_z+1) && high_weight_z == 0.) continue;
if (index_z == low_z) weight_z = low_weight_z;
else weight_z = high_weight_z;
if (model[model_z*model_y*index_x + model_z*index_y + index_z] >= 0.) {
interp_sum += weight_x*weight_y*weight_z*model[model_z*model_y*index_x + model_z*index_y + index_z];
interp_weight += weight_x*weight_y*weight_z;
}
}
}
}
if (interp_weight > 0.) {
return interp_sum / interp_weight;
} else {
return -1.f;
}
}
}
__device__ void device_get_slice(const float *const model,
const int model_x,
const int model_y,
const int model_z,
float *const slice,
const int image_x,
const int image_y,
const float *const rotation,
const float *const coordinates) {
const float *const coordinates_0 = &coordinates[0*image_x*image_y];
const float *const coordinates_1 = &coordinates[1*image_x*image_y];
const float *const coordinates_2 = &coordinates[2*image_x*image_y];
float m00 = (rotation[0]*rotation[0] + rotation[1]*rotation[1] -
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m01 = 2.0f*rotation[1]*rotation[2] - 2.0f*rotation[0]*rotation[3];
float m02 = 2.0f*rotation[1]*rotation[3] + 2.0f*rotation[0]*rotation[2];
float m10 = 2.0f*rotation[1]*rotation[2] + 2.0f*rotation[0]*rotation[3];
float m11 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] +
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m12 = 2.0f*rotation[2]*rotation[3] - 2.0f*rotation[0]*rotation[1];
float m20 = 2.0f*rotation[1]*rotation[3] - 2.0f*rotation[0]*rotation[2];
float m21 = 2.0f*rotation[2]*rotation[3] + 2.0f*rotation[0]*rotation[1];
float m22 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] -
rotation[2]*rotation[2] + rotation[3]*rotation[3]);
float new_x, new_y, new_z;
for (int x = 0; x < image_x; x++) {
for (int y = threadIdx.x; y < image_y; y+=blockDim.x) {
/* This is just a matrix multiplication with rotation */
new_x = (m00*coordinates_0[x*image_y+y] +
m01*coordinates_1[x*image_y+y] +
m02*coordinates_2[x*image_y+y] +
model_x/2.0 - 0.5);
new_y = (m10*coordinates_0[x*image_y+y] +
m11*coordinates_1[x*image_y+y] +
m12*coordinates_2[x*image_y+y] +
model_y/2.0 - 0.5);
new_z = (m20*coordinates_0[x*image_y+y] +
m21*coordinates_1[x*image_y+y] +
m22*coordinates_2[x*image_y+y] +
model_z/2.0 - 0.5);
slice[x*image_y+y] = device_model_get(model, model_x, model_y, model_z, new_x, new_y, new_z);
}
}
}
__global__ void kernel_expand_model(const float *const model,
const int model_x,
const int model_y,
const int model_z,
float *const slices,
const int image_x,
const int image_y,
const float *const rotations,
const float *const coordinates)
{
const int rotation_index = blockIdx.x;
device_get_slice(model,
model_x,
model_y,
model_z,
&slices[image_x*image_y*rotation_index],
image_x,
image_y,
&rotations[4*rotation_index],
coordinates);
}
void expand_model(const float *const model,
const int model_x,
const int model_y,
const int model_z,
float *const slices,
const int image_x,
const int image_y,
const float *const rotations,
const int number_of_rotations,
const float *const coordinates)
{
int nblocks = number_of_rotations;
int nthreads = NTHREADS;
hipLaunchKernelGGL(( kernel_expand_model), dim3(nblocks), dim3(nthreads), 0, 0, model,
model_x,
model_y,
model_z,
slices,
image_x,
image_y,
rotations,
coordinates);
cudaErrorCheck(hipPeekAtLastError());
cudaErrorCheck(hipDeviceSynchronize());
}
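/* Scatter counterpart of device_model_get: distribute value*value_weight over
   the eight voxels surrounding the given coordinate using trilinear weights.
   Both the weighted value and the weight itself are accumulated with atomicAdd,
   since many slices are inserted into the same model concurrently. */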
__device__ void device_model_set(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float coordinate_x,
const float coordinate_y,
const float coordinate_z,
const float value,
const float value_weight)
{
int low_x, low_y, low_z;
float low_weight_x, low_weight_y, low_weight_z;
float high_weight_x, high_weight_y, high_weight_z;
int out_of_range = 0;
device_interpolate_get_coordinate_weight(coordinate_x, model_x,
&low_x, &low_weight_x,
&high_weight_x, &out_of_range);
device_interpolate_get_coordinate_weight(coordinate_y, model_y,
&low_y, &low_weight_y,
&high_weight_y, &out_of_range);
device_interpolate_get_coordinate_weight(coordinate_z, model_z,
&low_z, &low_weight_z,
&high_weight_z, &out_of_range);
if (out_of_range == 0) {
int index_x, index_y, index_z;
float weight_x, weight_y, weight_z;
for (index_x = low_x; index_x <= low_x+1; index_x += 1) {
if (index_x == low_x && low_weight_x == 0.) continue;
if (index_x == (low_x+1) && high_weight_x == 0.) continue;
if (index_x == low_x) weight_x = low_weight_x;
else weight_x = high_weight_x;
for (index_y = low_y; index_y <= low_y+1; index_y += 1) {
if (index_y == low_y && low_weight_y == 0.) continue;
if (index_y == (low_y+1) && high_weight_y == 0.) continue;
if (index_y == low_y) weight_y = low_weight_y;
else weight_y = high_weight_y;
for (index_z = low_z; index_z <= low_z+1; index_z += 1) {
if (index_z == low_z && low_weight_z == 0.) continue;
if (index_z == (low_z+1) && high_weight_z == 0.) continue;
if (index_z == low_z) weight_z = low_weight_z;
else weight_z = high_weight_z;
atomicAdd(&model[model_z*model_y*index_x + model_z*index_y + index_z],
weight_x*weight_y*weight_z*value_weight*value);
atomicAdd(&model_weights[model_z*model_y*index_x + model_z*index_y + index_z],
weight_x*weight_y*weight_z*value_weight);
}
}
}
}
}
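/* Nearest-neighbour variant of device_model_set: round the coordinate to the
   closest voxel and accumulate value and weight there if it is inside the model. */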
__device__ void device_model_set_nn(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float coordinate_x,
const float coordinate_y,
const float coordinate_z,
const float value,
const float value_weight)
{
int index_x = (int) (coordinate_x + 0.5);
int index_y = (int) (coordinate_y + 0.5);
int index_z = (int) (coordinate_z + 0.5);
if (index_x >= 0 && index_x < model_x &&
index_y >= 0 && index_y < model_y &&
index_z >= 0 && index_z < model_z) {
atomicAdd(&model[model_z*model_y*index_x + model_z*index_y + index_z],
value_weight*value);
atomicAdd(&model_weights[model_z*model_y*index_x + model_z*index_y + index_z],
value_weight);
}
}
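/* Insert one 2D slice into the 3D model: rotate each pixel coordinate by the
   quaternion, skip negative (masked) pixels, and scatter the remaining values
   with nearest-neighbour (interpolation == 0) or trilinear insertion, weighted
   by slice_weight. */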
__device__ void device_insert_slice(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float *const slice,
const int image_x,
const int image_y,
const float slice_weight,
const float *const rotation,
const float *const coordinates,
const int interpolation)
{
const float *const coordinates_0 = &coordinates[0*image_x*image_y];
const float *const coordinates_1 = &coordinates[1*image_x*image_y];
const float *const coordinates_2 = &coordinates[2*image_x*image_y];
float m00 = (rotation[0]*rotation[0] + rotation[1]*rotation[1] -
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m01 = 2.0f*rotation[1]*rotation[2] - 2.0f*rotation[0]*rotation[3];
float m02 = 2.0f*rotation[1]*rotation[3] + 2.0f*rotation[0]*rotation[2];
float m10 = 2.0f*rotation[1]*rotation[2] + 2.0f*rotation[0]*rotation[3];
float m11 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] +
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m12 = 2.0f*rotation[2]*rotation[3] - 2.0f*rotation[0]*rotation[1];
float m20 = 2.0f*rotation[1]*rotation[3] - 2.0f*rotation[0]*rotation[2];
float m21 = 2.0f*rotation[2]*rotation[3] + 2.0f*rotation[0]*rotation[1];
float m22 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] -
rotation[2]*rotation[2] + rotation[3]*rotation[3]);
float new_x, new_y, new_z;
for (int x = 0; x < image_x; x++) {
for (int y = threadIdx.x; y < image_y; y+=blockDim.x) {
if (slice[x*image_y+y] >= 0.) {
/* This is just a matrix multiplication with rotation */
new_x = (m00*coordinates_0[x*image_y+y] +
m01*coordinates_1[x*image_y+y] +
m02*coordinates_2[x*image_y+y] +
model_x/2.0 - 0.5);
new_y = (m10*coordinates_0[x*image_y+y] +
m11*coordinates_1[x*image_y+y] +
m12*coordinates_2[x*image_y+y] +
model_y/2.0 - 0.5);
new_z = (m20*coordinates_0[x*image_y+y] +
m21*coordinates_1[x*image_y+y] +
m22*coordinates_2[x*image_y+y] +
model_z/2.0 - 0.5);
if (interpolation == 0) {
device_model_set_nn(model, model_weights,
model_x, model_y, model_z,
new_x, new_y, new_z,
slice[x*image_y+y], slice_weight);
} else {
device_model_set(model, model_weights,
model_x, model_y, model_z,
new_x, new_y, new_z,
slice[x*image_y+y], slice_weight);
}
}
}
}
}
__global__ void kernel_insert_slices(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float *const slices,
const int image_x,
const int image_y,
const float *const slice_weights,
const float *const rotations,
const float *const coordinates,
const int interpolation) {
const int rotation_index = blockIdx.x;
device_insert_slice(model,
model_weights,
model_x,
model_y,
model_z,
&slices[image_x*image_y*rotation_index],
image_x,
image_y,
slice_weights[rotation_index],
&rotations[4*rotation_index],
coordinates,
interpolation);
}
void insert_slices(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float *const slices,
const int image_x,
const int image_y,
const float *const slice_weights,
const float *const rotations,
const int number_of_rotations,
const float *const coordinates,
const int interpolation)
{
int nblocks = number_of_rotations;
int nthreads = NTHREADS;
hipLaunchKernelGGL(( kernel_insert_slices), dim3(nblocks), dim3(nthreads), 0, 0, model,
model_weights,
model_x,
model_y,
model_z,
slices,
image_x,
image_y,
slice_weights,
rotations,
coordinates,
interpolation);
cudaErrorCheck(hipPeekAtLastError());
cudaErrorCheck(hipDeviceSynchronize());
}
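/* Same as device_insert_slice, but the output arrays cover only the sub-volume
   [model_*_min, model_*_max) of a full model of size model_*_tot: rotated
   coordinates are computed in full-model space and then shifted by the minimum
   corner before being scattered. */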
__device__ void device_insert_slice_partial(float *const model,
float *const model_weights,
const int model_x_tot,
const int model_x_min,
const int model_x_max,
const int model_y_tot,
const int model_y_min,
const int model_y_max,
const int model_z_tot,
const int model_z_min,
const int model_z_max,
const float *const slice,
const int image_x,
const int image_y,
const float slice_weight,
const float *const rotation,
const float *const coordinates,
const int interpolation)
{
const float *const coordinates_0 = &coordinates[0*image_x*image_y];
const float *const coordinates_1 = &coordinates[1*image_x*image_y];
const float *const coordinates_2 = &coordinates[2*image_x*image_y];
float m00 = (rotation[0]*rotation[0] + rotation[1]*rotation[1] -
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m01 = 2.0f*rotation[1]*rotation[2] - 2.0f*rotation[0]*rotation[3];
float m02 = 2.0f*rotation[1]*rotation[3] + 2.0f*rotation[0]*rotation[2];
float m10 = 2.0f*rotation[1]*rotation[2] + 2.0f*rotation[0]*rotation[3];
float m11 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] +
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m12 = 2.0f*rotation[2]*rotation[3] - 2.0f*rotation[0]*rotation[1];
float m20 = 2.0f*rotation[1]*rotation[3] - 2.0f*rotation[0]*rotation[2];
float m21 = 2.0f*rotation[2]*rotation[3] + 2.0f*rotation[0]*rotation[1];
float m22 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] -
rotation[2]*rotation[2] + rotation[3]*rotation[3]);
if (blockIdx.x == 0 && threadIdx.x == 0) {
printf("inside kernel\n");
/*
printf("%i %i %i\n", model_x_tot, model_x_min, model_x_max);
printf("%i %i %i\n", model_y_tot, model_y_min, model_y_max);
printf("%i %i %i\n", model_z_tot, model_z_min, model_z_max);
*/
printf("%i %i %i\n", model_x_max-model_x_min, model_y_max-model_y_min, model_z_max-model_z_min);
}
float new_x, new_y, new_z;
for (int x = 0; x < image_x; x++) {
for (int y = threadIdx.x; y < image_y; y+=blockDim.x) {
if (slice[x*image_y+y] >= 0.) {
/* This is just a matrix multiplication with rotation */
new_x = (m00*coordinates_0[x*image_y+y] +
m01*coordinates_1[x*image_y+y] +
m02*coordinates_2[x*image_y+y] +
model_x_tot/2.0 - 0.5);
new_y = (m10*coordinates_0[x*image_y+y] +
m11*coordinates_1[x*image_y+y] +
m12*coordinates_2[x*image_y+y] +
model_y_tot/2.0 - 0.5);
new_z = (m20*coordinates_0[x*image_y+y] +
m21*coordinates_1[x*image_y+y] +
m22*coordinates_2[x*image_y+y] +
model_z_tot/2.0 - 0.5);
if (blockIdx.x == 0 && x == 10 && y == 10) {
printf("%g %g %g\n", new_x, new_y, new_z);
printf("%g %g %g\n", new_x-(float)model_x_min, new_y-(float)model_y_min, new_z-(float)model_z_min);
}
if (interpolation == 0) {
device_model_set_nn(model,
model_weights,
model_x_max-model_x_min,
model_y_max-model_y_min,
model_z_max-model_z_min,
new_x-(float)model_x_min,
new_y-(float)model_y_min,
new_z-(float)model_z_min,
slice[x*image_y+y],
slice_weight);
} else {
device_model_set(model,
model_weights,
model_x_max-model_x_min,
model_y_max-model_y_min,
model_z_max-model_z_min,
new_x-(float)model_x_min,
new_y-(float)model_y_min,
new_z-(float)model_z_min,
slice[x*image_y+y],
slice_weight);
}
}
}
}
}
__global__ void kernel_insert_slices_partial(float *const model,
float *const model_weights,
const int model_x_tot,
const int model_x_min,
const int model_x_max,
const int model_y_tot,
const int model_y_min,
const int model_y_max,
const int model_z_tot,
const int model_z_min,
const int model_z_max,
const float *const slices,
const int image_x,
const int image_y,
const float *const slice_weights,
const float *const rotations,
const float *const coordinates,
const int interpolation) {
const int rotation_index = blockIdx.x;
device_insert_slice_partial(model,
model_weights,
model_x_tot,
model_x_min,
model_x_max,
model_y_tot,
model_y_min,
model_y_max,
model_z_tot,
model_z_min,
model_z_max,
&slices[image_x*image_y*rotation_index],
image_x,
image_y,
slice_weights[rotation_index],
&rotations[4*rotation_index],
coordinates,
interpolation);
}
void insert_slices_partial(float *const model,
float *const model_weights,
const int model_x_tot,
const int model_x_min,
const int model_x_max,
const int model_y_tot,
const int model_y_min,
const int model_y_max,
const int model_z_tot,
const int model_z_min,
const int model_z_max,
const float *const slices,
const int image_x,
const int image_y,
const float *const slice_weights,
const float *const rotations,
const int number_of_rotations,
const float *const coordinates,
const int interpolation)
{
int nblocks = number_of_rotations;
int nthreads = NTHREADS;
hipLaunchKernelGGL(( kernel_insert_slices_partial), dim3(nblocks), dim3(nthreads), 0, 0, model,
model_weights,
model_x_tot,
model_x_min,
model_x_max,
model_y_tot,
model_y_min,
model_y_max,
model_z_tot,
model_z_min,
model_z_max,
slices,
image_x,
image_y,
slice_weights,
rotations,
coordinates,
interpolation);
cudaErrorCheck(hipPeekAtLastError());
cudaErrorCheck(hipDeviceSynchronize());
}
__device__ float device_compare_balanced(const float expected_value,
const float measured_value)
{
return pow(expected_value - measured_value, 2) / expected_value;
}
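/* Rotate the whole model: one thread per output voxel. Thread 0 of each block
   converts the quaternion into a rotation matrix held in shared memory; each
   thread then rotates its centred voxel coordinate and samples the input model
   with trilinear interpolation. */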
__global__ void kernel_rotate_model(const float *const model,
float *const rotated_model,
const int model_x,
const int model_y,
const int model_z,
const float *const rotation) {
__shared__ float rotation_matrix[9];
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
rotation_matrix[0] = rotation[0]*rotation[0] + rotation[1]*rotation[1] - rotation[2]*rotation[2] - rotation[3]*rotation[3]; // 00
rotation_matrix[1] = 2.0f*rotation[1]*rotation[2] - 2.0f*rotation[0]*rotation[3]; // 01
rotation_matrix[2] = 2.0f*rotation[1]*rotation[3] + 2.0f*rotation[0]*rotation[2]; // 02
rotation_matrix[3] = 2.0f*rotation[1]*rotation[2] + 2.0f*rotation[0]*rotation[3]; // 10
rotation_matrix[4] = rotation[0]*rotation[0] - rotation[1]*rotation[1] + rotation[2]*rotation[2] - rotation[3]*rotation[3]; // 11
rotation_matrix[5] = 2.0f*rotation[2]*rotation[3] - 2.0f*rotation[0]*rotation[1]; // 12
rotation_matrix[6] = 2.0f*rotation[1]*rotation[3] - 2.0f*rotation[0]*rotation[2]; // 20
rotation_matrix[7] = 2.0f*rotation[2]*rotation[3] + 2.0f*rotation[0]*rotation[1]; // 21
rotation_matrix[8] = rotation[0]*rotation[0] - rotation[1]*rotation[1] - rotation[2]*rotation[2] + rotation[3]*rotation[3]; // 22
}
__syncthreads();
if (index < model_x*model_y*model_z) {
float start_x = ((float) ((index % (model_x*model_y)) % model_x)) - model_x/2. + 0.5;
float start_y = ((float) ((index / model_x) % model_y)) - model_y/2. + 0.5;
float start_z = ((float) (index / (model_x*model_y))) - model_z/2. + 0.5;
/*
float start_x = ((float) ((index % (model_x*model_y)) % model_x));
float start_y = ((float) ((index / model_x) % model_y));
float start_z = ((float) (index / (model_x*model_y)));
*/
float new_x, new_y, new_z;
/* This is just a matrix multiplication with rotation */
new_x = model_x/2. - 0.5 + (rotation_matrix[0]*start_x +
rotation_matrix[1]*start_y +
rotation_matrix[2]*start_z);
new_y = model_y/2. - 0.5 + (rotation_matrix[3]*start_x +
rotation_matrix[4]*start_y +
rotation_matrix[5]*start_z);
new_z = model_z/2. - 0.5 + (rotation_matrix[6]*start_x +
rotation_matrix[7]*start_y +
rotation_matrix[8]*start_z);
rotated_model[index] = device_model_get(model,
model_x, model_y, model_z,
new_x, new_y, new_z);
}
}
void rotate_model(const float *const model,
float *const rotated_model,
const int model_x,
const int model_y,
const int model_z,
const float *const rotation)
{
const int nthreads = NTHREADS;
const int nblocks = (model_x*model_y*model_z-1) / nthreads + 1;
hipLaunchKernelGGL(( kernel_rotate_model), dim3(nblocks), dim3(nthreads), 0, 0, model,
rotated_model,
model_x,
model_y,
model_z,
rotation);
cudaErrorCheck(hipPeekAtLastError());
cudaErrorCheck(hipDeviceSynchronize());
}
| a2225f0b701982eccc6359f2d1747ff072615aab.cu | #include <Python.h>
#include <emc_cuda.h>
#include "cuda_tools.h"
using namespace std;
__global__ void kernel_set_to_value(float *const array,
const int size,
const float value)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
array[index] = value;
}
}
void set_to_value(float *const array,
const int size,
const float value)
{
const int nthreads = NTHREADS;
const int nblocks = (size-1) / nthreads + 1;
kernel_set_to_value<<<nblocks, nthreads>>>(array, size, value);
cudaErrorCheck(cudaPeekAtLastError());
cudaErrorCheck(cudaDeviceSynchronize());
}
__global__ void kernel_masked_set(float *const array,
const int *const mask,
const int size,
const float value)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size && mask[index] > 0) {
array[index] = value;
}
}
void masked_set(float *const array,
const int *const mask,
const int size,
const float value)
{
const int nthreads = NTHREADS;
const int nblocks = (size-1) / nthreads + 1;
kernel_masked_set<<<nblocks, nthreads>>>(array, mask, size, value);
cudaErrorCheck(cudaPeekAtLastError());
cudaErrorCheck(cudaDeviceSynchronize());
}
float *int_to_float_pointer(const unsigned long long pointer_int)
{
float *pointer = (float *)pointer_int;
return pointer;
}
int *int_to_int_pointer(const unsigned long long pointer_int)
{
int *pointer = (int *)pointer_int;
return pointer;
}
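/* Hamilton product res = quat1 * quat2 for quaternions stored as (w, x, y, z). */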
__device__ void quaternion_multiply(float *const res, const float *const quat1, const float *const quat2)
{
res[0] = quat1[0]*quat2[0] - quat1[1]*quat2[1] - quat1[2]*quat2[2] - quat1[3]*quat2[3];
res[1] = quat1[0]*quat2[1] + quat1[1]*quat2[0] + quat1[2]*quat2[3] - quat1[3]*quat2[2];
res[2] = quat1[0]*quat2[2] - quat1[1]*quat2[3] + quat1[2]*quat2[0] + quat1[3]*quat2[1];
res[3] = quat1[0]*quat2[3] + quat1[1]*quat2[2] - quat1[2]*quat2[1] + quat1[3]*quat2[0];
}
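/* For a fractional coordinate along one axis, return the lower voxel index and
   the two linear interpolation weights; a weight that would fall just outside
   the model is zeroed, and *out_of_range is set if the coordinate lies fully
   outside. Example: coordinate 2.3 -> low_coordinate 2, low_weight 0.7,
   high_weight 0.3. */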
__device__ void device_interpolate_get_coordinate_weight(const float coordinate,
const int side,
int *low_coordinate,
float *low_weight,
float *high_weight,
int *out_of_range)
{
*low_coordinate = (int)ceil(coordinate) - 1;
*low_weight = ceil(coordinate) - coordinate;
*high_weight = 1.-*low_weight;
if (*low_coordinate < -1) {
*out_of_range = 1;
} else if (*low_coordinate == -1) {
*low_weight = 0.;
} else if (*low_coordinate == side-1) {
*high_weight = 0.;
} else if (*low_coordinate > side-1) {
*out_of_range = 1;
}
}
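/* Trilinear read from the model. Voxels holding negative values are treated as
   missing and excluded from the weighted average; returns -1 if the coordinate
   is out of range or no valid voxel contributes. */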
__device__ float device_model_get(const float *const model,
const int model_x,
const int model_y,
const int model_z,
const float coordinate_x,
const float coordinate_y,
const float coordinate_z)
{
int low_x, low_y, low_z;
float low_weight_x, low_weight_y, low_weight_z;
float high_weight_x, high_weight_y, high_weight_z;
int out_of_range = 0;
device_interpolate_get_coordinate_weight(coordinate_x, model_x,
&low_x, &low_weight_x,
&high_weight_x, &out_of_range);
device_interpolate_get_coordinate_weight(coordinate_y, model_y,
&low_y, &low_weight_y,
&high_weight_y, &out_of_range);
device_interpolate_get_coordinate_weight(coordinate_z, model_z,
&low_z, &low_weight_z,
&high_weight_z, &out_of_range);
if (out_of_range != 0) {
return -1.f;
} else {
float interp_sum = 0.;
float interp_weight = 0.;
int index_x, index_y, index_z;
float weight_x, weight_y, weight_z;
for (index_x = low_x; index_x <= low_x+1; index_x += 1) {
if (index_x == low_x && low_weight_x == 0.) continue;
if (index_x == (low_x+1) && high_weight_x == 0.) continue;
if (index_x == low_x) weight_x = low_weight_x;
else weight_x = high_weight_x;
for (index_y = low_y; index_y <= low_y+1; index_y += 1) {
if (index_y == low_y && low_weight_y == 0.) continue;
if (index_y == (low_y+1) && high_weight_y == 0.) continue;
if (index_y == low_y) weight_y = low_weight_y;
else weight_y = high_weight_y;
for (index_z = low_z; index_z <= low_z+1; index_z += 1) {
if (index_z == low_z && low_weight_z == 0.) continue;
if (index_z == (low_z+1) && high_weight_z == 0.) continue;
if (index_z == low_z) weight_z = low_weight_z;
else weight_z = high_weight_z;
if (model[model_z*model_y*index_x + model_z*index_y + index_z] >= 0.) {
interp_sum += weight_x*weight_y*weight_z*model[model_z*model_y*index_x + model_z*index_y + index_z];
interp_weight += weight_x*weight_y*weight_z;
}
}
}
}
if (interp_weight > 0.) {
return interp_sum / interp_weight;
} else {
return -1.f;
}
}
}
__device__ void device_get_slice(const float *const model,
const int model_x,
const int model_y,
const int model_z,
float *const slice,
const int image_x,
const int image_y,
const float *const rotation,
const float *const coordinates) {
const float *const coordinates_0 = &coordinates[0*image_x*image_y];
const float *const coordinates_1 = &coordinates[1*image_x*image_y];
const float *const coordinates_2 = &coordinates[2*image_x*image_y];
float m00 = (rotation[0]*rotation[0] + rotation[1]*rotation[1] -
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m01 = 2.0f*rotation[1]*rotation[2] - 2.0f*rotation[0]*rotation[3];
float m02 = 2.0f*rotation[1]*rotation[3] + 2.0f*rotation[0]*rotation[2];
float m10 = 2.0f*rotation[1]*rotation[2] + 2.0f*rotation[0]*rotation[3];
float m11 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] +
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m12 = 2.0f*rotation[2]*rotation[3] - 2.0f*rotation[0]*rotation[1];
float m20 = 2.0f*rotation[1]*rotation[3] - 2.0f*rotation[0]*rotation[2];
float m21 = 2.0f*rotation[2]*rotation[3] + 2.0f*rotation[0]*rotation[1];
float m22 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] -
rotation[2]*rotation[2] + rotation[3]*rotation[3]);
float new_x, new_y, new_z;
for (int x = 0; x < image_x; x++) {
for (int y = threadIdx.x; y < image_y; y+=blockDim.x) {
/* This is just a matrix multiplication with rotation */
new_x = (m00*coordinates_0[x*image_y+y] +
m01*coordinates_1[x*image_y+y] +
m02*coordinates_2[x*image_y+y] +
model_x/2.0 - 0.5);
new_y = (m10*coordinates_0[x*image_y+y] +
m11*coordinates_1[x*image_y+y] +
m12*coordinates_2[x*image_y+y] +
model_y/2.0 - 0.5);
new_z = (m20*coordinates_0[x*image_y+y] +
m21*coordinates_1[x*image_y+y] +
m22*coordinates_2[x*image_y+y] +
model_z/2.0 - 0.5);
slice[x*image_y+y] = device_model_get(model, model_x, model_y, model_z, new_x, new_y, new_z);
}
}
}
__global__ void kernel_expand_model(const float *const model,
const int model_x,
const int model_y,
const int model_z,
float *const slices,
const int image_x,
const int image_y,
const float *const rotations,
const float *const coordinates)
{
const int rotation_index = blockIdx.x;
device_get_slice(model,
model_x,
model_y,
model_z,
&slices[image_x*image_y*rotation_index],
image_x,
image_y,
&rotations[4*rotation_index],
coordinates);
}
void expand_model(const float *const model,
const int model_x,
const int model_y,
const int model_z,
float *const slices,
const int image_x,
const int image_y,
const float *const rotations,
const int number_of_rotations,
const float *const coordinates)
{
int nblocks = number_of_rotations;
int nthreads = NTHREADS;
kernel_expand_model<<<nblocks, nthreads>>>(model,
model_x,
model_y,
model_z,
slices,
image_x,
image_y,
rotations,
coordinates);
cudaErrorCheck(cudaPeekAtLastError());
cudaErrorCheck(cudaDeviceSynchronize());
}
__device__ void device_model_set(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float coordinate_x,
const float coordinate_y,
const float coordinate_z,
const float value,
const float value_weight)
{
int low_x, low_y, low_z;
float low_weight_x, low_weight_y, low_weight_z;
float high_weight_x, high_weight_y, high_weight_z;
int out_of_range = 0;
device_interpolate_get_coordinate_weight(coordinate_x, model_x,
&low_x, &low_weight_x,
&high_weight_x, &out_of_range);
device_interpolate_get_coordinate_weight(coordinate_y, model_y,
&low_y, &low_weight_y,
&high_weight_y, &out_of_range);
device_interpolate_get_coordinate_weight(coordinate_z, model_z,
&low_z, &low_weight_z,
&high_weight_z, &out_of_range);
if (out_of_range == 0) {
int index_x, index_y, index_z;
float weight_x, weight_y, weight_z;
for (index_x = low_x; index_x <= low_x+1; index_x += 1) {
if (index_x == low_x && low_weight_x == 0.) continue;
if (index_x == (low_x+1) && high_weight_x == 0.) continue;
if (index_x == low_x) weight_x = low_weight_x;
else weight_x = high_weight_x;
for (index_y = low_y; index_y <= low_y+1; index_y += 1) {
if (index_y == low_y && low_weight_y == 0.) continue;
if (index_y == (low_y+1) && high_weight_y == 0.) continue;
if (index_y == low_y) weight_y = low_weight_y;
else weight_y = high_weight_y;
for (index_z = low_z; index_z <= low_z+1; index_z += 1) {
if (index_z == low_z && low_weight_z == 0.) continue;
if (index_z == (low_z+1) && high_weight_z == 0.) continue;
if (index_z == low_z) weight_z = low_weight_z;
else weight_z = high_weight_z;
atomicAdd(&model[model_z*model_y*index_x + model_z*index_y + index_z],
weight_x*weight_y*weight_z*value_weight*value);
atomicAdd(&model_weights[model_z*model_y*index_x + model_z*index_y + index_z],
weight_x*weight_y*weight_z*value_weight);
}
}
}
}
}
__device__ void device_model_set_nn(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float coordinate_x,
const float coordinate_y,
const float coordinate_z,
const float value,
const float value_weight)
{
int index_x = (int) (coordinate_x + 0.5);
int index_y = (int) (coordinate_y + 0.5);
int index_z = (int) (coordinate_z + 0.5);
if (index_x >= 0 && index_x < model_x &&
index_y >= 0 && index_y < model_y &&
index_z >= 0 && index_z < model_z) {
atomicAdd(&model[model_z*model_y*index_x + model_z*index_y + index_z],
value_weight*value);
atomicAdd(&model_weights[model_z*model_y*index_x + model_z*index_y + index_z],
value_weight);
}
}
__device__ void device_insert_slice(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float *const slice,
const int image_x,
const int image_y,
const float slice_weight,
const float *const rotation,
const float *const coordinates,
const int interpolation)
{
const float *const coordinates_0 = &coordinates[0*image_x*image_y];
const float *const coordinates_1 = &coordinates[1*image_x*image_y];
const float *const coordinates_2 = &coordinates[2*image_x*image_y];
float m00 = (rotation[0]*rotation[0] + rotation[1]*rotation[1] -
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m01 = 2.0f*rotation[1]*rotation[2] - 2.0f*rotation[0]*rotation[3];
float m02 = 2.0f*rotation[1]*rotation[3] + 2.0f*rotation[0]*rotation[2];
float m10 = 2.0f*rotation[1]*rotation[2] + 2.0f*rotation[0]*rotation[3];
float m11 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] +
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m12 = 2.0f*rotation[2]*rotation[3] - 2.0f*rotation[0]*rotation[1];
float m20 = 2.0f*rotation[1]*rotation[3] - 2.0f*rotation[0]*rotation[2];
float m21 = 2.0f*rotation[2]*rotation[3] + 2.0f*rotation[0]*rotation[1];
float m22 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] -
rotation[2]*rotation[2] + rotation[3]*rotation[3]);
float new_x, new_y, new_z;
for (int x = 0; x < image_x; x++) {
for (int y = threadIdx.x; y < image_y; y+=blockDim.x) {
if (slice[x*image_y+y] >= 0.) {
/* This is just a matrix multiplication with rotation */
new_x = (m00*coordinates_0[x*image_y+y] +
m01*coordinates_1[x*image_y+y] +
m02*coordinates_2[x*image_y+y] +
model_x/2.0 - 0.5);
new_y = (m10*coordinates_0[x*image_y+y] +
m11*coordinates_1[x*image_y+y] +
m12*coordinates_2[x*image_y+y] +
model_y/2.0 - 0.5);
new_z = (m20*coordinates_0[x*image_y+y] +
m21*coordinates_1[x*image_y+y] +
m22*coordinates_2[x*image_y+y] +
model_z/2.0 - 0.5);
if (interpolation == 0) {
device_model_set_nn(model, model_weights,
model_x, model_y, model_z,
new_x, new_y, new_z,
slice[x*image_y+y], slice_weight);
} else {
device_model_set(model, model_weights,
model_x, model_y, model_z,
new_x, new_y, new_z,
slice[x*image_y+y], slice_weight);
}
}
}
}
}
__global__ void kernel_insert_slices(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float *const slices,
const int image_x,
const int image_y,
const float *const slice_weights,
const float *const rotations,
const float *const coordinates,
const int interpolation) {
const int rotation_index = blockIdx.x;
device_insert_slice(model,
model_weights,
model_x,
model_y,
model_z,
&slices[image_x*image_y*rotation_index],
image_x,
image_y,
slice_weights[rotation_index],
&rotations[4*rotation_index],
coordinates,
interpolation);
}
void insert_slices(float *const model,
float *const model_weights,
const int model_x,
const int model_y,
const int model_z,
const float *const slices,
const int image_x,
const int image_y,
const float *const slice_weights,
const float *const rotations,
const int number_of_rotations,
const float *const coordinates,
const int interpolation)
{
int nblocks = number_of_rotations;
int nthreads = NTHREADS;
kernel_insert_slices<<<nblocks, nthreads>>>(model,
model_weights,
model_x,
model_y,
model_z,
slices,
image_x,
image_y,
slice_weights,
rotations,
coordinates,
interpolation);
cudaErrorCheck(cudaPeekAtLastError());
cudaErrorCheck(cudaDeviceSynchronize());
}
__device__ void device_insert_slice_partial(float *const model,
float *const model_weights,
const int model_x_tot,
const int model_x_min,
const int model_x_max,
const int model_y_tot,
const int model_y_min,
const int model_y_max,
const int model_z_tot,
const int model_z_min,
const int model_z_max,
const float *const slice,
const int image_x,
const int image_y,
const float slice_weight,
const float *const rotation,
const float *const coordinates,
const int interpolation)
{
const float *const coordinates_0 = &coordinates[0*image_x*image_y];
const float *const coordinates_1 = &coordinates[1*image_x*image_y];
const float *const coordinates_2 = &coordinates[2*image_x*image_y];
float m00 = (rotation[0]*rotation[0] + rotation[1]*rotation[1] -
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m01 = 2.0f*rotation[1]*rotation[2] - 2.0f*rotation[0]*rotation[3];
float m02 = 2.0f*rotation[1]*rotation[3] + 2.0f*rotation[0]*rotation[2];
float m10 = 2.0f*rotation[1]*rotation[2] + 2.0f*rotation[0]*rotation[3];
float m11 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] +
rotation[2]*rotation[2] - rotation[3]*rotation[3]);
float m12 = 2.0f*rotation[2]*rotation[3] - 2.0f*rotation[0]*rotation[1];
float m20 = 2.0f*rotation[1]*rotation[3] - 2.0f*rotation[0]*rotation[2];
float m21 = 2.0f*rotation[2]*rotation[3] + 2.0f*rotation[0]*rotation[1];
float m22 = (rotation[0]*rotation[0] - rotation[1]*rotation[1] -
rotation[2]*rotation[2] + rotation[3]*rotation[3]);
if (blockIdx.x == 0 && threadIdx.x == 0) {
printf("inside kernel\n");
/*
printf("%i %i %i\n", model_x_tot, model_x_min, model_x_max);
printf("%i %i %i\n", model_y_tot, model_y_min, model_y_max);
printf("%i %i %i\n", model_z_tot, model_z_min, model_z_max);
*/
printf("%i %i %i\n", model_x_max-model_x_min, model_y_max-model_y_min, model_z_max-model_z_min);
}
float new_x, new_y, new_z;
for (int x = 0; x < image_x; x++) {
for (int y = threadIdx.x; y < image_y; y+=blockDim.x) {
if (slice[x*image_y+y] >= 0.) {
/* This is just a matrix multiplication with rotation */
new_x = (m00*coordinates_0[x*image_y+y] +
m01*coordinates_1[x*image_y+y] +
m02*coordinates_2[x*image_y+y] +
model_x_tot/2.0 - 0.5);
new_y = (m10*coordinates_0[x*image_y+y] +
m11*coordinates_1[x*image_y+y] +
m12*coordinates_2[x*image_y+y] +
model_y_tot/2.0 - 0.5);
new_z = (m20*coordinates_0[x*image_y+y] +
m21*coordinates_1[x*image_y+y] +
m22*coordinates_2[x*image_y+y] +
model_z_tot/2.0 - 0.5);
if (blockIdx.x == 0 && x == 10 && y == 10) {
printf("%g %g %g\n", new_x, new_y, new_z);
printf("%g %g %g\n", new_x-(float)model_x_min, new_y-(float)model_y_min, new_z-(float)model_z_min);
}
if (interpolation == 0) {
device_model_set_nn(model,
model_weights,
model_x_max-model_x_min,
model_y_max-model_y_min,
model_z_max-model_z_min,
new_x-(float)model_x_min,
new_y-(float)model_y_min,
new_z-(float)model_z_min,
slice[x*image_y+y],
slice_weight);
} else {
device_model_set(model,
model_weights,
model_x_max-model_x_min,
model_y_max-model_y_min,
model_z_max-model_z_min,
new_x-(float)model_x_min,
new_y-(float)model_y_min,
new_z-(float)model_z_min,
slice[x*image_y+y],
slice_weight);
}
}
}
}
}
__global__ void kernel_insert_slices_partial(float *const model,
float *const model_weights,
const int model_x_tot,
const int model_x_min,
const int model_x_max,
const int model_y_tot,
const int model_y_min,
const int model_y_max,
const int model_z_tot,
const int model_z_min,
const int model_z_max,
const float *const slices,
const int image_x,
const int image_y,
const float *const slice_weights,
const float *const rotations,
const float *const coordinates,
const int interpolation) {
const int rotation_index = blockIdx.x;
device_insert_slice_partial(model,
model_weights,
model_x_tot,
model_x_min,
model_x_max,
model_y_tot,
model_y_min,
model_y_max,
model_z_tot,
model_z_min,
model_z_max,
&slices[image_x*image_y*rotation_index],
image_x,
image_y,
slice_weights[rotation_index],
&rotations[4*rotation_index],
coordinates,
interpolation);
}
void insert_slices_partial(float *const model,
float *const model_weights,
const int model_x_tot,
const int model_x_min,
const int model_x_max,
const int model_y_tot,
const int model_y_min,
const int model_y_max,
const int model_z_tot,
const int model_z_min,
const int model_z_max,
const float *const slices,
const int image_x,
const int image_y,
const float *const slice_weights,
const float *const rotations,
const int number_of_rotations,
const float *const coordinates,
const int interpolation)
{
int nblocks = number_of_rotations;
int nthreads = NTHREADS;
kernel_insert_slices_partial<<<nblocks, nthreads>>>(model,
model_weights,
model_x_tot,
model_x_min,
model_x_max,
model_y_tot,
model_y_min,
model_y_max,
model_z_tot,
model_z_min,
model_z_max,
slices,
image_x,
image_y,
slice_weights,
rotations,
coordinates,
interpolation);
cudaErrorCheck(cudaPeekAtLastError());
cudaErrorCheck(cudaDeviceSynchronize());
}
__device__ float device_compare_balanced(const float expected_value,
const float measured_value)
{
return pow(expected_value - measured_value, 2) / expected_value;
}
__global__ void kernel_rotate_model(const float *const model,
float *const rotated_model,
const int model_x,
const int model_y,
const int model_z,
const float *const rotation) {
__shared__ float rotation_matrix[9];
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (threadIdx.x == 0) {
rotation_matrix[0] = rotation[0]*rotation[0] + rotation[1]*rotation[1] - rotation[2]*rotation[2] - rotation[3]*rotation[3]; // 00
rotation_matrix[1] = 2.0f*rotation[1]*rotation[2] - 2.0f*rotation[0]*rotation[3]; // 01
rotation_matrix[2] = 2.0f*rotation[1]*rotation[3] + 2.0f*rotation[0]*rotation[2]; // 02
rotation_matrix[3] = 2.0f*rotation[1]*rotation[2] + 2.0f*rotation[0]*rotation[3]; // 10
rotation_matrix[4] = rotation[0]*rotation[0] - rotation[1]*rotation[1] + rotation[2]*rotation[2] - rotation[3]*rotation[3]; // 11
rotation_matrix[5] = 2.0f*rotation[2]*rotation[3] - 2.0f*rotation[0]*rotation[1]; // 12
rotation_matrix[6] = 2.0f*rotation[1]*rotation[3] - 2.0f*rotation[0]*rotation[2]; // 20
rotation_matrix[7] = 2.0f*rotation[2]*rotation[3] + 2.0f*rotation[0]*rotation[1]; // 21
rotation_matrix[8] = rotation[0]*rotation[0] - rotation[1]*rotation[1] - rotation[2]*rotation[2] + rotation[3]*rotation[3]; // 22
}
__syncthreads();
if (index < model_x*model_y*model_z) {
float start_x = ((float) ((index % (model_x*model_y)) % model_x)) - model_x/2. + 0.5;
float start_y = ((float) ((index / model_x) % model_y)) - model_y/2. + 0.5;
float start_z = ((float) (index / (model_x*model_y))) - model_z/2. + 0.5;
/*
float start_x = ((float) ((index % (model_x*model_y)) % model_x));
float start_y = ((float) ((index / model_x) % model_y));
float start_z = ((float) (index / (model_x*model_y)));
*/
float new_x, new_y, new_z;
/* This is just a matrix multiplication with rotation */
new_x = model_x/2. - 0.5 + (rotation_matrix[0]*start_x +
rotation_matrix[1]*start_y +
rotation_matrix[2]*start_z);
new_y = model_y/2. - 0.5 + (rotation_matrix[3]*start_x +
rotation_matrix[4]*start_y +
rotation_matrix[5]*start_z);
new_z = model_z/2. - 0.5 + (rotation_matrix[6]*start_x +
rotation_matrix[7]*start_y +
rotation_matrix[8]*start_z);
rotated_model[index] = device_model_get(model,
model_x, model_y, model_z,
new_x, new_y, new_z);
}
}
void rotate_model(const float *const model,
float *const rotated_model,
const int model_x,
const int model_y,
const int model_z,
const float *const rotation)
{
const int nthreads = NTHREADS;
const int nblocks = (model_x*model_y*model_z-1) / nthreads + 1;
kernel_rotate_model<<<nblocks, nthreads>>>(model,
rotated_model,
model_x,
model_y,
model_z,
rotation);
cudaErrorCheck(cudaPeekAtLastError());
cudaErrorCheck(cudaDeviceSynchronize());
}
|
84861c45d98dfb6690c6d8e543939bdba594de32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
#ifndef ENABLE_SHUFFLE
typedef struct {
real x, y, z;
real q;
real fx, fy, fz;
ATOM_PARAMETER_DATA
#ifndef PARAMETER_SIZE_IS_EVEN
real padding;
#endif
} AtomData;
#endif
#ifdef ENABLE_SHUFFLE
//support for 64 bit shuffles
static __inline__ __device__ float real_shfl(float var, int srcLane) {
return SHFL(var, srcLane);
}
static __inline__ __device__ int real_shfl(int var, int srcLane) {
return SHFL(var, srcLane);
}
static __inline__ __device__ double real_shfl(double var, int srcLane) {
int hi, lo;
asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
hi = SHFL(hi, srcLane);
lo = SHFL(lo, srcLane);
return __hiloint2double( hi, lo );
}
static __inline__ __device__ long long real_shfl(long long var, int srcLane) {
int hi, lo;
asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
hi = SHFL(hi, srcLane);
lo = SHFL(lo, srcLane);
    // unfortunately there isn't an __nv_hiloint2long(hi,lo) intrinsic cast
int2 fuse; fuse.x = lo; fuse.y = hi;
return *reinterpret_cast<long long*>(&fuse);
}
#endif
/**
* Compute nonbonded interactions. The kernel is separated into two parts,
* tiles with exclusions and tiles without exclusions. It relies heavily on
* implicit warp-level synchronization. A tile is defined by two atom blocks
* each of warpsize. Each warp computes a range of tiles.
*
* Tiles with exclusions compute the entire set of interactions across
* atom blocks, equal to warpsize*warpsize. In order to avoid access conflicts
* the forces are computed and accumulated diagonally in the manner shown below
* where, suppose
*
* [a-h] comprise atom block 1, [i-p] comprise atom block 2
*
* 1 denotes the first set of calculations within the warp
* 2 denotes the second set of calculations within the warp
* ... etc.
*
* threads
* 0 1 2 3 4 5 6 7
* atom1
* L a b c d e f g h
* o i 1 2 3 4 5 6 7 8
* c j 8 1 2 3 4 5 6 7
* a k 7 8 1 2 3 4 5 6
* l l 6 7 8 1 2 3 4 5
* D m 5 6 7 8 1 2 3 4
* a n 4 5 6 7 8 1 2 3
* t o 3 4 5 6 7 8 1 2
* a p 2 3 4 5 6 7 8 1
*
* Tiles without exclusions read off directly from the neighbourlist interactingAtoms
 * and follow the same force accumulation method. If there are more interactingTiles
* than the size of the neighbourlist initially allocated, the neighbourlist is rebuilt
* and the full tileset is computed. This should happen on the first step, and very rarely
* afterwards.
*
 * On CUDA devices that support the shuffle intrinsic, on-diagonal exclusion tiles use
* __shfl to broadcast. For all other types of tiles __shfl is used to pass around the
* forces, positions, and parameters when computing the forces.
*
* [out]forceBuffers - forces on each atom to eventually be accumulated
* [out]energyBuffer - energyBuffer to eventually be accumulated
* [in]posq - x,y,z,charge
* [in]exclusions - 1024-bit flags denoting atom-atom exclusions for each tile
* [in]exclusionTiles - x,y denotes the indices of tiles that have an exclusion
* [in]startTileIndex - index into first tile to be processed
* [in]numTileIndices - number of tiles this context is responsible for processing
* [in]int tiles - the atom block for each tile
* [in]interactionCount - total number of tiles that have an interaction
* [in]maxTiles - stores the size of the neighbourlist in case it needs
* - to be expanded
* [in]periodicBoxSize - size of the Periodic Box, last dimension (w) not used
* [in]invPeriodicBox - inverse of the periodicBoxSize, pre-computed for speed
* [in]blockCenter - the center of each block in euclidean coordinates
* [in]blockSize - size of the each block, radiating from the center
* - x is half the distance of total length
* - y is half the distance of total width
* - z is half the distance of total height
* - w is not used
* [in]interactingAtoms - a list of interactions within a given tile
*
*/
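/*
 * Concretely, for an off-diagonal tile with TILE_SIZE == 8 as in the diagram:
 * thread t starts its inner loop at local atom t and visits local atom
 * (t + k) & (TILE_SIZE - 1) on iteration k, so on every iteration the eight
 * lanes address eight distinct atoms and no two lanes ever accumulate into the
 * same localData entry or shuffle slot.
 */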
extern "C" __global__ void computeNonbonded(
unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, const tileflags* __restrict__ exclusions,
const ushort2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned int numTileIndices
#ifdef USE_CUTOFF
, const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms, unsigned int maxSinglePairs,
const int2* __restrict__ singlePairs
#endif
PARAMETER_ARGUMENTS) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp
const unsigned int tbx = threadIdx.x - tgx; // block warpIndex
mixed energy = 0;
INIT_DERIVATIVES
    // use shared memory if the device cannot shuffle
#ifndef ENABLE_SHUFFLE
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
#endif
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real3 force = make_real3(0);
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 posq1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
#ifdef USE_EXCLUSIONS
tileflags excl = exclusions[pos*TILE_SIZE+tgx];
#endif
const bool hasExclusions = true;
if (x == y) {
// This tile is on the diagonal.
#ifdef ENABLE_SHUFFLE
real4 shflPosq = posq1;
#else
localData[threadIdx.x].x = posq1.x;
localData[threadIdx.x].y = posq1.y;
localData[threadIdx.x].z = posq1.z;
localData[threadIdx.x].q = posq1.w;
LOAD_LOCAL_PARAMETERS_FROM_1
#endif
// we do not need to fetch parameters from global since this is a symmetric tile
// instead we can broadcast the values using shuffle
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+j;
real4 posq2;
#ifdef ENABLE_SHUFFLE
BROADCAST_WARP_DATA
#else
posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+j;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1));
#endif
real tempEnergy = 0.0f;
const real interactionScale = 0.5f;
COMPUTE_INTERACTION
energy += 0.5f*tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
force.x -= delta.x*dEdR;
force.y -= delta.y*dEdR;
force.z -= delta.z*dEdR;
#else
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#endif
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
}
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
real4 shflPosq = posq[j];
#ifdef ENABLE_SHUFFLE
real3 shflForce;
shflForce.x = 0.0f;
shflForce.y = 0.0f;
shflForce.z = 0.0f;
#else
localData[threadIdx.x].x = shflPosq.x;
localData[threadIdx.x].y = shflPosq.y;
localData[threadIdx.x].z = shflPosq.z;
localData[threadIdx.x].q = shflPosq.w;
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
#endif
DECLARE_LOCAL_PARAMETERS
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
#ifdef USE_EXCLUSIONS
excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx));
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+tj;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1));
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
// cycles the indices
// 0 1 2 3 4 5 6 7 -> 1 2 3 4 5 6 7 0
tj = (tj + 1) & (TILE_SIZE - 1);
}
const unsigned int offset = y*TILE_SIZE + tgx;
// write results for off diagonal tiles
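            // Forces are accumulated in 64-bit fixed point: each component is scaled by
            // 2^32 (0x100000000), truncated to a long long and atomically added into an
            // unsigned long long buffer, making the summation order-independent.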
#ifdef INCLUDE_FORCES
#ifdef ENABLE_SHUFFLE
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (shflForce.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.z*0x100000000)));
#else
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
#endif
#endif
}
// Write results for on and off diagonal tiles
#ifdef INCLUDE_FORCES
const unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
#endif
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
const unsigned int numTiles = interactionCount[0];
if (numTiles > maxTiles)
return; // There wasn't enough memory for the neighbor list.
int pos = (int) (numTiles > maxTiles ? startTileIndex+warp*(long long)numTileIndices/totalWarps : warp*(long long)numTiles/totalWarps);
int end = (int) (numTiles > maxTiles ? startTileIndex+(warp+1)*(long long)numTileIndices/totalWarps : (warp+1)*(long long)numTiles/totalWarps);
#else
const unsigned int numTiles = numTileIndices;
int pos = (int) (startTileIndex+warp*(long long)numTiles/totalWarps);
int end = (int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
// atomIndices can probably be shuffled as well
// but it probably wouldn't make things any faster
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
const bool hasExclusions = false;
real3 force = make_real3(0);
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
x = tiles[pos];
real4 blockSizeX = blockSize[x];
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= MAX_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= MAX_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= MAX_CUTOFF);
#else
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
#endif
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 posq1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
//const unsigned int localAtomIndex = threadIdx.x;
#ifdef USE_CUTOFF
unsigned int j = interactingAtoms[pos*TILE_SIZE+tgx];
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
#ifdef ENABLE_SHUFFLE
DECLARE_LOCAL_PARAMETERS
real4 shflPosq;
real3 shflForce;
shflForce.x = 0.0f;
shflForce.y = 0.0f;
shflForce.z = 0.0f;
#endif
if (j < PADDED_NUM_ATOMS) {
                // Load position of atom j from global memory
#ifdef ENABLE_SHUFFLE
shflPosq = posq[j];
#else
localData[threadIdx.x].x = posq[j].x;
localData[threadIdx.x].y = posq[j].y;
localData[threadIdx.x].z = posq[j].z;
localData[threadIdx.x].q = posq[j].w;
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
#endif
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
}
else {
#ifdef ENABLE_SHUFFLE
shflPosq = make_real4(0, 0, 0, 0);
#else
localData[threadIdx.x].x = 0;
localData[threadIdx.x].y = 0;
localData[threadIdx.x].z = 0;
#endif
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX)
#ifdef ENABLE_SHUFFLE
APPLY_PERIODIC_TO_POS_WITH_CENTER(shflPosq, blockCenterX)
#else
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x], blockCenterX)
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS);
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS);
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
#ifdef INCLUDE_FORCES
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS) {
#ifdef ENABLE_SHUFFLE
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (shflForce.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.z*0x100000000)));
#else
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
#endif
}
#endif
}
pos++;
}
// Third loop: single pairs that aren't part of a tile.
#if USE_CUTOFF
const unsigned int numPairs = interactionCount[1];
if (numPairs > maxSinglePairs)
return; // There wasn't enough memory for the neighbor list.
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numPairs; i += blockDim.x*gridDim.x) {
int2 pair = singlePairs[i];
int atom1 = pair.x;
int atom2 = pair.y;
real4 posq1 = posq[atom1];
real4 posq2 = posq[atom2];
LOAD_ATOM1_PARAMETERS
int j = atom2;
atom2 = threadIdx.x;
DECLARE_LOCAL_PARAMETERS
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
LOAD_ATOM2_PARAMETERS
atom2 = pair.y;
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
bool hasExclusions = false;
bool isExcluded = false;
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
real3 dEdR1 = delta*dEdR;
real3 dEdR2 = -dEdR1;
#endif
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (-dEdR1.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR1.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR1.z*0x100000000)));
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (-dEdR2.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR2.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR2.z*0x100000000)));
#endif
}
#endif
#ifdef INCLUDE_ENERGY
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
#endif
SAVE_DERIVATIVES
} | 84861c45d98dfb6690c6d8e543939bdba594de32.cu | #define WARPS_PER_GROUP (THREAD_BLOCK_SIZE/TILE_SIZE)
#ifndef ENABLE_SHUFFLE
typedef struct {
real x, y, z;
real q;
real fx, fy, fz;
ATOM_PARAMETER_DATA
#ifndef PARAMETER_SIZE_IS_EVEN
real padding;
#endif
} AtomData;
#endif
#ifdef ENABLE_SHUFFLE
//support for 64 bit shuffles
static __inline__ __device__ float real_shfl(float var, int srcLane) {
return SHFL(var, srcLane);
}
static __inline__ __device__ int real_shfl(int var, int srcLane) {
return SHFL(var, srcLane);
}
static __inline__ __device__ double real_shfl(double var, int srcLane) {
int hi, lo;
asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "d"(var));
hi = SHFL(hi, srcLane);
lo = SHFL(lo, srcLane);
return __hiloint2double( hi, lo );
}
static __inline__ __device__ long long real_shfl(long long var, int srcLane) {
int hi, lo;
asm volatile("mov.b64 { %0, %1 }, %2;" : "=r"(lo), "=r"(hi) : "l"(var));
hi = SHFL(hi, srcLane);
lo = SHFL(lo, srcLane);
    // unfortunately there isn't an __nv_hiloint2long(hi,lo) intrinsic cast
int2 fuse; fuse.x = lo; fuse.y = hi;
return *reinterpret_cast<long long*>(&fuse);
}
#endif
/**
* Compute nonbonded interactions. The kernel is separated into two parts,
* tiles with exclusions and tiles without exclusions. It relies heavily on
* implicit warp-level synchronization. A tile is defined by two atom blocks
* each of warpsize. Each warp computes a range of tiles.
*
* Tiles with exclusions compute the entire set of interactions across
* atom blocks, equal to warpsize*warpsize. In order to avoid access conflicts
* the forces are computed and accumulated diagonally in the manner shown below
* where, suppose
*
* [a-h] comprise atom block 1, [i-p] comprise atom block 2
*
* 1 denotes the first set of calculations within the warp
* 2 denotes the second set of calculations within the warp
* ... etc.
*
* threads
* 0 1 2 3 4 5 6 7
* atom1
* L a b c d e f g h
* o i 1 2 3 4 5 6 7 8
* c j 8 1 2 3 4 5 6 7
* a k 7 8 1 2 3 4 5 6
* l l 6 7 8 1 2 3 4 5
* D m 5 6 7 8 1 2 3 4
* a n 4 5 6 7 8 1 2 3
* t o 3 4 5 6 7 8 1 2
* a p 2 3 4 5 6 7 8 1
*
* Tiles without exclusions read off directly from the neighbourlist interactingAtoms
 * and follow the same force accumulation method. If there are more interactingTiles
* than the size of the neighbourlist initially allocated, the neighbourlist is rebuilt
* and the full tileset is computed. This should happen on the first step, and very rarely
* afterwards.
*
 * On CUDA devices that support the shuffle intrinsic, on-diagonal exclusion tiles use
* __shfl to broadcast. For all other types of tiles __shfl is used to pass around the
* forces, positions, and parameters when computing the forces.
*
* [out]forceBuffers - forces on each atom to eventually be accumulated
* [out]energyBuffer - energyBuffer to eventually be accumulated
* [in]posq - x,y,z,charge
* [in]exclusions - 1024-bit flags denoting atom-atom exclusions for each tile
* [in]exclusionTiles - x,y denotes the indices of tiles that have an exclusion
* [in]startTileIndex - index into first tile to be processed
* [in]numTileIndices - number of tiles this context is responsible for processing
* [in]int tiles - the atom block for each tile
* [in]interactionCount - total number of tiles that have an interaction
* [in]maxTiles - stores the size of the neighbourlist in case it needs
* - to be expanded
* [in]periodicBoxSize - size of the Periodic Box, last dimension (w) not used
* [in]invPeriodicBox - inverse of the periodicBoxSize, pre-computed for speed
* [in]blockCenter - the center of each block in euclidean coordinates
* [in]blockSize - size of the each block, radiating from the center
* - x is half the distance of total length
* - y is half the distance of total width
* - z is half the distance of total height
* - w is not used
* [in]interactingAtoms - a list of interactions within a given tile
*
*/
extern "C" __global__ void computeNonbonded(
unsigned long long* __restrict__ forceBuffers, mixed* __restrict__ energyBuffer, const real4* __restrict__ posq, const tileflags* __restrict__ exclusions,
const ushort2* __restrict__ exclusionTiles, unsigned int startTileIndex, unsigned int numTileIndices
#ifdef USE_CUTOFF
, const int* __restrict__ tiles, const unsigned int* __restrict__ interactionCount, real4 periodicBoxSize, real4 invPeriodicBoxSize,
real4 periodicBoxVecX, real4 periodicBoxVecY, real4 periodicBoxVecZ, unsigned int maxTiles, const real4* __restrict__ blockCenter,
const real4* __restrict__ blockSize, const unsigned int* __restrict__ interactingAtoms, unsigned int maxSinglePairs,
const int2* __restrict__ singlePairs
#endif
PARAMETER_ARGUMENTS) {
const unsigned int totalWarps = (blockDim.x*gridDim.x)/TILE_SIZE;
const unsigned int warp = (blockIdx.x*blockDim.x+threadIdx.x)/TILE_SIZE; // global warpIndex
const unsigned int tgx = threadIdx.x & (TILE_SIZE-1); // index within the warp
const unsigned int tbx = threadIdx.x - tgx; // block warpIndex
mixed energy = 0;
INIT_DERIVATIVES
    // use shared memory if the device cannot shuffle
#ifndef ENABLE_SHUFFLE
__shared__ AtomData localData[THREAD_BLOCK_SIZE];
#endif
// First loop: process tiles that contain exclusions.
const unsigned int firstExclusionTile = FIRST_EXCLUSION_TILE+warp*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
const unsigned int lastExclusionTile = FIRST_EXCLUSION_TILE+(warp+1)*(LAST_EXCLUSION_TILE-FIRST_EXCLUSION_TILE)/totalWarps;
for (int pos = firstExclusionTile; pos < lastExclusionTile; pos++) {
const ushort2 tileIndices = exclusionTiles[pos];
const unsigned int x = tileIndices.x;
const unsigned int y = tileIndices.y;
real3 force = make_real3(0);
unsigned int atom1 = x*TILE_SIZE + tgx;
real4 posq1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
#ifdef USE_EXCLUSIONS
tileflags excl = exclusions[pos*TILE_SIZE+tgx];
#endif
const bool hasExclusions = true;
if (x == y) {
// This tile is on the diagonal.
#ifdef ENABLE_SHUFFLE
real4 shflPosq = posq1;
#else
localData[threadIdx.x].x = posq1.x;
localData[threadIdx.x].y = posq1.y;
localData[threadIdx.x].z = posq1.z;
localData[threadIdx.x].q = posq1.w;
LOAD_LOCAL_PARAMETERS_FROM_1
#endif
// we do not need to fetch parameters from global since this is a symmetric tile
// instead we can broadcast the values using shuffle
for (unsigned int j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+j;
real4 posq2;
#ifdef ENABLE_SHUFFLE
BROADCAST_WARP_DATA
#else
posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+j;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1));
#endif
real tempEnergy = 0.0f;
const real interactionScale = 0.5f;
COMPUTE_INTERACTION
energy += 0.5f*tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
force.x -= delta.x*dEdR;
force.y -= delta.y*dEdR;
force.z -= delta.z*dEdR;
#else
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#endif
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
}
}
else {
// This is an off-diagonal tile.
unsigned int j = y*TILE_SIZE + tgx;
real4 shflPosq = posq[j];
#ifdef ENABLE_SHUFFLE
real3 shflForce;
shflForce.x = 0.0f;
shflForce.y = 0.0f;
shflForce.z = 0.0f;
#else
localData[threadIdx.x].x = shflPosq.x;
localData[threadIdx.x].y = shflPosq.y;
localData[threadIdx.x].z = shflPosq.z;
localData[threadIdx.x].q = shflPosq.w;
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
#endif
DECLARE_LOCAL_PARAMETERS
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
#ifdef USE_EXCLUSIONS
excl = (excl >> tgx) | (excl << (TILE_SIZE - tgx));
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = y*TILE_SIZE+tj;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS || !(excl & 0x1));
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
#ifdef USE_EXCLUSIONS
excl >>= 1;
#endif
// cycles the indices
// 0 1 2 3 4 5 6 7 -> 1 2 3 4 5 6 7 0
tj = (tj + 1) & (TILE_SIZE - 1);
}
const unsigned int offset = y*TILE_SIZE + tgx;
// write results for off diagonal tiles
#ifdef INCLUDE_FORCES
#ifdef ENABLE_SHUFFLE
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (shflForce.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.z*0x100000000)));
#else
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
#endif
#endif
}
// Write results for on and off diagonal tiles
#ifdef INCLUDE_FORCES
const unsigned int offset = x*TILE_SIZE + tgx;
atomicAdd(&forceBuffers[offset], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[offset+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[offset+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
#endif
}
// Second loop: tiles without exclusions, either from the neighbor list (with cutoff) or just enumerating all
// of them (no cutoff).
#ifdef USE_CUTOFF
const unsigned int numTiles = interactionCount[0];
if (numTiles > maxTiles)
return; // There wasn't enough memory for the neighbor list.
int pos = (int) (numTiles > maxTiles ? startTileIndex+warp*(long long)numTileIndices/totalWarps : warp*(long long)numTiles/totalWarps);
int end = (int) (numTiles > maxTiles ? startTileIndex+(warp+1)*(long long)numTileIndices/totalWarps : (warp+1)*(long long)numTiles/totalWarps);
#else
const unsigned int numTiles = numTileIndices;
int pos = (int) (startTileIndex+warp*(long long)numTiles/totalWarps);
int end = (int) (startTileIndex+(warp+1)*(long long)numTiles/totalWarps);
#endif
int skipBase = 0;
int currentSkipIndex = tbx;
// atomIndices can probably be shuffled as well
// but it probably wouldn't make things any faster
__shared__ int atomIndices[THREAD_BLOCK_SIZE];
__shared__ volatile int skipTiles[THREAD_BLOCK_SIZE];
skipTiles[threadIdx.x] = -1;
while (pos < end) {
const bool hasExclusions = false;
real3 force = make_real3(0);
bool includeTile = true;
// Extract the coordinates of this tile.
int x, y;
bool singlePeriodicCopy = false;
#ifdef USE_CUTOFF
x = tiles[pos];
real4 blockSizeX = blockSize[x];
singlePeriodicCopy = (0.5f*periodicBoxSize.x-blockSizeX.x >= MAX_CUTOFF &&
0.5f*periodicBoxSize.y-blockSizeX.y >= MAX_CUTOFF &&
0.5f*periodicBoxSize.z-blockSizeX.z >= MAX_CUTOFF);
#else
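        // Decode the flattened triangular tile index pos into atom block indices (x, y).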
y = (int) floor(NUM_BLOCKS+0.5f-SQRT((NUM_BLOCKS+0.5f)*(NUM_BLOCKS+0.5f)-2*pos));
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
if (x < y || x >= NUM_BLOCKS) { // Occasionally happens due to roundoff error.
y += (x < y ? -1 : 1);
x = (pos-y*NUM_BLOCKS+y*(y+1)/2);
}
// Skip over tiles that have exclusions, since they were already processed.
while (skipTiles[tbx+TILE_SIZE-1] < pos) {
if (skipBase+tgx < NUM_TILES_WITH_EXCLUSIONS) {
ushort2 tile = exclusionTiles[skipBase+tgx];
skipTiles[threadIdx.x] = tile.x + tile.y*NUM_BLOCKS - tile.y*(tile.y+1)/2;
}
else
skipTiles[threadIdx.x] = end;
skipBase += TILE_SIZE;
currentSkipIndex = tbx;
}
while (skipTiles[currentSkipIndex] < pos)
currentSkipIndex++;
includeTile = (skipTiles[currentSkipIndex] != pos);
#endif
if (includeTile) {
unsigned int atom1 = x*TILE_SIZE + tgx;
// Load atom data for this tile.
real4 posq1 = posq[atom1];
LOAD_ATOM1_PARAMETERS
//const unsigned int localAtomIndex = threadIdx.x;
#ifdef USE_CUTOFF
unsigned int j = interactingAtoms[pos*TILE_SIZE+tgx];
#else
unsigned int j = y*TILE_SIZE + tgx;
#endif
atomIndices[threadIdx.x] = j;
#ifdef ENABLE_SHUFFLE
DECLARE_LOCAL_PARAMETERS
real4 shflPosq;
real3 shflForce;
shflForce.x = 0.0f;
shflForce.y = 0.0f;
shflForce.z = 0.0f;
#endif
if (j < PADDED_NUM_ATOMS) {
                // Load position of atom j from global memory
#ifdef ENABLE_SHUFFLE
shflPosq = posq[j];
#else
localData[threadIdx.x].x = posq[j].x;
localData[threadIdx.x].y = posq[j].y;
localData[threadIdx.x].z = posq[j].z;
localData[threadIdx.x].q = posq[j].w;
localData[threadIdx.x].fx = 0.0f;
localData[threadIdx.x].fy = 0.0f;
localData[threadIdx.x].fz = 0.0f;
#endif
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
}
else {
#ifdef ENABLE_SHUFFLE
shflPosq = make_real4(0, 0, 0, 0);
#else
localData[threadIdx.x].x = 0;
localData[threadIdx.x].y = 0;
localData[threadIdx.x].z = 0;
#endif
}
#ifdef USE_PERIODIC
if (singlePeriodicCopy) {
// The box is small enough that we can just translate all the atoms into a single periodic
// box, then skip having to apply periodic boundary conditions later.
real4 blockCenterX = blockCenter[x];
APPLY_PERIODIC_TO_POS_WITH_CENTER(posq1, blockCenterX)
#ifdef ENABLE_SHUFFLE
APPLY_PERIODIC_TO_POS_WITH_CENTER(shflPosq, blockCenterX)
#else
APPLY_PERIODIC_TO_POS_WITH_CENTER(localData[threadIdx.x], blockCenterX)
#endif
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS);
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
else
#endif
{
// We need to apply periodic boundary conditions separately for each interaction.
unsigned int tj = tgx;
for (j = 0; j < TILE_SIZE; j++) {
int atom2 = tbx+tj;
#ifdef ENABLE_SHUFFLE
real4 posq2 = shflPosq;
#else
real4 posq2 = make_real4(localData[atom2].x, localData[atom2].y, localData[atom2].z, localData[atom2].q);
#endif
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
LOAD_ATOM2_PARAMETERS
atom2 = atomIndices[tbx+tj];
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
#ifdef USE_EXCLUSIONS
bool isExcluded = (atom1 >= NUM_ATOMS || atom2 >= NUM_ATOMS);
#endif
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
delta *= dEdR;
force.x -= delta.x;
force.y -= delta.y;
force.z -= delta.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += delta.x;
shflForce.y += delta.y;
shflForce.z += delta.z;
#else
localData[tbx+tj].fx += delta.x;
localData[tbx+tj].fy += delta.y;
localData[tbx+tj].fz += delta.z;
#endif
#else // !USE_SYMMETRIC
force.x -= dEdR1.x;
force.y -= dEdR1.y;
force.z -= dEdR1.z;
#ifdef ENABLE_SHUFFLE
shflForce.x += dEdR2.x;
shflForce.y += dEdR2.y;
shflForce.z += dEdR2.z;
#else
localData[tbx+tj].fx += dEdR2.x;
localData[tbx+tj].fy += dEdR2.y;
localData[tbx+tj].fz += dEdR2.z;
#endif
#endif // end USE_SYMMETRIC
#endif
#ifdef ENABLE_SHUFFLE
SHUFFLE_WARP_DATA
#endif
tj = (tj + 1) & (TILE_SIZE - 1);
}
}
// Write results.
#ifdef INCLUDE_FORCES
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (force.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (force.z*0x100000000)));
#ifdef USE_CUTOFF
unsigned int atom2 = atomIndices[threadIdx.x];
#else
unsigned int atom2 = y*TILE_SIZE + tgx;
#endif
if (atom2 < PADDED_NUM_ATOMS) {
#ifdef ENABLE_SHUFFLE
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (shflForce.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (shflForce.z*0x100000000)));
#else
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fx*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fy*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (localData[threadIdx.x].fz*0x100000000)));
#endif
}
#endif
}
pos++;
}
// Third loop: single pairs that aren't part of a tile.
#if USE_CUTOFF
const unsigned int numPairs = interactionCount[1];
if (numPairs > maxSinglePairs)
return; // There wasn't enough memory for the neighbor list.
for (int i = blockIdx.x*blockDim.x+threadIdx.x; i < numPairs; i += blockDim.x*gridDim.x) {
int2 pair = singlePairs[i];
int atom1 = pair.x;
int atom2 = pair.y;
real4 posq1 = posq[atom1];
real4 posq2 = posq[atom2];
LOAD_ATOM1_PARAMETERS
int j = atom2;
atom2 = threadIdx.x;
DECLARE_LOCAL_PARAMETERS
LOAD_LOCAL_PARAMETERS_FROM_GLOBAL
LOAD_ATOM2_PARAMETERS
atom2 = pair.y;
real3 delta = make_real3(posq2.x-posq1.x, posq2.y-posq1.y, posq2.z-posq1.z);
#ifdef USE_PERIODIC
APPLY_PERIODIC_TO_DELTA(delta)
#endif
real r2 = delta.x*delta.x + delta.y*delta.y + delta.z*delta.z;
real invR = RSQRT(r2);
real r = r2*invR;
#ifdef USE_SYMMETRIC
real dEdR = 0.0f;
#else
real3 dEdR1 = make_real3(0);
real3 dEdR2 = make_real3(0);
#endif
bool hasExclusions = false;
bool isExcluded = false;
real tempEnergy = 0.0f;
const real interactionScale = 1.0f;
COMPUTE_INTERACTION
energy += tempEnergy;
#ifdef INCLUDE_FORCES
#ifdef USE_SYMMETRIC
real3 dEdR1 = delta*dEdR;
real3 dEdR2 = -dEdR1;
#endif
atomicAdd(&forceBuffers[atom1], static_cast<unsigned long long>((long long) (-dEdR1.x*0x100000000)));
atomicAdd(&forceBuffers[atom1+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR1.y*0x100000000)));
atomicAdd(&forceBuffers[atom1+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR1.z*0x100000000)));
atomicAdd(&forceBuffers[atom2], static_cast<unsigned long long>((long long) (-dEdR2.x*0x100000000)));
atomicAdd(&forceBuffers[atom2+PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR2.y*0x100000000)));
atomicAdd(&forceBuffers[atom2+2*PADDED_NUM_ATOMS], static_cast<unsigned long long>((long long) (-dEdR2.z*0x100000000)));
#endif
}
#endif
#ifdef INCLUDE_ENERGY
energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
#endif
SAVE_DERIVATIVES
} |
b85c7205891e476680658f4a550eb0c1663296e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//double* x, * devx, * val, * gra, * r, * graMax;
//double* hes_value;
////int size;
//int* pos_x, * pos_y;
//int* csr;
double* x;
//thrust::pair<int, int> *device_pos;
//typedef double (*fp)(double);
//typedef void (*val_fp)(double*, double*, int);
//typedef void (*valsum_fp)(double*, double*,int);
//typedef void (*gra_fp)(double*, double*, int);
//typedef void (*gramin_fp)(double*, double*,int);
//typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int);
//typedef void (*print_fp)(double*, int);
int numSMs;
__device__ double sqr(double x) {
return x * x;
}
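// calculate_gra: gra[i] = x[i-1]*sin(2*x[i]*x[i-1]) + x[i+1]*sin(2*x[i]*x[i+1]) with cyclic wrap-around,
// which is consistent with the partial derivative d/dx[i] of sum_j sin^2(x[j]*x[j+1]).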
__global__ void calculate_gra(double* devx, double* gra,int size) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < size;
index += blockDim.x * gridDim.x)
{
int pre = index - 1;
if (pre < 0) pre += size;
int next = index + 1;
if (next >= size) next -= size;
gra[index] = devx[pre] * sin(2.0 * devx[index] * devx[pre]) + devx[next] * sin(2.0 * devx[index] * devx[next]);
printf("gra %d %d %d %f %f %f\n", pre, index, next, sqr(devx[index]), devx[pre] * sin(2.0 * devx[index] * devx[pre]), gra[index]);
}
} | b85c7205891e476680658f4a550eb0c1663296e7.cu | #include "includes.h"
//double* x, * devx, * val, * gra, * r, * graMax;
//double* hes_value;
////int size;
//int* pos_x, * pos_y;
//int* csr;
double* x;
//thrust::pair<int, int> *device_pos;
//typedef double (*fp)(double);
//typedef void (*val_fp)(double*, double*, int);
//typedef void (*valsum_fp)(double*, double*,int);
//typedef void (*gra_fp)(double*, double*, int);
//typedef void (*gramin_fp)(double*, double*,int);
//typedef void (*hes_fp)( double*, thrust::pair<int, int>*, double*, int);
//typedef void (*print_fp)(double*, int);
int numSMs;
__device__ double sqr(double x) {
return x * x;
}
__global__ void calculate_gra(double* devx, double* gra,int size) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < size;
index += blockDim.x * gridDim.x)
{
int pre = index - 1;
if (pre < 0) pre += size;
int next = index + 1;
if (next >= size) next -= size;
gra[index] = devx[pre] * sin(2.0 * devx[index] * devx[pre]) + devx[next] * sin(2.0 * devx[index] * devx[next]);
printf("gra %d %d %d %f %f %f\n", pre, index, next, sqr(devx[index]), devx[pre] * sin(2.0 * devx[index] * devx[pre]), gra[index]);
}
} |
d265466e3504c47c1f04705d5312da7405a946cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <unistd.h>
#include <vector>
#include <iostream>
//#include <Windows.h>
//#include <direct.h>
using namespace std;
#ifndef __HIPCC__
#define __HIPCC__
#endif
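// minIndex: returns the index of the nearest active center (centerflags[i] == true) to the given
// data point, measured by squared Euclidean distance.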
__device__ int minIndex(double* data,double* centers,bool* centerflags,int kCenter,int dimension)
{
int result;
int firstindex=0;
while(firstindex<kCenter && (! centerflags[ firstindex]))
++firstindex;
result=firstindex;
double mindis=0.0;
for (int i = 0; i < dimension; i++)
{
double tdis=centers[firstindex*dimension+i]-data[i];
mindis+=tdis*tdis;
}
for(int i=firstindex+1;i<kCenter;++i)
{
if(centerflags[i])
{
double tdis=0.0;
for (int j = 0; j < dimension; j++)
{
double ttdis=centers[i*dimension+j]-data[j];
tdis+=ttdis*ttdis;
}
if(tdis<mindis)
{
mindis=tdis;
result=i;
}
}
}
return result;
}
vector<int> shuffledOrder(int n,int m)
{
//assert(n>=m);
vector<int> result(m);
vector<int> index(n);
for(int i=0;i<n;++i)
{
index[i]=i;
}
for(int i=0;i<m;++i)
{
int tem=rand()%(n-i);
result[i]=index[tem];
index[tem]=index[n-i-1];
}
return result;
}
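// Assignment step: each thread relabels one data point with its nearest active center and records
// whether the label changed.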
__global__ void updatebelonging4(int index,double* dataset,int datasize,int dimension,double* centers,int* labels,int kCenter,bool* goodCenterFlag,bool* CenterChangeFlag)
{
int j=index+blockDim.x* blockIdx.x +threadIdx.x;
int tlabel;
if(j<datasize)
{
CenterChangeFlag[j]=false;
tlabel=minIndex(dataset+j*dimension, centers,goodCenterFlag,kCenter,dimension);
if(tlabel!=labels[j])
CenterChangeFlag[j]=true;
labels[j]=tlabel;
}
}
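// Bookkeeping step (launched with a single thread): recounts cluster sizes, deactivates empty centers,
// rebuilds the CSR-style centerStartIndex/correspondings mapping from centers to their assigned points,
// and reports whether any label changed in this iteration.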
__global__ void updateCorresponds(int* labels,int datasize,int kCenter,int* correspondings,bool* centerChangeFlag,int* centerStartIndex,int* centerCount,int* curCount,bool* goodCenterFlag,bool* nochange)
{
// hipMemset(correspondings,0,sizeof(int)*datasize);
// hipMemset(centerCount,0,sizeof(int)*kCenter);
*nochange=true;
for (int i = 0; i < kCenter; i++)
{
centerCount[i]=0;
curCount[i]=0;
}
for (int i = 0; i < datasize; i++)
{
++centerCount[labels[i]];
if(centerChangeFlag[i])
{
*nochange=false;
centerChangeFlag[i]=false;
}
}
centerStartIndex[0]=0;
for (int i = 0; i < kCenter; i++)
{
if (centerCount[i]==0)
{
goodCenterFlag[i]=false;
}
if(i>0)
{
centerStartIndex[i]=centerStartIndex[i-1]+centerCount[i-1];
}
}
//curCount=new int[kCenter];
for (int i = 0; i < datasize; i++)
{
int tlabel=labels[i];
//int ind=;
correspondings[centerStartIndex[tlabel] + curCount[tlabel]]=i;
++curCount[tlabel];
}
}
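// Update step: each thread recomputes one center as the mean of the data points currently assigned to it.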
__global__ void updateCenters4(int ind,double* dataset,int datasize,int dimension,double* centers,int kCenter,int* corresponding,int* centerStartIndex,int* centerCount)
{
int j=ind+blockDim.x*blockIdx.x+threadIdx.x;
if(j<kCenter)
{
if(centerCount[j]>0)
{
for (int i = 0; i < dimension; i++)
{
centers[j*dimension+i]=0;
}
for (int i = 0; i < centerCount[j]; i++)
{
int curinde=corresponding[ centerStartIndex[j]+i];
for (int k = 0; k < dimension; k++)
{
centers[j*dimension+k]+=dataset[curinde*dimension+k]/centerCount[j];
}
}
}
}
}
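// Host driver: seeds the centers with randomly chosen data points, then alternates the assignment,
// bookkeeping, and update kernels until no label changes or maxIterationNumber is reached.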
void kmeans4(double* dataset,int datasize,int dimension,double* centers,int* labels,int kCenter,int maxIterationNumber,int threadsize,int blocksize=65535)
{
vector<int> initialCenterIndex=shuffledOrder(datasize,kCenter);
for(int i=0;i<kCenter;++i)
for(int j=0;j<dimension;++j)
centers[i*dimension+j]=dataset[initialCenterIndex[i]*dimension+j];
bool* goodCenterFlag;
int* centerCount;
int* curCount;
bool* centerChangeFlag;
int* corresponding;
int* centerStartIndex;
bool* noChange;
hipMallocManaged(&goodCenterFlag,sizeof(bool)*kCenter);
hipMallocManaged(¢erCount,sizeof(int)*kCenter);
hipMallocManaged(&curCount,sizeof(int)*kCenter);
hipMallocManaged(¢erChangeFlag,sizeof(bool)*datasize);
hipMallocManaged(&corresponding,sizeof(int)*datasize);
hipMallocManaged(¢erStartIndex,sizeof(int)*kCenter);
hipMalloc(&noChange,sizeof(bool));
for(int i=0;i<kCenter;++i) goodCenterFlag[i]=true;
for(int i=0;i<kCenter;++i) centerCount[i]=0;
for(int i=0;i<datasize;++i) centerChangeFlag[i]=false;
hipError_t error;
for(int iterN=0;iterN<maxIterationNumber;++iterN)
{
int remain=datasize;
while(remain>0)
{
int tblocksize=blocksize;
if(blocksize*threadsize>=remain)
{
tblocksize=remain/threadsize+(remain%threadsize==0?0:1);
}
hipLaunchKernelGGL(( updatebelonging4), dim3(tblocksize),dim3(threadsize), 0, 0, datasize-remain,dataset,datasize,
dimension,centers,labels,
kCenter,goodCenterFlag,
centerChangeFlag);
remain-=tblocksize*threadsize;
}
//printf("belongings ok\n");
error = hipGetLastError();
if(error != hipSuccess)
{
printf("belong CUDA error: %s\n", hipGetErrorString(error));
}
hipLaunchKernelGGL(( updateCorresponds), dim3(1),dim3(1), 0, 0, labels,datasize,kCenter,corresponding,centerChangeFlag,centerStartIndex,centerCount,curCount,goodCenterFlag,noChange);
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("corresponding CUDA error: %s\n", hipGetErrorString(error));
// exit(-1);
}
remain=kCenter;
while(remain>0)
{
//printf("%d ",i);
int tblocksize=blocksize;
if(blocksize*threadsize>remain)
{
tblocksize=remain/threadsize+(remain%threadsize==0?0:1);
}
hipLaunchKernelGGL(( updateCenters4), dim3(tblocksize),dim3(threadsize), 0, 0, kCenter-remain,dataset,datasize,dimension,centers,kCenter,corresponding,centerStartIndex,centerCount);
remain-=tblocksize*threadsize;
}
//printf("center ok\n");
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("center update CUDA error: %s\n", hipGetErrorString(error));
// exit(-1);
}
printf("finished iteration NO. %d\n",iterN);
bool hnochange;
hipMemcpy(&hnochange,noChange,sizeof(bool),hipMemcpyDeviceToHost);
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("memcopy free CUDA error: %s\n", hipGetErrorString(error));
// exit(-1);
}
if(hnochange)
break;
}
hipError_t cudaStatus;
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("before free CUDA error: %s\n", hipGetErrorString(error));
// exit(-1);
}
hipFree(noChange);
hipFree(goodCenterFlag);
hipFree(corresponding);
hipFree(centerStartIndex);
hipFree(centerCount);
hipFree(curCount);
hipFree(centerChangeFlag);
hipFree(dataset);
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("before CUDA error: %s\n", hipGetErrorString(error));
// exit(-1);
}
hipDeviceSynchronize();
error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
// exit(-1);
}
}
int main()
{
// _chdir("D:\\DATA\\Fujitsu\\images\\training\\");
chdir("/home/pitybea/features/");
hipDeviceProp_t prop;
hipSetDevice(0);
hipGetDeviceProperties(&prop,0);
cout<<prop.maxThreadsPerBlock<<endl;
double* dataset;
FILE* fp=fopen("task1_features.txt","r");
int size,dimension;
fscanf(fp,"%d %d\n",&size,&dimension);
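// Note: the size read from the file header is overridden on the next line; only the first 100000 feature rows are used.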
size=100000;
hipMallocManaged(&dataset,sizeof(double)*size*dimension);
printf("%d %d\n",size,dimension);
for (int i=0;i<size;i++)
{
if(i%10000==0) printf("%d\t",i);
for (int j=0;j<dimension;j++)
{
fscanf(fp,"%lf ",&dataset[i*dimension+j]);
}
fscanf(fp,"\n");
}
fclose(fp);
int k=size/1000;
double* centers;
int* labels;
hipMallocManaged(¢ers,sizeof(double)*k*dimension);
hipMallocManaged(&labels,sizeof(int)*size);
for(int i=0;i<k*dimension;++i)
centers[i]=0;
for(int i=0;i<size;++i)
labels[i]=0;
kmeans4(dataset,size,dimension,centers,labels,k,400,prop.maxThreadsPerBlock);
//cout<<labels[0]<<" "<<endl;
//FILE* fp;
fp=fopen("labels.txt","w");
fprintf(fp,"%d\n",size);
for(int i=0;i<size;i++)
{
//if(i%1000==0)
cout<<labels[i]<<" ";
fprintf(fp,"%d\n",labels[i]);
}
fclose(fp);
/*
fp=fopen("centers.txt","w");
fprintf(fp,"%d %d\n",k,dimension);
for(int i=0;i<k;i++)
{
for(int j=0;j<dimension;j++)
fprintf(fp,"%lf ",centers[i*dimension+j]);
fprintf(fp,"\n");
}
fclose(fp);
*/
hipFree(labels);
hipFree(centers);
hipDeviceReset();
return 0;
}
| d265466e3504c47c1f04705d5312da7405a946cd.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <unistd.h>
#include <vector>
#include <iostream>
//#include <Windows.h>
//#include <direct.h>
using namespace std;
#ifndef __CUDACC__
#define __CUDACC__
#endif
__device__ int minIndex(double* data,double* centers,bool* centerflags,int kCenter,int dimension)
{
int result;
int firstindex=0;
while(firstindex<kCenter && (! centerflags[ firstindex]))
++firstindex;
result=firstindex;
double mindis=0.0;
for (int i = 0; i < dimension; i++)
{
double tdis=centers[firstindex*dimension+i]-data[i];
mindis+=tdis*tdis;
}
for(int i=firstindex+1;i<kCenter;++i)
{
if(centerflags[i])
{
double tdis=0.0;
for (int j = 0; j < dimension; j++)
{
double ttdis=centers[i*dimension+j]-data[j];
tdis+=ttdis*ttdis;
}
if(tdis<mindis)
{
mindis=tdis;
result=i;
}
}
}
return result;
}
vector<int> shuffledOrder(int n,int m)
{
//assert(n>=m);
vector<int> result(m);
vector<int> index(n);
for(int i=0;i<n;++i)
{
index[i]=i;
}
for(int i=0;i<m;++i)
{
int tem=rand()%(n-i);
result[i]=index[tem];
index[tem]=index[n-i-1];
}
return result;
}
__global__ void updatebelonging4(int index,double* dataset,int datasize,int dimension,double* centers,int* labels,int kCenter,bool* goodCenterFlag,bool* CenterChangeFlag)
{
int j=index+blockDim.x* blockIdx.x +threadIdx.x;
int tlabel;
if(j<datasize)
{
CenterChangeFlag[j]=false;
tlabel=minIndex(dataset+j*dimension, centers,goodCenterFlag,kCenter,dimension);
if(tlabel!=labels[j])
CenterChangeFlag[j]=true;
labels[j]=tlabel;
}
}
__global__ void updateCorresponds(int* labels,int datasize,int kCenter,int* correspondings,bool* centerChangeFlag,int* centerStartIndex,int* centerCount,int* curCount,bool* goodCenterFlag,bool* nochange)
{
// cudaMemset(correspondings,0,sizeof(int)*datasize);
// cudaMemset(centerCount,0,sizeof(int)*kCenter);
*nochange=true;
for (int i = 0; i < kCenter; i++)
{
centerCount[i]=0;
curCount[i]=0;
}
for (int i = 0; i < datasize; i++)
{
++centerCount[labels[i]];
if(centerChangeFlag[i])
{
*nochange=false;
centerChangeFlag[i]=false;
}
}
centerStartIndex[0]=0;
for (int i = 0; i < kCenter; i++)
{
if (centerCount[i]==0)
{
goodCenterFlag[i]=false;
}
if(i>0)
{
centerStartIndex[i]=centerStartIndex[i-1]+centerCount[i-1];
}
}
//curCount=new int[kCenter];
for (int i = 0; i < datasize; i++)
{
int tlabel=labels[i];
//int ind=;
correspondings[centerStartIndex[tlabel] + curCount[tlabel]]=i;
++curCount[tlabel];
}
}
__global__ void updateCenters4(int ind,double* dataset,int datasize,int dimension,double* centers,int kCenter,int* corresponding,int* centerStartIndex,int* centerCount)
{
int j=ind+blockDim.x*blockIdx.x+threadIdx.x;
if(j<kCenter)
{
if(centerCount[j]>0)
{
for (int i = 0; i < dimension; i++)
{
centers[j*dimension+i]=0;
}
for (int i = 0; i < centerCount[j]; i++)
{
int curinde=corresponding[ centerStartIndex[j]+i];
for (int k = 0; k < dimension; k++)
{
centers[j*dimension+k]+=dataset[curinde*dimension+k]/centerCount[j];
}
}
}
}
}
void kmeans4(double* dataset,int datasize,int dimension,double* centers,int* labels,int kCenter,int maxIterationNumber,int threadsize,int blocksize=65535)
{
vector<int> initialCenterIndex=shuffledOrder(datasize,kCenter);
for(int i=0;i<kCenter;++i)
for(int j=0;j<dimension;++j)
centers[i*dimension+j]=dataset[initialCenterIndex[i]*dimension+j];
bool* goodCenterFlag;
int* centerCount;
int* curCount;
bool* centerChangeFlag;
int* corresponding;
int* centerStartIndex;
bool* noChange;
cudaMallocManaged(&goodCenterFlag,sizeof(bool)*kCenter);
cudaMallocManaged(¢erCount,sizeof(int)*kCenter);
cudaMallocManaged(&curCount,sizeof(int)*kCenter);
cudaMallocManaged(¢erChangeFlag,sizeof(bool)*datasize);
cudaMallocManaged(&corresponding,sizeof(int)*datasize);
cudaMallocManaged(¢erStartIndex,sizeof(int)*kCenter);
cudaMalloc(&noChange,sizeof(bool));
for(int i=0;i<kCenter;++i) goodCenterFlag[i]=true;
for(int i=0;i<kCenter;++i) centerCount[i]=0;
for(int i=0;i<datasize;++i) centerChangeFlag[i]=false;
cudaError_t error;
for(int iterN=0;iterN<maxIterationNumber;++iterN)
{
int remain=datasize;
while(remain>0)
{
int tblocksize=blocksize;
if(blocksize*threadsize>=remain)
{
tblocksize=remain/threadsize+(remain%threadsize==0?0:1);
}
updatebelonging4<<<tblocksize,threadsize>>>(datasize-remain,dataset,datasize,
dimension,centers,labels,
kCenter,goodCenterFlag,
centerChangeFlag);
remain-=tblocksize*threadsize;
}
//printf("belongings ok\n");
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("belong CUDA error: %s\n", cudaGetErrorString(error));
}
updateCorresponds<<<1,1>>>(labels,datasize,kCenter,corresponding,centerChangeFlag,centerStartIndex,centerCount,curCount,goodCenterFlag,noChange);
error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("corresponding CUDA error: %s\n", cudaGetErrorString(error));
// exit(-1);
}
remain=kCenter;
while(remain>0)
{
//printf("%d ",i);
int tblocksize=blocksize;
if(blocksize*threadsize>remain)
{
tblocksize=remain/threadsize+(remain%threadsize==0?0:1);
}
updateCenters4<<<tblocksize,threadsize>>>(kCenter-remain,dataset,datasize,dimension,centers,kCenter,corresponding,centerStartIndex,centerCount);
remain-=tblocksize*threadsize;
}
//printf("center ok\n");
error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("center update CUDA error: %s\n", cudaGetErrorString(error));
// exit(-1);
}
printf("finished iteration NO. %d\n",iterN);
bool hnochange;
cudaMemcpy(&hnochange,noChange,sizeof(bool),cudaMemcpyDeviceToHost);
error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("memcopy free CUDA error: %s\n", cudaGetErrorString(error));
// exit(-1);
}
if(hnochange)
break;
}
cudaError_t cudaStatus;
error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("before free CUDA error: %s\n", cudaGetErrorString(error));
// exit(-1);
}
cudaFree(noChange);
cudaFree(goodCenterFlag);
cudaFree(corresponding);
cudaFree(centerStartIndex);
cudaFree(centerCount);
cudaFree(curCount);
cudaFree(centerChangeFlag);
cudaFree(dataset);
error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("before CUDA error: %s\n", cudaGetErrorString(error));
// exit(-1);
}
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
// exit(-1);
}
}
int main()
{
// _chdir("D:\\DATA\\Fujitsu\\images\\training\\");
chdir("/home/pitybea/features/");
cudaDeviceProp prop;
cudaSetDevice(0);
cudaGetDeviceProperties(&prop,0);
cout<<prop.maxThreadsPerBlock<<endl;
double* dataset;
FILE* fp=fopen("task1_features.txt","r");
int size,dimension;
fscanf(fp,"%d %d\n",&size,&dimension);
size=100000;
cudaMallocManaged(&dataset,sizeof(double)*size*dimension);
printf("%d %d\n",size,dimension);
for (int i=0;i<size;i++)
{
if(i%10000==0) printf("%d\t",i);
for (int j=0;j<dimension;j++)
{
fscanf(fp,"%lf ",&dataset[i*dimension+j]);
}
fscanf(fp,"\n");
}
fclose(fp);
int k=size/1000;
double* centers;
int* labels;
cudaMallocManaged(¢ers,sizeof(double)*k*dimension);
cudaMallocManaged(&labels,sizeof(int)*size);
for(int i=0;i<k*dimension;++i)
centers[i]=0;
for(int i=0;i<size;++i)
labels[i]=0;
kmeans4(dataset,size,dimension,centers,labels,k,400,prop.maxThreadsPerBlock);
//cout<<labels[0]<<" "<<endl;
//FILE* fp;
fp=fopen("labels.txt","w");
fprintf(fp,"%d\n",size);
for(int i=0;i<size;i++)
{
//if(i%1000==0)
cout<<labels[i]<<" ";
fprintf(fp,"%d\n",labels[i]);
}
fclose(fp);
/*
fp=fopen("centers.txt","w");
fprintf(fp,"%d %d\n",k,dimension);
for(int i=0;i<k;i++)
{
for(int j=0;j<dimension;j++)
fprintf(fp,"%lf ",centers[i*dimension+j]);
fprintf(fp,"\n");
}
fclose(fp);
*/
cudaFree(labels);
cudaFree(centers);
cudaDeviceReset();
return 0;
}
|
f450f66d69255ff7c62874dc3ff19359fbb77a5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Inverse Discrete Sine Transform in row wise (DST one)
* DST_I_Row_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_I_Row_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DELTA(i, j) ((i==j)?1:0)
#define DEFAULT_DIM 32
const double PI_d = 3.141592653589793238462643383279502884; //pi
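// Both kernels below compute the dense matrix product C = A * B, looping over the inner dimension in
// DEFAULT_DIM-sized chunks without shared-memory tiling; the row-wise inverse DST-I is applied by
// right-multiplying the input by the basis matrix built in mexFunction.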
__global__ void DSTI_Row__InverseKernel_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
__global__ void DSTI_Row__InverseKernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
// Allocate GPU buffers for three vectors (two input, one output) .
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
hipMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
hipMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
hipMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
hipMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, hipMemcpyHostToDevice);
hipMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, hipMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Row__InverseKernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
hipDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost);
C = hostC;
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode =hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* Throw an error if the input is not a GPU array. */
if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) {
mexErrMsgIdAndTxt(errId, errMsg);
}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
if (numAColumns==1)
{
printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n");
return;
}
numDCOSRows=numDCOSColumns=numAColumns;
numCRows = numARows;
numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
//Inverse Discrete Sine Transform in row wise
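    // Build the numDCOSColumns x numDCOSColumns DST-I basis: S(i,j) = sqrt(2/(N+1)) * sin(pi*(i+1)*(j+1)/(N+1)).
    // With this normalization the DST-I matrix is symmetric and orthogonal (its own inverse), so the
    // same matrix also performs the inverse transform.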
for (int i = 0; i < numDCOSRows; i++){
for (int j = 0; j < numDCOSColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
pointer[i + j* numDCOSColumns] = sin((((j + 1)*PI_d*(i + 1)) / (numDCOSColumns + 1)))*sqrt(2.0 / (numDCOSColumns + 1));
//hostB[i + j* numBColumns] = 1;
//hostBinv[i + j* numBColumns] = cos((j*3.14*i / (numBColumns - 1)))*sqrt(2.0 / numBColumns)*(1.0 / 2.0);
//hostB[i + j* numBColumns] = 1;
}
}
// for (int i = 0; i < numDCOSRows; i++){
// for (int j = 0; j < numDCOSColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
// //hostBinv[i * numBColumns + j] = 1;
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//
// if (numDCOSColumns != 1){
// pointer[i + j* numDCOSColumns] = cos((j*PI_d*i / (numDCOSColumns - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numDCOSRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numDCOSColumns, j + 1)))*sqrt(2.0 / numDCOSColumns);
// //hostB[i + j* numBColumns] = 1;
// //hostBinv[i + j* numBColumns] = cos((j*3.14*i / (numBColumns - 1)))*sqrt(2.0 / numBColumns)*(1.0 / 2.0);
// //hostB[i + j* numBColumns] = 1;
//
// }
//
// else{
// pointer[i + j* numDCOSColumns] =1;
// }
// }
// }
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
// hipError_t err1 = hipPeekAtLastError();//To capture last error in function call
//hipDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numAColumns;
numCRows = numARows;
if (numAColumns==1)
{
printf("Attention, this is a column vector, please try Inverse Discrete Sine Transform in column wise \n");
return;
}
numCColumns = numBColumns;
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
//Inverse Discrete Sine Transform in row wise
for (int i = 0; i < numBRows; i++){
for (int j = 0; j < numBColumns; j++){
//hostB[i * numBColumns + j] = i + j* numAColumns;
//hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
//hostBinv[i * numBColumns + j] = 1;
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//hostB[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
hostB[i + j* numBColumns] = sin((((j + 1)*PI_d*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
//hostB[i + j* numBColumns] = 1;
//hostBinv[i + j* numBColumns] = cos((j*3.14*i / (numBColumns - 1)))*sqrt(2.0 / numBColumns)*(1.0 / 2.0);
//hostB[i + j* numBColumns] = 1;
}
}
// for (int i = 0; i < numBRows; i++){
// for (int j = 0; j < numBColumns; j++){
// //hostB[i * numBColumns + j] = i + j* numAColumns;
// //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns];
// //hostBinv[i * numBColumns + j] = 1;
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
// //hostB[i * numBColumns + j] = cosvalx[i + j* numAColumns];
//
// if (numBColumns != 1){
// hostB[i + j* numBColumns] = cos((j*PI_d*i / (numBColumns - 1)))*sqrt(1.0 / (1 + DELTA(i + 1, 1) + DELTA(i + 1, numBRows)))*sqrt(1.0 / (1 + DELTA(1, j + 1) + DELTA(numBColumns, j + 1)))*sqrt(2.0 / numBColumns);
// //hostB[i + j* numBColumns] = 1;
// //hostBinv[i + j* numBColumns] = cos((j*3.14*i / (numBColumns - 1)))*sqrt(2.0 / numBColumns)*(1.0 / 2.0);
// //hostB[i + j* numBColumns] = 1;
//
// }
//
// else{
// hostB[i + j* numBColumns] =1;
// }
// }
// }
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
// testing
// printf("\n plhs[0]:");
// printf("\n");
// for (int i = 0; i<numCRows; i++){
// for (int j = 0; j<numCColumns; j++){
// printf(" %g ", round (pointer[i * numCColumns + j]));
// }
// printf("\n");
// }
free(hostB);
}
}
| f450f66d69255ff7c62874dc3ff19359fbb77a5f.cu | /*
* Inverse Discrete Sine Transform in row wise (DST one)
* DST_I_Row_Inverse
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DST_I_Row_Inverse(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#define DELTA(i, j) ((i==j)?1:0)
#define DEFAULT_DIM 32
const double PI_d = 3.141592653589793238462643383279502884; //pi
__global__ void DSTI_Row__InverseKernel_GPUA(double const * const A, double const * const B, double * const C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
__global__ void DSTI_Row__InverseKernel(double *A, double *B, double *C,
int numARows, int numAColumns,
int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double CValue = 0.0;
int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y;
int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x;
for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) {
for (int n = 0; n < DEFAULT_DIM; ++n)
if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns))
CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col];
}
if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void CalculateTransform(double * A, double * B, double * C, int numARows,
int numAColumns, int numBRows, int numBColumns,
int numCRows, int numCColumns)
{
double * hostA = A; // The A matrix
double * hostB = B; // The B matrix
double * hostC = C; // The output C matrix
//double * hostComputedC;
double * deviceA=0;
double * deviceB=0;
double * deviceC=0;
//hostA = (double *)malloc(sizeof(float)*numARows*numAColumns);
//hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns);
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numBColumns;
// Allocate GPU buffers for three vectors (two input, one output) .
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
cudaMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns);
cudaMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns);
cudaMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns);
cudaMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, cudaMemcpyHostToDevice);
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Row__InverseKernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
cudaDeviceSynchronize();//To synchronize the device
// Copy the results in GPU memory back to the CPU
cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost);
C = hostC;
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
}
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode =cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
/// input standard GPUarray
if (mxIsGPUArray(prhs[0])) {
//mexErrMsgIdAndTxt(errId, errMsg);
/* Declare all variables.*/
mxGPUArray const *A;
mxGPUArray const *DCOS;
mxGPUArray *B;
double const *d_A, *d_DCOS;
double *d_B;
// mxArray * hostcos;
//test
// double * hostcos, *pointer;
double *pointer;
//int N;
int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns;
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* Throw an error if the input is not a GPU array. */
if ((nrhs!=1) || !(mxIsGPUArray(prhs[0]))) {
mexErrMsgIdAndTxt(errId, errMsg);
}
A = mxGPUCreateFromMxArray(prhs[0]);
const mwSize *dims;
dims=mxGPUGetDimensions(A);
numARows = (int)dims[0]; /* gets number of rows of A */
numAColumns = (int)dims[1]; /* gets number of columns of A */
if (numAColumns==1)
{
	printf("Attention: the input is a column vector; please use the column-wise Inverse Discrete Sine Transform instead.\n");
return;
}
numDCOSRows=numDCOSColumns=numAColumns;
numCRows = numARows;
numCColumns = numDCOSColumns;
mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL);
pointer = mxGetPr(COS);
	// Inverse Discrete Sine Transform, row-wise: build the N-by-N DST-I matrix
	for (int i = 0; i < numDCOSRows; i++){
		for (int j = 0; j < numDCOSColumns; j++){
			pointer[i + j* numDCOSColumns] = sin((((j + 1)*PI_d*(i + 1)) / (numDCOSColumns + 1)))*sqrt(2.0 / (numDCOSColumns + 1));
		}
	}
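	// The matrix built above is the DST-I basis: S(i,j) = sqrt(2/(N+1)) * sin((i+1)*(j+1)*pi/(N+1)),
	// with N = numDCOSColumns.  S is symmetric and involutory (S*S = I), so the same
	// matrix serves for both the forward and the inverse row-wise transform.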
DCOS=mxGPUCreateFromMxArray(COS);
// DCOS=mxGPUCreateFromMxArray(hostcos);
if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) {
mexErrMsgIdAndTxt(errId, errMsg);
}
d_A = (double const *)(mxGPUGetDataReadOnly(A));
d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS));
B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A),
mxGPUGetDimensions(A),
mxGPUGetClassID(A),
mxGPUGetComplexity(A),
MX_GPU_DO_NOT_INITIALIZE);
d_B = (double *)(mxGPUGetData(B));
dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1);
dim3 dimGrid;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DSTI_Row__InverseKernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns);
// cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
//cudaDeviceSynchronize();//To synchronize the device
plhs[0] = mxGPUCreateMxArrayOnGPU(B);
mxGPUDestroyGPUArray(A);
mxGPUDestroyGPUArray(DCOS);
mxGPUDestroyGPUArray(B);
}
/// input standard array
else if (!(mxIsGPUArray(prhs[0]))){
int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A
int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A
int numBRows; // number of rows in the matrix B
int numBColumns; // number of columns in the matrix B
int numCRows; // number of rows in the matrix C (you have to set this)
int numCColumns; // number of columns in the matrix C (you have to set this)
numBRows = numBColumns = numAColumns;
numCRows = numARows;
if (numAColumns==1)
{
	printf("Attention: the input is a column vector; please use the column-wise Inverse Discrete Sine Transform instead.\n");
return;
}
numCColumns = numBColumns;
//char const * const errId = "parallel:gpu:DCTTWO:InvalidInput";
//char const * const errMsg = "Invalid input to MEX file.";
double * hostA ; // The A matrix
double * hostB ; // The B matrix
/* Initialize the MathWorks GPU API. */
//mxInitGPU();
/* Throw an error if the input is not a GPU array. */
//if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) {
//mexErrMsgIdAndTxt(errId, errMsg);
//}
//hostA = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns);
//hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns);
hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns);
//const mxArray *G =prhs[0];
// if ((nrhs != 1) || (mxIsGPUArray(G))) {
//mexErrMsgIdAndTxt(errId, errMsg);
// G = gather(G);
// }
hostA = (double *)mxGetData(prhs[0]);
// hostA = (double *)mxGetData(G);
	// Inverse Discrete Sine Transform, row-wise: build the N-by-N DST-I matrix
	for (int i = 0; i < numBRows; i++){
		for (int j = 0; j < numBColumns; j++){
			hostB[i + j* numBColumns] = sin((((j + 1)*PI_d*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));
		}
	}
//plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL);
//hostC = (double*)mxGetData(plhs[0]);
plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, mxDOUBLE_CLASS, mxREAL);
double *pointer = mxGetPr(plhs[0]);
//CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
//memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double));
free(hostB);
}
}
|
f8508bb513802d201f3340a937a730f88e6de206.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from ztrtri_upper.cu normal z -> c, Wed Sep 17 15:08:23 2014
@author Peng Du
@author Tingxing Dong
@author Mark Gates
This file implements upper case, and is called by ctrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "ctrtri.h"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
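/*
    In equations, writing B for the inverse of the IB-by-IB upper triangular block:
        B(j,j) = 1 / A(j,j)
        B(i,j) = -B(j,j) * sum_{k=i..j-1} B(i,k) * A(k,j)      for i < j,
    which is exactly the trmv + scal pair in the j-loop below (columns 0..j-1 of sB
    already hold B at that point, while column j still holds A above the diagonal).
*/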
__global__ void
ctrtri_diag_kernel_upper(
magma_diag_t diag, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *d_dinvA)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int blk_ind = bx*IB;
//int ind = blk_ind + tx;
A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind)
// TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
__shared__ magmaFloatComplex sB[IB*IB];
magmaFloatComplex y_tx;
// load upper triangle of inner block of A; zero lower triangle & outside matrix
#pragma unroll
for( int j=0; j < IB; j++ ) {
if (tx <= j && blk_ind + j < n) {
sB[tx + j*IB] = A[tx + j*lda];
}
else {
sB[tx + j*IB] = MAGMA_C_ZERO;
}
}
__syncthreads();
// invert the diagonal
if (diag == MagmaUnit) {
sB[tx + tx*IB] = MAGMA_C_ONE;
}
else {
if ( sB[tx + tx*IB] == MAGMA_C_ZERO ) { // singular or outside matrix
sB[tx + tx*IB] = MAGMA_C_ONE;
}
else {
sB[tx + tx*IB] = MAGMA_C_ONE / sB[tx + tx*IB];
}
}
// compute elements 0:j-1 of j-th column.
for( int j=1; j < IB; j++ ) {
if ( tx < j ) {
// trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j)
// each thread sums one element, y[tx]
y_tx = MAGMA_C_ZERO;
#pragma unroll
for( int k=0; k < j; k++ )
y_tx += sB[tx + k*IB] * sB[k + j*IB];
// scal: sB(0:j-1, j) = -sB(j,j) * y
sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
}
__syncthreads();
}
// go to the (bx / ib_per_NB) outer NB*NB block,
// then the (bx % ib_per_NB) inner IB*IB block inside that.
int ib_per_NB = NB/IB;
d_dinvA += (bx / ib_per_NB)*NB*NB
+ (bx % ib_per_NB)*(NB*IB + IB);
// write result
#pragma unroll
for( int j=0; j < IB; j++ ) {
d_dinvA[tx + j*NB] = sB[tx + j*IB];
}
}
/*
Let A be an NB*NB upper triangular matrix, and B its inverse.
Then the block decomposition
[ A11 A12 ] * [ B11 B12 ] = [ I 0 ]
[ 0 A22 ] [ 0 B22 ] [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22.
ctrtri_diag_kernel inverts A11 and A22.
triple_cgemm16 routines multiply:
part 1: B12 = A12 * B22,
part 2: B12 = -B11 * B12.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
[ B11 B12 ]
which contains [ 0 B22 ].
Outer blocks are NB x NB.
A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on
the bottom. Unfortunately, this means checking every single reference. We
could easily verify that A12 is full, and select between a fast version
without checks and a slow version with checks.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
We split this into part1 & part2 to synchronize all blocks and make sure
that writes to B12 are observed by all blocks.
*/
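/*
    A minimal sketch of the driving recursion over block sizes (the actual launcher
    lives elsewhere in MAGMA; the loop below is illustrative only):

        for( int jb = IB; jb < NB; jb *= 2 ) {
            int npages = magma_ceildiv( n, jb*2 );
            // choose the kernel pair matching jb (16, 32, 64, or above64) and launch
            // ..._part1_upper, then ..._part2_upper (and, for jb > 64, ..._part3_upper
            // to clear the temporary storage), before doubling jb.
        }
*/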
/*
* B12 = A12 * B22
*/
__global__ void
triple_cgemm16_part1_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
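    // For example, assuming NB = 128 and jb = 16 (pages of 32, so pages_per_NB = 4):
    // page 5 maps to the 2nd NB*NB workspace block (offset NB*NB) and, inside it, to
    // inner page 1, i.e. an extra offset of 1*(32*NB + 32) down the block diagonal.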
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
caxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
caxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
caxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
caxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
caxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
caxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
caxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
caxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
caxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
caxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
caxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
__global__ void
triple_cgemm16_part2_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int lda = NB; // shadows lda argument
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
caxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
caxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
caxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
caxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
caxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
caxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
caxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
caxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
caxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
caxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
caxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
__global__ void
triple_cgemm32_part1_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
caxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
caxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
caxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
caxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
caxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
caxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
caxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
caxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
caxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
caxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
caxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
__global__ void
triple_cgemm32_part2_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
//int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
caxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
caxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
caxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
caxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
caxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
caxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
caxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
caxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
caxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
caxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
caxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
__global__ void
triple_cgemm64_part1_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
caxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
caxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
caxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
caxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
caxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
caxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
caxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
caxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
caxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
caxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
caxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
__global__ void
triple_cgemm64_part2_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
caxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
caxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
caxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
caxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
caxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
caxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
caxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
caxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
caxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
caxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
caxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
__global__ void
triple_cgemm_above64_part1_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int ldb = NB;
int ldc = NB;
// For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
// Each section needs all of the B matrix, so C cannot overwrite B.
        // Therefore, store B12 temporarily in the previously unused B21 location
        // (i.e., below the diagonal), then in part 3, zero out that B21 scratch space.
//
// Kernels with jb <= 64 don't have this problem, because only the
// NT x 16 section of C that overwrites the same section of B depends
// on that section of B.
//
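        // Within one 2*jb page of d_dinvA (column-major, ld = NB) the layout is
        //
        //     [ B11          B12 (final) ]
        //     [ B12 (temp)   B22         ]
        //
        // part 1 writes A12*B22 into the otherwise unused B21 slot, part 2 reads it
        // back and writes the final B12, and part 3 zeroes that slot again.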
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B12; write to B21 temp location
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
caxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
caxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
caxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
caxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
caxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
caxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
caxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
caxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
caxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
caxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
caxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
__global__ void
triple_cgemm_above64_part2_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
B = d_dinvA + jb; // B12, read from B21 temp location
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
caxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
caxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
caxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
caxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
caxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
caxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
caxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
caxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
caxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
caxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
caxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* zero out B21 temp location
*/
__global__ void
triple_cgemm_above64_part3_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part three---------------------------//
{
// zero out B21 temp location
magmaFloatComplex *B21;
int ldb = NB;
B21 = d_dinvA + jb;
B21 += ibx + id + iby*ldb;
#pragma unroll
for( int i = 0; i < 16; i++ ) {
B21[i*ldb] = MAGMA_C_ZERO;
}
}
}
| f8508bb513802d201f3340a937a730f88e6de206.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from ztrtri_upper.cu normal z -> c, Wed Sep 17 15:08:23 2014
@author Peng Du
@author Tingxing Dong
@author Mark Gates
This file implements upper case, and is called by ctrtri_kernel.cu.
It's convenient to have separate files for lower & upper, to diff the sources.
*/
#include "common_magma.h"
#include "ctrtri.h"
/*
This inverts the diagonal IB by IB inner blocks of A,
and stores the results in d_dinvA.
Each thread block with IB threads does one inner block.
Each thread deals with one row of the inner block.
*/
__global__ void
ctrtri_diag_kernel_upper(
magma_diag_t diag, int n, const magmaFloatComplex *A, int lda, magmaFloatComplex *d_dinvA)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int blk_ind = bx*IB;
//int ind = blk_ind + tx;
A += blk_ind + blk_ind*lda; // A(blk_ind, blk_ind)
// TODO sB should be [IB][IB+1] to avoid bank conflicts, right?
__shared__ magmaFloatComplex sB[IB*IB];
magmaFloatComplex y_tx;
// load upper triangle of inner block of A; zero lower triangle & outside matrix
#pragma unroll
for( int j=0; j < IB; j++ ) {
if (tx <= j && blk_ind + j < n) {
sB[tx + j*IB] = A[tx + j*lda];
}
else {
sB[tx + j*IB] = MAGMA_C_ZERO;
}
}
__syncthreads();
// invert the diagonal
if (diag == MagmaUnit) {
sB[tx + tx*IB] = MAGMA_C_ONE;
}
else {
if ( sB[tx + tx*IB] == MAGMA_C_ZERO ) { // singular or outside matrix
sB[tx + tx*IB] = MAGMA_C_ONE;
}
else {
sB[tx + tx*IB] = MAGMA_C_ONE / sB[tx + tx*IB];
}
}
// compute elements 0:j-1 of j-th column.
for( int j=1; j < IB; j++ ) {
if ( tx < j ) {
// trmv: y = sB(0:j-1, 0:j-1) * sB(0:j-1, j)
// each thread sums one element, y[tx]
y_tx = MAGMA_C_ZERO;
#pragma unroll
for( int k=0; k < j; k++ )
y_tx += sB[tx + k*IB] * sB[k + j*IB];
// scal: sB(0:j-1, j) = -sB(j,j) * y
sB[tx + j*IB] = -sB[j + j*IB] * y_tx;
}
__syncthreads();
}
// go to the (bx / ib_per_NB) outer NB*NB block,
// then the (bx % ib_per_NB) inner IB*IB block inside that.
int ib_per_NB = NB/IB;
d_dinvA += (bx / ib_per_NB)*NB*NB
+ (bx % ib_per_NB)*(NB*IB + IB);
// write result
#pragma unroll
for( int j=0; j < IB; j++ ) {
d_dinvA[tx + j*NB] = sB[tx + j*IB];
}
}
/*
Let A be an NB*NB upper triangular matrix, and B its inverse.
Then the block decomposition
[ A11 A12 ] * [ B11 B12 ] = [ I 0 ]
[ 0 A22 ] [ 0 B22 ] [ 0 I ]
yields
A11*B11 = I ==> B11 = A11^{-1},
A22*B22 = I ==> B22 = A22^{-1},
A11*B12 + A12*B22 = 0 ==> B12 = -A11^{-1}*A12*B22 = -B11*A12*B22.
ctrtri_diag_kernel inverts A11 and A22.
triple_cgemm16 routines multiply:
part 1: B12 = A12 * B22,
part 2: B12 = -B11 * B12.
At this level, inner block is jb=16, with one 4x4 thread block per inner block.
Each submatrix Aij and Bij is jb x jb.
The submatrix dimension is multiplied by 2 at each level,
so the next level is jb*2 = 32.
A "page" is the next bigger block, here jb*2=32,
[ B11 B12 ]
which contains [ 0 B22 ].
Outer blocks are NB x NB.
A12 may have < jb cols, but is guaranteed to have jb rows since A22 is on
the bottom. Unfortunately, this means checking every single reference. We
could easily verify that A12 is full, and select between a fast version
without checks and a slow version with checks.
B is stored in workspace that is a full multiple of NB x NB; no checks needed.
We split this into part1 & part2 to synchronize all blocks and make sure
that writes to B12 are observed by all blocks.
*/
/*
* B12 = A12 * B22
*/
__global__ void
triple_cgemm16_part1_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
caxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
caxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
caxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
caxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
caxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
caxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
caxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
caxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
caxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
caxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
caxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
__global__ void
triple_cgemm16_part2_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int lda = NB; // shadows lda argument
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 4 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
caxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
caxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
caxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
caxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
caxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
caxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
caxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
caxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
caxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
caxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
caxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
__global__ void
triple_cgemm32_part1_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
caxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
caxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
caxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
caxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
caxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
caxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
caxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
caxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
caxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
caxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
caxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
__global__ void
triple_cgemm32_part2_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x * (blockDim.x*blockDim.y);
const int iby = by * 16;
const int id = tx + ty*blockDim.x;
//int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 8 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
caxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
caxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
caxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
caxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
caxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
caxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
caxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
caxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
caxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
caxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
caxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
__global__ void
triple_cgemm64_part1_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
caxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
caxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
caxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
caxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
caxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
caxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
caxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
caxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
caxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
caxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
caxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
__global__ void
triple_cgemm64_part2_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
C = d_dinvA + jb*NB; // B12
B = C; // B12, okay to overwrite
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
caxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
caxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
caxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
caxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
caxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
caxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
caxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
caxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
caxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
caxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
caxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* B12 = A12 * B22
*/
__global__ void
triple_cgemm_above64_part1_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
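// thread geometry: a 16 x 4 block (64 threads, see the "NX x 4" / "+= blockDim" loop
// comments below); id in [0,63] selects which row of the 64-row section chosen by ibx
// this thread computes, while (tx,ty) cooperatively stage the 16x16 tile of B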
int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part one---------------------------//
{
// B12 = A12 * B22
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int ldb = NB;
int ldc = NB;
// For jb > 64, we process B12 as gridDim.x sections of 64 rows each, with gridDim.x > 1.
// Each section needs all of the B matrix, so C cannot overwrite B.
// Therefore, store B12 temporarily in the previously unused B21 matrix
// (i.e., below diagonal), then in part 3, zero out B21.
//
// Kernels with jb <= 64 don't have this problem, because only the
// NT x 16 section of C that overwrites the same section of B depends
// on that section of B.
//
// in gemm notation: C = A*B
A = Ain + page*jb*2*lda + page*jb*2 + jb*lda; // A12
B = d_dinvA + jb*NB + jb; // B22
C = d_dinvA + jb; // B12; write to B21 temp location
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
if ( col++ < n ) { rA[0] = A[0*lda]; }
if ( col++ < n ) { rA[1] = A[1*lda]; }
if ( col++ < n ) { rA[2] = A[2*lda]; }
if ( col++ < n ) { rA[3] = A[3*lda]; }
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); if ( col++ < n ) { rA[0] = A[ 4*lda]; }
caxpy16( rA[1], &sB[ 1][0], rC ); if ( col++ < n ) { rA[1] = A[ 5*lda]; }
caxpy16( rA[2], &sB[ 2][0], rC ); if ( col++ < n ) { rA[2] = A[ 6*lda]; }
caxpy16( rA[3], &sB[ 3][0], rC ); if ( col++ < n ) { rA[3] = A[ 7*lda]; }
caxpy16( rA[0], &sB[ 4][0], rC ); if ( col++ < n ) { rA[0] = A[ 8*lda]; }
caxpy16( rA[1], &sB[ 5][0], rC ); if ( col++ < n ) { rA[1] = A[ 9*lda]; }
caxpy16( rA[2], &sB[ 6][0], rC ); if ( col++ < n ) { rA[2] = A[10*lda]; }
caxpy16( rA[3], &sB[ 7][0], rC ); if ( col++ < n ) { rA[3] = A[11*lda]; }
caxpy16( rA[0], &sB[ 8][0], rC ); if ( col++ < n ) { rA[0] = A[12*lda]; }
caxpy16( rA[1], &sB[ 9][0], rC ); if ( col++ < n ) { rA[1] = A[13*lda]; }
caxpy16( rA[2], &sB[10][0], rC ); if ( col++ < n ) { rA[2] = A[14*lda]; }
caxpy16( rA[3], &sB[11][0], rC ); if ( col++ < n ) { rA[3] = A[15*lda]; }
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = rC[i];
C += ldc;
}
}
}
/*
* B12 = -B11 * B12
*/
__global__ void
triple_cgemm_above64_part2_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
//int col = page*jb*2 + jb;
__shared__ magmaFloatComplex sB[16][17];
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part two---------------------------//
{
// B12 = -B11 * B12
const magmaFloatComplex *A, *B;
magmaFloatComplex *C;
int lda = NB;
int ldb = NB;
int ldc = NB;
// in gemm notation: C = A*B
A = d_dinvA; // B11
B = d_dinvA + jb; // B12, read from B21 temp location
C = d_dinvA + jb*NB; // B12
A += ibx + id;
B += tx + (iby + ty)*ldb;
C += ibx + id + iby*ldc;
const magmaFloatComplex *Blast = B + jb;
// compute NT x 16 block of C
// each thread computes one 1x16 row, C(id,0:15)
magmaFloatComplex rC[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
magmaFloatComplex rA[4] = {0, 0, 0, 0};
do {
// load 16 x 16 block of B using NX x 4 threads
#pragma unroll
for( int i=0; i < 16; i += 16 ) { // += blockDim.x
#pragma unroll
for( int j=0; j < 16; j += 4 ) { // += blockDim.y
sB[tx + i][ty + j] = B[i + j*ldb];
}
}
__syncthreads();
// load NT x 16 block of A; each thread initially loads 1x4 row,
// then continues loading more elements as axpys are done.
rA[0] = A[0*lda];
rA[1] = A[1*lda];
rA[2] = A[2*lda];
rA[3] = A[3*lda];
// axpy: C(id,:) += A(id,k) * B(k,:) for k=0, ..., 15
caxpy16( rA[0], &sB[ 0][0], rC ); rA[0] = A[ 4*lda];
caxpy16( rA[1], &sB[ 1][0], rC ); rA[1] = A[ 5*lda];
caxpy16( rA[2], &sB[ 2][0], rC ); rA[2] = A[ 6*lda];
caxpy16( rA[3], &sB[ 3][0], rC ); rA[3] = A[ 7*lda];
caxpy16( rA[0], &sB[ 4][0], rC ); rA[0] = A[ 8*lda];
caxpy16( rA[1], &sB[ 5][0], rC ); rA[1] = A[ 9*lda];
caxpy16( rA[2], &sB[ 6][0], rC ); rA[2] = A[10*lda];
caxpy16( rA[3], &sB[ 7][0], rC ); rA[3] = A[11*lda];
caxpy16( rA[0], &sB[ 8][0], rC ); rA[0] = A[12*lda];
caxpy16( rA[1], &sB[ 9][0], rC ); rA[1] = A[13*lda];
caxpy16( rA[2], &sB[10][0], rC ); rA[2] = A[14*lda];
caxpy16( rA[3], &sB[11][0], rC ); rA[3] = A[15*lda];
caxpy16( rA[0], &sB[12][0], rC );
caxpy16( rA[1], &sB[13][0], rC );
caxpy16( rA[2], &sB[14][0], rC );
caxpy16( rA[3], &sB[15][0], rC );
// move to next block of A and B
A += 16*lda;
B += 16;
__syncthreads();
} while( B < Blast );
// write NT x 16 result; each thread writes one 16x1 row, C(id,0:15)
for( int i = 0; i < 16; i++ ) {
C[0] = -rC[i];
C += ldc;
}
}
}
/*
* zero out B21 temp location
*/
__global__ void
triple_cgemm_above64_part3_upper(
int n, const magmaFloatComplex *Ain, int lda, magmaFloatComplex *d_dinvA, int jb, int npages)
{
const int by = blockIdx.y / npages;
const int page = blockIdx.y % npages;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int ibx = blockIdx.x*64;
const int iby = by*16;
const int id = tx + ty*16;
// go to the (page / pages_per_NB) outer NB*NB block,
// then the (page % pages_per_NB) inner (jb*2)*(jb*2) page inside that.
int pages_per_NB = NB/(jb*2);
d_dinvA += (page / pages_per_NB)*NB*NB
+ (page % pages_per_NB)*(jb*2*NB + jb*2);
//--------------------------part three---------------------------//
{
// zero out B21 temp location
magmaFloatComplex *B21;
int ldb = NB;
B21 = d_dinvA + jb;
B21 += ibx + id + iby*ldb;
#pragma unroll
for( int i = 0; i < 16; i++ ) {
B21[i*ldb] = MAGMA_C_ZERO;
}
}
}
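// Taken together (as described in the per-kernel comments above): part 1 computes
// B12 = A12 * B22 into the otherwise-unused B21 scratch area, part 2 computes
// B12 = -B11 * B12 into its final location, and part 3 clears the B21 scratch.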
|
8bffd693ab0c75804fb6bf26a490201e13e277df.hip | // !!! This is a file automatically generated by hipify!!!
/**
* \file
* Saturate (clamp) the values of a gpu-based array to the range [mn,mx], in place.
* \todo This only works for contiguous strides.
*/
#include "../core.h"
#include "../ops.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdint.h>
#include <float.h>
#include <algorithm>
#include "macros.h"
TYPEDEFS;
#define ENDL "\n"
#define LOG(...) ndLogError(dst,__VA_ARGS__)
#define TRY(e) do{if(!(e)) {LOG("%s(%d): %s()"ENDL "\tExpression evaluated as failure."ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,#e); goto Error; }}while(0)
#define CUTRY(e) do{hipError_t ecode=(e); if(ecode!=hipSuccess) {LOG("%s(%d): %s()"ENDL "\tExpression evaluated as failure."ENDL "\t%s"ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,#e,hipGetErrorString(ecode)); goto Error; }}while(0)
#define FAIL LOG("%s(%d) %s()"ENDL "\tExecution should not have reached here."ENDL,__FILE__,__LINE__,__FUNCTION__); goto Error
#define max_(a,b) (((a)<(b))?(b):(a))
#define min_(a,b) (((a)<(b))?(a):(b))
typedef union val_t_ { int d; unsigned u; unsigned long long llu; long long lld; double f;} val_t; ///< Generic value type for passing parameters.
#define VAL_u8(v) (v.u)
#define VAL_u16(v) (v.u)
#define VAL_u32(v) (v.u)
#define VAL_u64(v) (v.llu)
#define VAL_i8(v) (v.d)
#define VAL_i16(v) (v.d)
#define VAL_i32(v) (v.d)
#define VAL_i64(v) (v.lld)
#define VAL_f32(v) (v.f)
#define VAL_f64(v) (v.f)
#define VAL(v,type) VAL_##type(v)
template<typename T> __device__ const T& clamp(const T& v, const T& mn, const T& mx)
{ if(v<mn) return mn;
else if (v>mx) return mx;
else return v;
}
/* Could be more efficient: first identify which threads in a block actually need to write, then coalesce the writes. */
template<typename T,unsigned BX,unsigned BY,unsigned WORK>
__global__ void __launch_bounds__(BX*BY,1)
saturate_ip_kernel(T* dst,unsigned w,unsigned h,T mn, T mx)
{ const int ox=threadIdx.x+(blockIdx.x*WORK)*BX,
oy=threadIdx.y+ blockIdx.y *BY;
if(oy<h)
{ dst+=ox+oy*(int)w;
if(blockIdx.x!=(gridDim.x-1))
{
#pragma unroll
for(int i=0;i<WORK;++i) dst[i*BX]=clamp(dst[i*BX],mn,mx);
} else
{ // last block - bounds check
#pragma unroll
for(int i=0;i<WORK;++i) if(w-ox>i*BX) dst[i*BX]=clamp(dst[i*BX],mn,mx);
}
}
}
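// Launch shape (see saturate_ip_cuda below): each block covers a BX*WORK-wide by BY-tall
// tile, each thread clamps WORK elements spaced BX apart, and only the last block along x
// pays for the per-element bounds check.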
static unsigned prod(size_t n, size_t *v)
{ size_t o=1;
while(n-->0) o*=v[n];
return (unsigned)o;
}
extern "C" unsigned saturate_ip_cuda(nd_t dst,val_t mn, val_t mx)
{ unsigned w=ndshape(dst)[0],
h=prod(ndndim(dst)-1,ndshape(dst)+1);
const unsigned BX=32,BY=32,WORK=8;
dim3 blocks((unsigned)ceil(w/(float)(WORK*BX)), (unsigned)ceil(h/(float)BY)),
threads(BX,BY); // run max threads per block (1024). Set BX to be 1 warp (32).
/// @cond DEFINES
#define CASE(T)hipLaunchKernelGGL(( saturate_ip_kernel<T,BX,BY,WORK>), dim3(blocks),dim3(threads),0,(hipStream_t)ndCudaStream(dst), (T*)nddata(dst),w,h,VAL(mn,T),VAL(mx,T)); break
{TYPECASE(ndtype(dst));}
#undef CASE
/// @endcond
CUTRY(hipGetLastError());
return 1;
Error:
return 0;
} | 8bffd693ab0c75804fb6bf26a490201e13e277df.cu | /**
* \file
* Saturate (clamp) the values of a gpu-based array to the range [mn,mx], in place.
* \todo This only works for contiguous strides.
*/
#include "../core.h"
#include "../ops.h"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdint.h>
#include <float.h>
#include <algorithm>
#include "macros.h"
TYPEDEFS;
#define ENDL "\n"
#define LOG(...) ndLogError(dst,__VA_ARGS__)
#define TRY(e) do{if(!(e)) {LOG("%s(%d): %s()"ENDL "\tExpression evaluated as failure."ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,#e); goto Error; }}while(0)
#define CUTRY(e) do{cudaError_t ecode=(e); if(ecode!=cudaSuccess) {LOG("%s(%d): %s()"ENDL "\tExpression evaluated as failure."ENDL "\t%s"ENDL "\t%s"ENDL,__FILE__,__LINE__,__FUNCTION__,#e,cudaGetErrorString(ecode)); goto Error; }}while(0)
#define FAIL LOG("%s(%d) %s()"ENDL "\tExecution should not have reached here."ENDL,__FILE__,__LINE__,__FUNCTION__); goto Error
#define max_(a,b) (((a)<(b))?(b):(a))
#define min_(a,b) (((a)<(b))?(a):(b))
typedef union val_t_ { int d; unsigned u; unsigned long long llu; long long lld; double f;} val_t; ///< Generic value type for passing parameters.
#define VAL_u8(v) (v.u)
#define VAL_u16(v) (v.u)
#define VAL_u32(v) (v.u)
#define VAL_u64(v) (v.llu)
#define VAL_i8(v) (v.d)
#define VAL_i16(v) (v.d)
#define VAL_i32(v) (v.d)
#define VAL_i64(v) (v.lld)
#define VAL_f32(v) (v.f)
#define VAL_f64(v) (v.f)
#define VAL(v,type) VAL_##type(v)
template<typename T> __device__ const T& clamp(const T& v, const T& mn, const T& mx)
{ if(v<mn) return mn;
else if (v>mx) return mx;
else return v;
}
/* Could be more efficient: first identify which threads in a block actually need to write, then coalesce the writes. */
template<typename T,unsigned BX,unsigned BY,unsigned WORK>
__global__ void __launch_bounds__(BX*BY,1)
saturate_ip_kernel(T* dst,unsigned w,unsigned h,T mn, T mx)
{ const int ox=threadIdx.x+(blockIdx.x*WORK)*BX,
oy=threadIdx.y+ blockIdx.y *BY;
if(oy<h)
{ dst+=ox+oy*(int)w;
if(blockIdx.x!=(gridDim.x-1))
{
#pragma unroll
for(int i=0;i<WORK;++i) dst[i*BX]=clamp(dst[i*BX],mn,mx);
} else
{ // last block - bounds check
#pragma unroll
for(int i=0;i<WORK;++i) if(w-ox>i*BX) dst[i*BX]=clamp(dst[i*BX],mn,mx);
}
}
}
static unsigned prod(size_t n, size_t *v)
{ size_t o=1;
while(n-->0) o*=v[n];
return (unsigned)o;
}
extern "C" unsigned saturate_ip_cuda(nd_t dst,val_t mn, val_t mx)
{ unsigned w=ndshape(dst)[0],
h=prod(ndndim(dst)-1,ndshape(dst)+1);
const unsigned BX=32,BY=32,WORK=8;
dim3 blocks((unsigned)ceil(w/(float)(WORK*BX)), (unsigned)ceil(h/(float)BY)),
threads(BX,BY); // run max threads per block (1024). Set BX to be 1 warp (32).
/// @cond DEFINES
#define CASE(T) saturate_ip_kernel<T,BX,BY,WORK><<<blocks,threads,0,(cudaStream_t)ndCudaStream(dst)>>>((T*)nddata(dst),w,h,VAL(mn,T),VAL(mx,T)); break
{TYPECASE(ndtype(dst));}
#undef CASE
/// @endcond
CUTRY(cudaGetLastError());
return 1;
Error:
return 0;
} |
77815b983b2bae547056d0d31628190b86986098.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
/*
__global__ void get_gradient(PixType *d_imgleft_data, PixType *d_imgright_data, PixType *d_imgleft_grad, PixType *d_imgright_grad, PixType *d_tab, int d_rows, int d_cols)
{
int pixels_number_thread = d_cols / WARP_SIZE;
int row = blockIdx.x; // the row this thread works on
int col_start = threadIdx.x * pixels_number_thread; // starting x coordinate of the range handled by this thread
int col = 0;
int i = 0;
int pre_row_add = 0, next_row_add = 0;
if(col_start < d_cols)
{
if(0 == row)// first row
{
pre_row_add = 0;
next_row_add = 1;
}
else if(d_rows -1 == row) // last row
{
pre_row_add = 1;
next_row_add = 0;
}
else
{
pre_row_add = 1;
next_row_add = 1;
}
if(0 == col_start) // this thread handles the leftmost pixels, including x == 0
{
for(i = 1; i < pixels_number_thread; i++)
{
col = col_start + i;
d_imgleft_grad[row * d_cols + col] = d_tab[ (d_imgleft_data[row * d_cols + col + 1] - d_imgleft_data[row * d_cols + col - 1]) * 2 \
+ d_imgleft_data[(row - pre_row_add) * d_cols + col + 1] - d_imgleft_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgleft_data[(row + next_row_add) * d_cols + col + 1] - d_imgleft_data[(row + next_row_add) * d_cols + col - 1] \
];
d_imgright_grad[row * d_cols + col] = d_tab[ (d_imgright_data[row * d_cols + col + 1] - d_imgright_data[row * d_cols + col - 1]) * 2 \
+ d_imgright_data[(row - pre_row_add) * d_cols + col + 1] - d_imgright_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgright_data[(row + next_row_add) * d_cols + col + 1] - d_imgright_data[(row + next_row_add) * d_cols + col - 1] \
];
//printf("row=%d, x=%d, right_data=%d\n", row, col, d_imgright_data[row * d_cols + col]);
// printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgleft_grad[row * d_cols + col], (row - pre_row_add), d_imgleft_data[row * d_cols + col ]);
//printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgright_grad[row * d_cols + col], (row - pre_row_add), d_imgright_data[row * d_cols + col ]);
//printf("y=%d,%d=%d\n", row, col, d_imgright_grad[row * d_cols + col]);
}
d_imgleft_grad[row * d_cols + 0] = d_tab[0];
d_imgright_grad[row * d_cols + 0] = d_tab[0];
d_imgleft_data[row * d_cols + 0] = d_tab[0];
d_imgright_data[row * d_cols + 0] = d_tab[0]; // mirrors the OpenCV code here; the OpenCV version looks wrong
}
else if(col_start + pixels_number_thread > d_cols - 1) // handle the rightmost pixels, since this thread may process a different number of pixels than the others
{
for(i = 0; col_start + i < d_cols; i++)
{
col = col_start + i;
d_imgleft_grad[row * d_cols + col] = d_tab[ (d_imgleft_data[row * d_cols + col + 1] - d_imgleft_data[row * d_cols + col - 1]) * 2 \
+ d_imgleft_data[(row - pre_row_add) * d_cols + col + 1] - d_imgleft_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgleft_data[(row + next_row_add) * d_cols + col + 1] - d_imgleft_data[(row + next_row_add) * d_cols + col - 1] \
];
d_imgright_grad[row * d_cols + col] = d_tab[ (d_imgright_data[row * d_cols + col + 1] - d_imgright_data[row * d_cols + col - 1]) * 2 \
+ d_imgright_data[(row - pre_row_add) * d_cols + col + 1] - d_imgright_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgright_data[(row + next_row_add) * d_cols + col + 1] - d_imgright_data[(row + next_row_add) * d_cols + col - 1] \
];
//printf("row=%d, x=%d, right_data=%d\n", row, col, d_imgright_data[row * d_cols + col]);
// printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgleft_grad[row * d_cols + col], (row - pre_row_add), d_imgleft_data[row * d_cols + col ]);
//printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgright_grad[row * d_cols + col], (row - pre_row_add), d_imgright_data[row * d_cols + col ]);
//printf("y=%d,%d=%d\n", row, col, d_imgright_grad[row * d_cols + col]);
}
d_imgleft_grad[row * d_cols + d_cols - 1] = d_tab[0];
d_imgright_grad[row * d_cols + d_cols - 1] = d_tab[0];
// printf("row=%d, col=639 grad=%d\n", row, d_tab[0]);
d_imgleft_data[row * d_cols + d_cols - 1] = d_tab[0];
d_imgright_data[row * d_cols + d_cols - 1] = d_tab[0]; // mirrors the OpenCV code here; the OpenCV version looks wrong
//printf("row=%d, x=%d, right_data=%d\n", row, d_cols-1, d_imgright_data[row * d_cols + d_cols - 1]);
}
else
{
for(i = 0; i < pixels_number_thread; i++)
{
col = col_start + i;
d_imgleft_grad[row * d_cols + col] = d_tab[ (d_imgleft_data[row * d_cols + col + 1] - d_imgleft_data[row * d_cols + col - 1]) * 2 \
+ d_imgleft_data[(row - pre_row_add) * d_cols + col + 1] - d_imgleft_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgleft_data[(row + next_row_add) * d_cols + col + 1] - d_imgleft_data[(row + next_row_add) * d_cols + col - 1] \
];
d_imgright_grad[row * d_cols + col] = d_tab[ (d_imgright_data[row * d_cols + col + 1] - d_imgright_data[row * d_cols + col - 1]) * 2 \
+ d_imgright_data[(row - pre_row_add) * d_cols + col + 1] - d_imgright_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgright_data[(row + next_row_add) * d_cols + col + 1] - d_imgright_data[(row + next_row_add) * d_cols + col - 1] \
];
//printf("row=%d, x=%d, right_data=%d\n", row, col, d_imgright_data[row * d_cols + col]);
// printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgleft_grad[row * d_cols + col], (row - pre_row_add), d_imgleft_data[row * d_cols + col ]);
//printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgright_grad[row * d_cols + col], (row - pre_row_add), d_imgright_data[row * d_cols + col ]);
//printf("y=%d,%d=%d\n", row, col, d_imgright_grad[row * d_cols + col]);
// if(0 == row && col == 442)
// {
// printf("index 1=%d,2=%d,3=%d,4=%d,5=%d,6=%d, pre_now_add=%d, next_row_add=%d\n", d_imgright_data[row * d_cols + col + 1] , d_imgright_data[row * d_cols + col - 1], d_imgright_data[(row - pre_row_add) * d_cols + col + 1], d_imgright_data[(row - pre_row_add) * d_cols + col - 1], d_imgright_data[(row + next_row_add) * d_cols + col + 1], d_imgright_data[(row + next_row_add) * d_cols + col - 1], pre_row_add, next_row_add);
// printf("src_data 1=%d,2=%d,3=%d,4=%d,5=%d,6=%d, d_cols=%d\n", row * d_cols + col + 1, row * d_cols + col - 1, (row - pre_row_add) * d_cols + col + 1, (row - pre_row_add) * d_cols + col - 1, (row + next_row_add) * d_cols + col + 1, (row + next_row_add) * d_cols + col - 1,d_cols);
// }
}
}
}
}
*/
__global__ void get_gradient(PixType *d_imgleft_data, PixType *d_imgright_data, PixType *d_imgleft_grad, PixType *d_imgright_grad, PixType *d_tab, int d_rows, int d_cols)
{
int row = threadIdx.x; // the row this thread works on
int col = 0;
int pre_row_add = 0, next_row_add = 0;
if(0 == row)// first row
{
pre_row_add = 0;
next_row_add = 1;
}
else if(d_rows -1 == row) // last row
{
pre_row_add = 1;
next_row_add = 0;
}
else
{
pre_row_add = 1;
next_row_add = 1;
}
for(col = 1; col < d_cols - 1; col++)
{
d_imgleft_grad[row * d_cols + col] = d_tab[ (d_imgleft_data[row * d_cols + col + 1] - d_imgleft_data[row * d_cols + col - 1]) * 2 \
+ d_imgleft_data[(row - pre_row_add) * d_cols + col + 1] - d_imgleft_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgleft_data[(row + next_row_add) * d_cols + col + 1] - d_imgleft_data[(row + next_row_add) * d_cols + col - 1] \
];
d_imgright_grad[row * d_cols + col] = d_tab[ (d_imgright_data[row * d_cols + col + 1] - d_imgright_data[row * d_cols + col - 1]) * 2 \
+ d_imgright_data[(row - pre_row_add) * d_cols + col + 1] - d_imgright_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgright_data[(row + next_row_add) * d_cols + col + 1] - d_imgright_data[(row + next_row_add) * d_cols + col - 1] \
];
}
__syncthreads();
d_imgleft_grad[row * d_cols + 0] = d_tab[0];
d_imgright_grad[row * d_cols + 0] = d_tab[0];
d_imgleft_data[row * d_cols + 0] = d_tab[0];
d_imgright_data[row * d_cols + 0] = d_tab[0]; // mirrors the OpenCV code here; the OpenCV version looks wrong
d_imgleft_grad[row * d_cols + d_cols - 1] = d_tab[0];
d_imgright_grad[row * d_cols + d_cols - 1] = d_tab[0];
d_imgleft_data[row * d_cols + d_cols - 1] = d_tab[0];
d_imgright_data[row * d_cols + d_cols - 1] = d_tab[0]; // mirrors the OpenCV code here; the OpenCV version looks wrong
}
// each row gets MAX_DISPARITY threads; each thread handles one particular disparity for every pixel of that row
__global__ void get_pixel_diff(const PixType * d_imgleft_buf, const PixType * d_imgright_buf, int rows, int cols, int diff_scale, CostType *d_cost)
{
int row = blockIdx.x;
int now_disparity = threadIdx.x;
const PixType *local_imgleft_buf = d_imgleft_buf + row * cols;
const PixType *local_imgright_buf = d_imgright_buf + row * cols; // address of the start of this row
for(int x = MAX_DISPARITY; x < cols; x++) // the left image starts at x = MAX_DISPARITY (the maximum disparity)
{
int u = local_imgleft_buf[x];
int ul = x > 0 ? (u + local_imgleft_buf[x - 1])/2 : u;
int ur = x < cols - 1 ? (u + local_imgleft_buf[x + 1])/2 : u;
int u0 = min(ul, ur); u0 = min(u0, u);
int u1 = max(ul, ur); u1 = max(u1, u);
int v = local_imgright_buf[x - now_disparity];
int vl = x >= now_disparity + 1 ? (local_imgright_buf[x - now_disparity - 1] + v)/2 : v;
int vr = x < cols + now_disparity - 1 ? (local_imgright_buf[x - now_disparity + 1] + v)/2 : v;
int v0 = min(vl, vr); v0 = min(v0, v);
int v1 = max(vl, vr); v1= max(v1, v);
int c0 = max(0, u - v1); c0 = max(c0, v0 - u);
int c1 = max(0, v - u1); c1 = max(c1, u0 - v);
int pre_cost = d_cost[(row * cols + x) * MAX_DISPARITY + now_disparity];
d_cost[(row * cols + x) * MAX_DISPARITY + now_disparity] = d_cost[(row * cols + x) * MAX_DISPARITY + now_disparity] + (min(c0, c1) >> diff_scale);
}
}
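// The min/max interpolation of u (left) and v (right) above appears to follow the
// sampling-insensitive pixel dissimilarity of Birchfield & Tomasi, the same measure
// OpenCV's SGBM uses; min(c0,c1) is the symmetric dissimilarity, right-shifted by
// diff_scale before being accumulated into d_cost.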
__global__ void get_hsum(const CostType *d_pixel_diff, CostType *d_hsum, int rows, int cols, int blocksize)
{
int row = blockIdx.x; // the row this thread processes
int now_disparity = threadIdx.x; // this thread handles one particular disparity of that row
int max_col = cols - MAX_DISPARITY; // corresponds to width1 in OpenCV's SGBM
int SH2 = blocksize/2, SW2= blocksize/2;
CostType *local_hsumAdd = d_hsum + (row * cols + MAX_DISPARITY)* MAX_DISPARITY; // the first MAX_DISPARITY columns of d_hsum cannot be computed
const CostType *local_pixel_diff = d_pixel_diff + (row * cols + MAX_DISPARITY) * MAX_DISPARITY; // the first MAX_DISPARITY columns of d_pixel_diff were not computed
// leftmost pixel that can be computed
local_hsumAdd[now_disparity] = 0;
for(int x = 0; x <=SW2 * MAX_DISPARITY; x += MAX_DISPARITY)
{
int scale = x == 0 ? SW2 + 1 : 1;
local_hsumAdd[now_disparity] = local_hsumAdd[now_disparity] + local_pixel_diff[x + now_disparity] * scale;
// if(row == 479 && now_disparity == 0)
// printf("row=%d,d=%d,rang_x=%d, scale=%d, pixdiff=%d, src_pixdiff=%d,hsumadd=%d\n", row, now_disparity,x/MAX_DISPARITY, scale, local_pixel_diff[x + now_disparity], d_pixel_diff[row * cols *MAX_DISPARITY + MAX_DISPARITY * MAX_DISPARITY + now_disparity ], local_hsumAdd[now_disparity]);
}
//printf("final row=%d,d=%d,hsumAdd=%d\n", row, now_disparity, local_hsumAdd[now_disparity]);
for(int x = MAX_DISPARITY; x < max_col * MAX_DISPARITY; x += MAX_DISPARITY)
{
const CostType *pixAdd = local_pixel_diff + min(x + SW2 * MAX_DISPARITY, (max_col - 1) * MAX_DISPARITY);
const CostType *pixSub = local_pixel_diff + max(x - (SW2 + 1) * MAX_DISPARITY, 0);
local_hsumAdd[x + now_disparity] = local_hsumAdd[x - MAX_DISPARITY + now_disparity] + pixAdd[now_disparity] - pixSub[now_disparity];
//if(row == 479 && now_disparity == 0)
// printf("row=%d,x=%d,d=%d,hsumAdd=%d, pixSub=%d, pixAdd=%d, hsumAdd_pre=%d, pixAddIndex=%d, SW2=%d, max_col=%d, x=%d\n", row, x/MAX_DISPARITY + MAX_DISPARITY, now_disparity, local_hsumAdd[x+now_disparity], pixSub[now_disparity],pixAdd[now_disparity], local_hsumAdd[x - MAX_DISPARITY + now_disparity], min(x + SW2 * MAX_DISPARITY, (max_col - 1) * MAX_DISPARITY), SW2, max_col, x);
}
}
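// get_hsum is a running horizontal box filter: for each (row, disparity) it keeps a
// sliding sum over a (2*SW2+1)-column window of per-pixel costs, adding the entering
// column (pixAdd) and subtracting the leaving one (pixSub) as x advances.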
__global__ void get_cost(const CostType *d_hsumAdd, CostType *d_cost, int p2, int rows, int cols, int blocksize)
{
int col = blockIdx.x + MAX_DISPARITY; // each block handles one column; only cols - MAX_DISPARITY blocks are launched
int now_disparity = threadIdx.x; // each thread in the block handles this column's cost at disparity d
CostType *local_cost = d_cost + col * MAX_DISPARITY; // address of this column in the first row
const CostType *local_hsumAdd = d_hsumAdd + col * MAX_DISPARITY; // address of this column in the first row
int SH2 = blocksize/2, SW2= blocksize/2;
//y == 0
local_cost[0 + now_disparity] = p2;
for(int i = 0; i <= SH2; i++)
{
int scale = i == 0 ? SH2 + 1 : 1;
local_cost[now_disparity] = local_cost[0 + now_disparity] + local_hsumAdd[i * cols * MAX_DISPARITY + now_disparity] * scale;
//if(col == 128 && now_disparity == 127)
// printf("k=%d,col=%d,d=%d,hsumAdd=%d,scale=%d,C=%d\n", i,col,now_disparity,local_hsumAdd[i * cols * MAX_DISPARITY + now_disparity],scale,local_cost[now_disparity]);
}
if(MAX_DISPARITY == col) // copied from OpenCV: the leftmost column keeps the same values as its first row
{
int k = 0;
for(k = 1 + SH2; k < rows; k++)
{
int y = k - SH2;
local_cost[cols * MAX_DISPARITY * y + now_disparity] = local_cost[cols * MAX_DISPARITY * (y - 1) + now_disparity];
}
for(int y = k - SH2; y < rows; y++)
{
local_cost[cols * MAX_DISPARITY * y + now_disparity] = local_cost[cols * MAX_DISPARITY * (y - 1) + now_disparity];
}
}
else
{
int k = 0;
for(k = 1 + SH2; k < rows; k++)
{
int y = k - SH2;
const CostType *h_sumSub = local_hsumAdd + (k >= blocksize ? cols * MAX_DISPARITY * (k - blocksize) : 0);
local_cost[cols * MAX_DISPARITY * y + now_disparity] = local_cost[cols * MAX_DISPARITY * (y - 1) + now_disparity] + local_hsumAdd[cols * MAX_DISPARITY * k + now_disparity] - h_sumSub[now_disparity];
}
for(int y = k - SH2; y < rows; y++) //fill the last rows with previous value
{
local_cost[cols * MAX_DISPARITY *y + now_disparity] = local_cost[cols * MAX_DISPARITY * (y - 1) + now_disparity];
}
}
}
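// get_cost completes the block aggregation vertically: starting from the first valid row
// it keeps a running sum of hsum rows over a blocksize-tall window (add row k, subtract
// h_sumSub), so each cost ends up summed over a blocksize x blocksize window plus the
// constant p2 added at initialization.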
__global__ void fill_tab(PixType *d_tab, int TAB_SIZE, int TAB_OFS, int ftzero)
{
for(int k = 0; k < TAB_SIZE; k++)
{
d_tab[k] = (PixType)(min(max(k - TAB_OFS, -ftzero), ftzero) + ftzero);
}
}
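// Editor's note: an illustrative summary (not from the original host code) of the launch
// geometry each kernel above expects, read off from how it uses blockIdx/threadIdx:
//   fill_tab       - a single thread (it fills the clipping table serially)
//   get_gradient   - one block of `rows` threads (threadIdx.x = image row)
//   get_pixel_diff - grid = rows, block = MAX_DISPARITY (blockIdx.x = row, threadIdx.x = disparity)
//   get_hsum       - grid = rows, block = MAX_DISPARITY
//   get_cost       - grid = cols - MAX_DISPARITY, block = MAX_DISPARITY (blockIdx.x + MAX_DISPARITY = column)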
| 77815b983b2bae547056d0d31628190b86986098.cu | #include "common.h"
/*
__global__ void get_gradient(PixType *d_imgleft_data, PixType *d_imgright_data, PixType *d_imgleft_grad, PixType *d_imgright_grad, PixType *d_tab, int d_rows, int d_cols)
{
int pixels_number_thread = d_cols / WARP_SIZE;
int row = blockIdx.x; // the row this thread works on
int col_start = threadIdx.x * pixels_number_thread; // starting x coordinate of the range handled by this thread
int col = 0;
int i = 0;
int pre_row_add = 0, next_row_add = 0;
if(col_start < d_cols)
{
if(0 == row)// first row
{
pre_row_add = 0;
next_row_add = 1;
}
else if(d_rows -1 == row) // last row
{
pre_row_add = 1;
next_row_add = 0;
}
else
{
pre_row_add = 1;
next_row_add = 1;
}
if(0 == col_start) // this thread handles the leftmost pixels, including x == 0
{
for(i = 1; i < pixels_number_thread; i++)
{
col = col_start + i;
d_imgleft_grad[row * d_cols + col] = d_tab[ (d_imgleft_data[row * d_cols + col + 1] - d_imgleft_data[row * d_cols + col - 1]) * 2 \
+ d_imgleft_data[(row - pre_row_add) * d_cols + col + 1] - d_imgleft_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgleft_data[(row + next_row_add) * d_cols + col + 1] - d_imgleft_data[(row + next_row_add) * d_cols + col - 1] \
];
d_imgright_grad[row * d_cols + col] = d_tab[ (d_imgright_data[row * d_cols + col + 1] - d_imgright_data[row * d_cols + col - 1]) * 2 \
+ d_imgright_data[(row - pre_row_add) * d_cols + col + 1] - d_imgright_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgright_data[(row + next_row_add) * d_cols + col + 1] - d_imgright_data[(row + next_row_add) * d_cols + col - 1] \
];
//printf("row=%d, x=%d, right_data=%d\n", row, col, d_imgright_data[row * d_cols + col]);
// printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgleft_grad[row * d_cols + col], (row - pre_row_add), d_imgleft_data[row * d_cols + col ]);
//printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgright_grad[row * d_cols + col], (row - pre_row_add), d_imgright_data[row * d_cols + col ]);
//printf("y=%d,%d=%d\n", row, col, d_imgright_grad[row * d_cols + col]);
}
d_imgleft_grad[row * d_cols + 0] = d_tab[0];
d_imgright_grad[row * d_cols + 0] = d_tab[0];
d_imgleft_data[row * d_cols + 0] = d_tab[0];
d_imgright_data[row * d_cols + 0] = d_tab[0]; // mirrors the OpenCV code here; the OpenCV version looks wrong
}
else if(col_start + pixels_number_thread > d_cols - 1) // handle the rightmost pixels, since this thread may process a different number of pixels than the others
{
for(i = 0; col_start + i < d_cols; i++)
{
col = col_start + i;
d_imgleft_grad[row * d_cols + col] = d_tab[ (d_imgleft_data[row * d_cols + col + 1] - d_imgleft_data[row * d_cols + col - 1]) * 2 \
+ d_imgleft_data[(row - pre_row_add) * d_cols + col + 1] - d_imgleft_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgleft_data[(row + next_row_add) * d_cols + col + 1] - d_imgleft_data[(row + next_row_add) * d_cols + col - 1] \
];
d_imgright_grad[row * d_cols + col] = d_tab[ (d_imgright_data[row * d_cols + col + 1] - d_imgright_data[row * d_cols + col - 1]) * 2 \
+ d_imgright_data[(row - pre_row_add) * d_cols + col + 1] - d_imgright_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgright_data[(row + next_row_add) * d_cols + col + 1] - d_imgright_data[(row + next_row_add) * d_cols + col - 1] \
];
//printf("row=%d, x=%d, right_data=%d\n", row, col, d_imgright_data[row * d_cols + col]);
// printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgleft_grad[row * d_cols + col], (row - pre_row_add), d_imgleft_data[row * d_cols + col ]);
//printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgright_grad[row * d_cols + col], (row - pre_row_add), d_imgright_data[row * d_cols + col ]);
//printf("y=%d,%d=%d\n", row, col, d_imgright_grad[row * d_cols + col]);
}
d_imgleft_grad[row * d_cols + d_cols - 1] = d_tab[0];
d_imgright_grad[row * d_cols + d_cols - 1] = d_tab[0];
// printf("row=%d, col=639 grad=%d\n", row, d_tab[0]);
d_imgleft_data[row * d_cols + d_cols - 1] = d_tab[0];
d_imgright_data[row * d_cols + d_cols - 1] = d_tab[0]; // mirrors the OpenCV code here; the OpenCV version looks wrong
//printf("row=%d, x=%d, right_data=%d\n", row, d_cols-1, d_imgright_data[row * d_cols + d_cols - 1]);
}
else
{
for(i = 0; i < pixels_number_thread; i++)
{
col = col_start + i;
d_imgleft_grad[row * d_cols + col] = d_tab[ (d_imgleft_data[row * d_cols + col + 1] - d_imgleft_data[row * d_cols + col - 1]) * 2 \
+ d_imgleft_data[(row - pre_row_add) * d_cols + col + 1] - d_imgleft_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgleft_data[(row + next_row_add) * d_cols + col + 1] - d_imgleft_data[(row + next_row_add) * d_cols + col - 1] \
];
d_imgright_grad[row * d_cols + col] = d_tab[ (d_imgright_data[row * d_cols + col + 1] - d_imgright_data[row * d_cols + col - 1]) * 2 \
+ d_imgright_data[(row - pre_row_add) * d_cols + col + 1] - d_imgright_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgright_data[(row + next_row_add) * d_cols + col + 1] - d_imgright_data[(row + next_row_add) * d_cols + col - 1] \
];
//printf("row=%d, x=%d, right_data=%d\n", row, col, d_imgright_data[row * d_cols + col]);
// printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgleft_grad[row * d_cols + col], (row - pre_row_add), d_imgleft_data[row * d_cols + col ]);
//printf("row=%d,col=%d,grad=%d, pre_row=%d, org_imgdata=%d\n", row, col, d_imgright_grad[row * d_cols + col], (row - pre_row_add), d_imgright_data[row * d_cols + col ]);
//printf("y=%d,%d=%d\n", row, col, d_imgright_grad[row * d_cols + col]);
// if(0 == row && col == 442)
// {
// printf("index 1=%d,2=%d,3=%d,4=%d,5=%d,6=%d, pre_now_add=%d, next_row_add=%d\n", d_imgright_data[row * d_cols + col + 1] , d_imgright_data[row * d_cols + col - 1], d_imgright_data[(row - pre_row_add) * d_cols + col + 1], d_imgright_data[(row - pre_row_add) * d_cols + col - 1], d_imgright_data[(row + next_row_add) * d_cols + col + 1], d_imgright_data[(row + next_row_add) * d_cols + col - 1], pre_row_add, next_row_add);
// printf("src_data 1=%d,2=%d,3=%d,4=%d,5=%d,6=%d, d_cols=%d\n", row * d_cols + col + 1, row * d_cols + col - 1, (row - pre_row_add) * d_cols + col + 1, (row - pre_row_add) * d_cols + col - 1, (row + next_row_add) * d_cols + col + 1, (row + next_row_add) * d_cols + col - 1,d_cols);
// }
}
}
}
}
*/
__global__ void get_gradient(PixType *d_imgleft_data, PixType *d_imgright_data, PixType *d_imgleft_grad, PixType *d_imgright_grad, PixType *d_tab, int d_rows, int d_cols)
{
int row = threadIdx.x; // the row this thread works on
int col = 0;
int pre_row_add = 0, next_row_add = 0;
if(0 == row)// first row
{
pre_row_add = 0;
next_row_add = 1;
}
else if(d_rows -1 == row) // last row
{
pre_row_add = 1;
next_row_add = 0;
}
else
{
pre_row_add = 1;
next_row_add = 1;
}
for(col = 1; col < d_cols - 1; col++)
{
d_imgleft_grad[row * d_cols + col] = d_tab[ (d_imgleft_data[row * d_cols + col + 1] - d_imgleft_data[row * d_cols + col - 1]) * 2 \
+ d_imgleft_data[(row - pre_row_add) * d_cols + col + 1] - d_imgleft_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgleft_data[(row + next_row_add) * d_cols + col + 1] - d_imgleft_data[(row + next_row_add) * d_cols + col - 1] \
];
d_imgright_grad[row * d_cols + col] = d_tab[ (d_imgright_data[row * d_cols + col + 1] - d_imgright_data[row * d_cols + col - 1]) * 2 \
+ d_imgright_data[(row - pre_row_add) * d_cols + col + 1] - d_imgright_data[(row - pre_row_add) * d_cols + col - 1] \
+ d_imgright_data[(row + next_row_add) * d_cols + col + 1] - d_imgright_data[(row + next_row_add) * d_cols + col - 1] \
];
}
__syncthreads();
d_imgleft_grad[row * d_cols + 0] = d_tab[0];
d_imgright_grad[row * d_cols + 0] = d_tab[0];
d_imgleft_data[row * d_cols + 0] = d_tab[0];
d_imgright_data[row * d_cols + 0] = d_tab[0]; // mirrors the OpenCV code here; the OpenCV version looks wrong
d_imgleft_grad[row * d_cols + d_cols - 1] = d_tab[0];
d_imgright_grad[row * d_cols + d_cols - 1] = d_tab[0];
d_imgleft_data[row * d_cols + d_cols - 1] = d_tab[0];
d_imgright_data[row * d_cols + d_cols - 1] = d_tab[0]; // mirrors the OpenCV code here; the OpenCV version looks wrong
}
// each row gets MAX_DISPARITY threads; each thread handles one particular disparity for every pixel of that row
__global__ void get_pixel_diff(const PixType * d_imgleft_buf, const PixType * d_imgright_buf, int rows, int cols, int diff_scale, CostType *d_cost)
{
int row = blockIdx.x;
int now_disparity = threadIdx.x;
const PixType *local_imgleft_buf = d_imgleft_buf + row * cols;
const PixType *local_imgright_buf = d_imgright_buf + row * cols; // address of the start of this row
for(int x = MAX_DISPARITY; x < cols; x++) // the left image starts at x = MAX_DISPARITY (the maximum disparity)
{
int u = local_imgleft_buf[x];
int ul = x > 0 ? (u + local_imgleft_buf[x - 1])/2 : u;
int ur = x < cols - 1 ? (u + local_imgleft_buf[x + 1])/2 : u;
int u0 = min(ul, ur); u0 = min(u0, u);
int u1 = max(ul, ur); u1 = max(u1, u);
int v = local_imgright_buf[x - now_disparity];
int vl = x >= now_disparity + 1 ? (local_imgright_buf[x - now_disparity - 1] + v)/2 : v;
int vr = x < cols + now_disparity - 1 ? (local_imgright_buf[x - now_disparity + 1] + v)/2 : v;
int v0 = min(vl, vr); v0 = min(v0, v);
int v1 = max(vl, vr); v1= max(v1, v);
int c0 = max(0, u - v1); c0 = max(c0, v0 - u);
int c1 = max(0, v - u1); c1 = max(c1, u0 - v);
int pre_cost = d_cost[(row * cols + x) * MAX_DISPARITY + now_disparity];
d_cost[(row * cols + x) * MAX_DISPARITY + now_disparity] = d_cost[(row * cols + x) * MAX_DISPARITY + now_disparity] + (min(c0, c1) >> diff_scale);
}
}
__global__ void get_hsum(const CostType *d_pixel_diff, CostType *d_hsum, int rows, int cols, int blocksize)
{
int row = blockIdx.x; // the row this thread processes
int now_disparity = threadIdx.x; // this thread handles one particular disparity of that row
int max_col = cols - MAX_DISPARITY; // corresponds to width1 in OpenCV's SGBM
int SH2 = blocksize/2, SW2= blocksize/2;
CostType *local_hsumAdd = d_hsum + (row * cols + MAX_DISPARITY)* MAX_DISPARITY; // the first MAX_DISPARITY columns of d_hsum cannot be computed
const CostType *local_pixel_diff = d_pixel_diff + (row * cols + MAX_DISPARITY) * MAX_DISPARITY; // the first MAX_DISPARITY columns of d_pixel_diff were not computed
// leftmost pixel that can be computed
local_hsumAdd[now_disparity] = 0;
for(int x = 0; x <=SW2 * MAX_DISPARITY; x += MAX_DISPARITY)
{
int scale = x == 0 ? SW2 + 1 : 1;
local_hsumAdd[now_disparity] = local_hsumAdd[now_disparity] + local_pixel_diff[x + now_disparity] * scale;
// if(row == 479 && now_disparity == 0)
// printf("row=%d,d=%d,rang_x=%d, scale=%d, pixdiff=%d, src_pixdiff=%d,hsumadd=%d\n", row, now_disparity,x/MAX_DISPARITY, scale, local_pixel_diff[x + now_disparity], d_pixel_diff[row * cols *MAX_DISPARITY + MAX_DISPARITY * MAX_DISPARITY + now_disparity ], local_hsumAdd[now_disparity]);
}
//printf("final row=%d,d=%d,hsumAdd=%d\n", row, now_disparity, local_hsumAdd[now_disparity]);
for(int x = MAX_DISPARITY; x < max_col * MAX_DISPARITY; x += MAX_DISPARITY)
{
const CostType *pixAdd = local_pixel_diff + min(x + SW2 * MAX_DISPARITY, (max_col - 1) * MAX_DISPARITY);
const CostType *pixSub = local_pixel_diff + max(x - (SW2 + 1) * MAX_DISPARITY, 0);
local_hsumAdd[x + now_disparity] = local_hsumAdd[x - MAX_DISPARITY + now_disparity] + pixAdd[now_disparity] - pixSub[now_disparity];
//if(row == 479 && now_disparity == 0)
// printf("row=%d,x=%d,d=%d,hsumAdd=%d, pixSub=%d, pixAdd=%d, hsumAdd_pre=%d, pixAddIndex=%d, SW2=%d, max_col=%d, x=%d\n", row, x/MAX_DISPARITY + MAX_DISPARITY, now_disparity, local_hsumAdd[x+now_disparity], pixSub[now_disparity],pixAdd[now_disparity], local_hsumAdd[x - MAX_DISPARITY + now_disparity], min(x + SW2 * MAX_DISPARITY, (max_col - 1) * MAX_DISPARITY), SW2, max_col, x);
}
}
__global__ void get_cost(const CostType *d_hsumAdd, CostType *d_cost, int p2, int rows, int cols, int blocksize)
{
int col = blockIdx.x + MAX_DISPARITY; // each block handles one column; only cols - MAX_DISPARITY blocks are launched
int now_disparity = threadIdx.x; // each thread in the block handles this column's cost at disparity d
CostType *local_cost = d_cost + col * MAX_DISPARITY; // address of this column in the first row
const CostType *local_hsumAdd = d_hsumAdd + col * MAX_DISPARITY; // address of this column in the first row
int SH2 = blocksize/2, SW2= blocksize/2;
//y == 0
local_cost[0 + now_disparity] = p2;
for(int i = 0; i <= SH2; i++)
{
int scale = i == 0 ? SH2 + 1 : 1;
local_cost[now_disparity] = local_cost[0 + now_disparity] + local_hsumAdd[i * cols * MAX_DISPARITY + now_disparity] * scale;
//if(col == 128 && now_disparity == 127)
// printf("k=%d,col=%d,d=%d,hsumAdd=%d,scale=%d,C=%d\n", i,col,now_disparity,local_hsumAdd[i * cols * MAX_DISPARITY + now_disparity],scale,local_cost[now_disparity]);
}
if(MAX_DISPARITY == col) // copied from OpenCV: the leftmost column keeps the same values as its first row
{
int k = 0;
for(k = 1 + SH2; k < rows; k++)
{
int y = k - SH2;
local_cost[cols * MAX_DISPARITY * y + now_disparity] = local_cost[cols * MAX_DISPARITY * (y - 1) + now_disparity];
}
for(int y = k - SH2; y < rows; y++)
{
local_cost[cols * MAX_DISPARITY * y + now_disparity] = local_cost[cols * MAX_DISPARITY * (y - 1) + now_disparity];
}
}
else
{
int k = 0;
for(k = 1 + SH2; k < rows; k++)
{
int y = k - SH2;
const CostType *h_sumSub = local_hsumAdd + (k >= blocksize ? cols * MAX_DISPARITY * (k - blocksize) : 0);
local_cost[cols * MAX_DISPARITY * y + now_disparity] = local_cost[cols * MAX_DISPARITY * (y - 1) + now_disparity] + local_hsumAdd[cols * MAX_DISPARITY * k + now_disparity] - h_sumSub[now_disparity];
}
for(int y = k - SH2; y < rows; y++) //fill the last rows with previous value
{
local_cost[cols * MAX_DISPARITY *y + now_disparity] = local_cost[cols * MAX_DISPARITY * (y - 1) + now_disparity];
}
}
}
__global__ void fill_tab(PixType *d_tab, int TAB_SIZE, int TAB_OFS, int ftzero)
{
for(int k = 0; k < TAB_SIZE; k++)
{
d_tab[k] = (PixType)(min(max(k - TAB_OFS, -ftzero), ftzero) + ftzero);
}
}
|
952f594cd37b1e51b4a5a87056a7c9bf6b4c605a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>
#include "Error.h"
/*
**Execution Config 1
* N = 4
**Execution Config 2
* N = 16
**Execution Config 3
* N = 32
**Execution Config 4
* N = 64
*/
#define N 64
__global__ void additionMatricesKernel(int *d_a, int *d_b, int *d_c){
int i = threadIdx.x+blockIdx.x*blockDim.x;
int j = threadIdx.y+blockIdx.y*blockDim.y;
d_c[ i*N+j ] = d_a[i*N+j] + d_b[i*N+j];
}
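// Each thread adds a single element: i is the global row index and j the global column
// index of the N x N matrices, flattened in row-major order as i*N + j.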
void onDevice(int h_a[][N], int h_b[][N], int h_c[][N] ){
// declare GPU memory pointers
int *d_a, *d_b, *d_c;
const int ARRAY_BYTES = N * N * sizeof(int);
// allocate memory on the GPU
HANDLER_ERROR_ERR(hipMalloc((void**)&d_a,ARRAY_BYTES));
HANDLER_ERROR_ERR(hipMalloc((void**)&d_b,ARRAY_BYTES));
HANDLER_ERROR_ERR(hipMalloc((void**)&d_c,ARRAY_BYTES));
// copy data from CPU the GPU
HANDLER_ERROR_ERR(hipMemcpy(d_a, h_a, ARRAY_BYTES, hipMemcpyHostToDevice));
HANDLER_ERROR_ERR(hipMemcpy(d_b, h_b, ARRAY_BYTES, hipMemcpyHostToDevice));
HANDLER_ERROR_ERR(hipMemcpy(d_c, h_c, ARRAY_BYTES, hipMemcpyHostToDevice));
//execution configuration
dim3 GridBlocks( 2, 2 ); // 2x2 blocks of (N/2)x(N/2) threads cover the NxN matrix exactly; an (N/2)x(N/2) grid would index far past N and write out of bounds
dim3 ThreadsBlocks( N/2,N/2 );
//run the kernel
hipLaunchKernelGGL(( additionMatricesKernel), dim3(GridBlocks),dim3(ThreadsBlocks), 0, 0, d_a, d_b, d_c );
HANDLER_ERROR_MSG("kernel panic!!!");
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(hipMemcpy(h_c, d_c, ARRAY_BYTES, hipMemcpyDeviceToHost));
// free GPU memory
HANDLER_ERROR_ERR(hipFree(d_a));
HANDLER_ERROR_ERR(hipFree(d_b));
HANDLER_ERROR_ERR(hipFree(d_c));
}
void test(int h_a[][N], int h_b[][N], int h_c[][N]){
int i,j;
for(i=0; i < N; i++){
for(j = 0; j < N; j++){
assert(h_a[i][j] + h_b[i][j] == h_c[i][j]);
}
}
printf("-: successful execution :-\n");
}
void onHost(){
int i,j;
int h_a[N][N], h_b[N][N], h_c[N][N];
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
h_a[i][j] = h_b[i][j] = i+j;
h_c[i][j] = 0;
}
}
// call device configuration
onDevice(h_a,h_b,h_c);
test(h_a,h_b,h_c);
}
int main(){
onHost();
}
| 952f594cd37b1e51b4a5a87056a7c9bf6b4c605a.cu | #include <stdio.h>
#include <assert.h>
#include "Error.h"
/*
**Execution Config 1
* N = 4
**Execution Config 2
* N = 16
**Execution Config 3
* N = 32
**Execution Config 4
* N = 64
*/
#define N 64
__global__ void additionMatricesKernel(int *d_a, int *d_b, int *d_c){
int i = threadIdx.x+blockIdx.x*blockDim.x;
int j = threadIdx.y+blockIdx.y*blockDim.y;
d_c[ i*N+j ] = d_a[i*N+j] + d_b[i*N+j];
}
void onDevice(int h_a[][N], int h_b[][N], int h_c[][N] ){
// declare GPU memory pointers
int *d_a, *d_b, *d_c;
const int ARRAY_BYTES = N * N * sizeof(int);
// allocate memory on the GPU
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_a,ARRAY_BYTES));
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_b,ARRAY_BYTES));
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_c,ARRAY_BYTES));
// copy data from CPU the GPU
HANDLER_ERROR_ERR(cudaMemcpy(d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice));
HANDLER_ERROR_ERR(cudaMemcpy(d_b, h_b, ARRAY_BYTES, cudaMemcpyHostToDevice));
HANDLER_ERROR_ERR(cudaMemcpy(d_c, h_c, ARRAY_BYTES, cudaMemcpyHostToDevice));
//execution configuration
dim3 GridBlocks( 2, 2 ); // 2x2 blocks of (N/2)x(N/2) threads cover the NxN matrix exactly; an (N/2)x(N/2) grid would index far past N and write out of bounds
dim3 ThreadsBlocks( N/2,N/2 );
//run the kernel
additionMatricesKernel<<<GridBlocks,ThreadsBlocks>>>( d_a, d_b, d_c );
HANDLER_ERROR_MSG("kernel panic!!!");
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(cudaMemcpy(h_c, d_c, ARRAY_BYTES, cudaMemcpyDeviceToHost));
// free GPU memory
HANDLER_ERROR_ERR(cudaFree(d_a));
HANDLER_ERROR_ERR(cudaFree(d_b));
HANDLER_ERROR_ERR(cudaFree(d_c));
}
void test(int h_a[][N], int h_b[][N], int h_c[][N]){
int i,j;
for(i=0; i < N; i++){
for(j = 0; j < N; j++){
assert(h_a[i][j] + h_b[i][j] == h_c[i][j]);
}
}
printf("-: successful execution :-\n");
}
void onHost(){
int i,j;
int h_a[N][N], h_b[N][N], h_c[N][N];
for(i = 0; i < N; i++){
for(j = 0; j < N; j++){
h_a[i][j] = h_b[i][j] = i+j;
h_c[i][j] = 0;
}
}
// call device configuration
onDevice(h_a,h_b,h_c);
test(h_a,h_b,h_c);
}
int main(){
onHost();
}
|
03bc65b077b48f726d3e49f03cccf1ce5ffb663f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include <cudf/utilities/error.hpp>
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.h"
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
struct dict_state_s {
uint32_t row_cnt;
PageFragment *cur_fragment;
uint32_t *hashmap;
uint32_t total_dict_entries; //!< Total number of entries in dictionary
uint32_t dictionary_size; //!< Total dictionary size in bytes
uint32_t num_dict_entries; //!< Dictionary entries in current fragment to add
uint32_t frag_dict_size;
EncColumnChunk ck;
EncColumnDesc col;
PageFragment frag;
volatile uint32_t scratch_red[32];
uint16_t frag_dict[MAX_PAGE_FRAGMENT_SIZE];
};
/**
* @brief Computes a 16-bit dictionary hash
**/
inline __device__ uint32_t uint32_hash16(uint32_t v) { return (v + (v >> 16)) & 0xffff; }
inline __device__ uint32_t uint64_hash16(uint64_t v)
{
return uint32_hash16((uint32_t)(v + (v >> 32)));
}
inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len)
{
uint32_t hash = len;
if (len > 0) {
uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p);
const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p);
uint32_t ofs = align_p * 8;
uint32_t v;
while (len > 4) {
v = *p32++;
if (ofs) { v = __funnelshift_r(v, *p32, ofs); }
hash = __funnelshift_l(hash, hash, 5) + v;
len -= 4;
}
v = *p32;
if (ofs) { v = __funnelshift_r(v, (align_p + len > 4) ? p32[1] : 0, ofs); }
v &= ((2 << (len * 8 - 1)) - 1);
hash = __funnelshift_l(hash, hash, 5) + v;
}
return uint32_hash16(hash);
}
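// nvstr_hash16 consumes the string four bytes at a time through 32-bit loads, using
// funnel shifts to stitch words together when the pointer is not 4-byte aligned, and
// folds the result down to a 16-bit bucket index for the per-chunk hashmap.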
/**
* @brief Fetch a page fragment and its dictionary entries in row-ascending order
*
* @param[in,out] s dictionary state
* @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after
*fetching)
* @param[in] frag_start_row row position of current fragment
* @param[in] t thread id
**/
__device__ void FetchDictionaryFragment(dict_state_s *s,
uint32_t *dict_data,
uint32_t frag_start_row,
uint32_t t)
{
if (t < sizeof(PageFragment) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->frag)[t] =
reinterpret_cast<const uint32_t *>(s->cur_fragment)[t];
}
__syncthreads();
// Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list)
// It's easiest to do this here since we're only dealing with values all within a 5K-row window
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = dict_data[frag_start_row + i] - frag_start_row;
s->frag_dict[i] = r;
}
__syncthreads();
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = s->frag_dict[i];
dict_data[frag_start_row + r] = 0;
}
__syncthreads();
}
/// Generate dictionary indices in ascending row order
__device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t)
{
uint32_t *dict_index = s->col.dict_index;
uint32_t *dict_data = s->col.dict_data + s->ck.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t num_dict_entries = 0;
for (uint32_t i = 0; i < s->row_cnt; i += 1024) {
uint32_t row = s->ck.start_row + i + t;
uint32_t is_valid = (i + t < s->row_cnt && row < s->col.num_rows)
? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1
: 0;
uint32_t dict_idx = (is_valid) ? dict_index[row] : 0;
uint32_t is_unique =
(is_valid &&
dict_idx ==
row); // Any value that doesn't have bit31 set should have dict_idx=row at this point
uint32_t umask = BALLOT(is_unique);
uint32_t pos = num_dict_entries + __popc(umask & ((1 << (t & 0x1f)) - 1));
if (!(t & 0x1f)) { s->scratch_red[t >> 5] = __popc(umask); }
num_dict_entries += __syncthreads_count(is_unique);
if (t < 32) { s->scratch_red[t] = WarpReducePos32(s->scratch_red[t], t); }
__syncthreads();
if (t >= 32) { pos += s->scratch_red[(t - 32) >> 5]; }
if (is_valid && is_unique) {
dict_data[pos] = row;
dict_index[row] = pos;
}
__syncthreads();
if (is_valid && !is_unique) {
// NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for
// final dictionary duplicate elimination and once for re-ordering) (If something went wrong
// building the dictionary, it will likely hang or crash right here)
do {
dict_idx = dict_index[dict_idx & 0x7fffffff];
} while (dict_idx > 0x7fffffff);
dict_index[row] = dict_idx;
}
}
}
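// Note on the encoding used above: while the dictionary is being built, dict_index[row]
// either equals row (a unique value) or stores the row of its canonical duplicate with
// bit 31 set; GenerateDictionaryIndices follows those bit-31 links to give every row its
// final, insertion-ordered dictionary position.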
// blockDim(1024, 1, 1)
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch)
{
__shared__ __align__(8) dict_state_s state_g;
using warp_reduce = hipcub::WarpReduce<uint32_t>;
__shared__ typename warp_reduce::TempStorage temp_storage[block_size / 32];
dict_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t dtype, dtype_len, dtype_len_in;
if (t < sizeof(EncColumnChunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->ck)[t] =
reinterpret_cast<const uint32_t *>(&chunks[blockIdx.x])[t];
}
__syncthreads();
if (!s->ck.has_dictionary) { return; }
if (t < sizeof(EncColumnDesc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] =
reinterpret_cast<const uint32_t *>(s->ck.col_desc)[t];
}
__syncthreads();
if (!t) {
s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits);
s->row_cnt = 0;
s->cur_fragment = s->ck.fragments;
s->total_dict_entries = 0;
s->dictionary_size = 0;
s->ck.num_dict_fragments = 0;
}
dtype = s->col.physical_type;
dtype_len = (dtype == INT64 || dtype == DOUBLE) ? 8 : 4;
if (dtype == INT32) {
dtype_len_in = GetDtypeLogicalLen(s->col.converted_type);
} else {
dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len;
}
__syncthreads();
while (s->row_cnt < s->ck.num_rows) {
uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size;
FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t);
__syncthreads();
num_dict_entries = s->frag.num_dict_vals;
if (!t) {
s->num_dict_entries = 0;
s->frag_dict_size = 0;
}
for (uint32_t i = 0; i < num_dict_entries; i += 1024) {
bool is_valid = (i + t < num_dict_entries);
uint32_t len = 0;
uint32_t is_dupe = 0;
uint32_t row, hash, next, *next_addr;
uint32_t new_dict_entries;
if (is_valid) {
row = frag_start_row + s->frag_dict[i + t];
len = dtype_len;
if (dtype == BYTE_ARRAY) {
const char *ptr = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].ptr;
uint32_t count =
(uint32_t) reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].count;
len += count;
hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(ptr), count);
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
const char *ptr2 =
reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].ptr;
uint32_t count2 =
reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].count;
if (count2 == count && nvstr_is_equal(ptr, count, ptr2, count2)) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
} else {
uint64_t val;
if (dtype_len_in == 8) {
val = reinterpret_cast<const uint64_t *>(s->col.column_data_base)[row];
hash = uint64_hash16(val);
} else {
val = (dtype_len_in == 4)
? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[row]
: (dtype_len_in == 2)
? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[row]
: reinterpret_cast<const uint8_t *>(s->col.column_data_base)[row];
hash = uint32_hash16(val);
}
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
uint64_t val2 =
(dtype_len_in == 8)
? reinterpret_cast<const uint64_t *>(s->col.column_data_base)[next - 1]
: (dtype_len_in == 4)
? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[next - 1]
: (dtype_len_in == 2)
? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[next - 1]
: reinterpret_cast<const uint8_t *>(s->col.column_data_base)[next - 1];
if (val2 == val) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
}
}
// Count the non-duplicate entries
frag_dict_size = warp_reduce(temp_storage[t / 32]).Sum((is_valid && !is_dupe) ? len : 0);
if (!(t & 0x1f)) { s->scratch_red[t >> 5] = frag_dict_size; }
new_dict_entries = __syncthreads_count(is_valid && !is_dupe);
if (t < 32) {
frag_dict_size = warp_reduce(temp_storage[t / 32]).Sum(s->scratch_red[t]);
if (t == 0) {
s->frag_dict_size += frag_dict_size;
s->num_dict_entries += new_dict_entries;
}
}
if (is_valid) {
if (!is_dupe) {
s->col.dict_index[row] = row;
} else {
s->col.dict_index[row] = (next - 1) | (1u << 31);
}
}
__syncthreads();
// At this point, the dictionary order is non-deterministic, and we want insertion order
// Make sure that the non-duplicate entry corresponds to the lower row number
// (The entry in dict_data (next-1) used for duplicate elimination does not need
// to be the lowest row number)
bool reorder_check = (is_valid && is_dupe && next - 1 > row);
if (reorder_check) {
next = s->col.dict_index[next - 1];
while (next & (1u << 31)) { next = s->col.dict_index[next & 0x7fffffff]; }
}
if (__syncthreads_or(reorder_check)) {
if (reorder_check) { atomicMin(&s->col.dict_index[next], row); }
__syncthreads();
if (reorder_check && s->col.dict_index[next] == row) {
s->col.dict_index[next] = row | (1u << 31);
s->col.dict_index[row] = row;
}
__syncthreads();
}
}
__syncthreads();
num_dict_entries = s->num_dict_entries;
frag_dict_size = s->frag_dict_size;
if (s->total_dict_entries + num_dict_entries > 65536 ||
(s->dictionary_size != 0 && s->dictionary_size + frag_dict_size > 512 * 1024)) {
break;
}
__syncthreads();
if (!t) {
if (num_dict_entries != s->frag.num_dict_vals) {
s->cur_fragment->num_dict_vals = num_dict_entries;
}
if (frag_dict_size != s->frag.dict_data_size) { s->frag.dict_data_size = frag_dict_size; }
s->total_dict_entries += num_dict_entries;
s->dictionary_size += frag_dict_size;
s->row_cnt += s->frag.num_rows;
s->cur_fragment++;
s->ck.num_dict_fragments++;
}
__syncthreads();
}
__syncthreads();
GenerateDictionaryIndices(s, t);
if (!t) {
chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments;
chunks[blockIdx.x].dictionary_size = s->dictionary_size;
chunks[blockIdx.x].total_dict_entries = s->total_dict_entries;
}
}
/**
* @brief Launches kernel for building chunk dictionaries
*
* @param[in] chunks Column chunks
* @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary)
* @param[in] num_chunks Number of column chunks
* @param[in] stream CUDA stream to use, default 0
*
* @return hipSuccess if successful, a HIP error code otherwise
**/
hipError_t BuildChunkDictionaries(EncColumnChunk *chunks,
uint32_t *dev_scratch,
size_t scratch_size,
uint32_t num_chunks,
hipStream_t stream)
{
if (num_chunks > 0 && scratch_size > 0) { // zero scratch size implies no dictionaries
CUDA_TRY(hipMemsetAsync(dev_scratch, 0, scratch_size, stream));
hipLaunchKernelGGL(( gpuBuildChunkDictionaries<1024>), dim3(num_chunks), dim3(1024), 0, stream, chunks, dev_scratch);
}
return hipSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| 03bc65b077b48f726d3e49f03cccf1ce5ffb663f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <cudf/utilities/error.hpp>
#include <io/utilities/block_utils.cuh>
#include "parquet_gpu.h"
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
struct dict_state_s {
uint32_t row_cnt;
PageFragment *cur_fragment;
uint32_t *hashmap;
uint32_t total_dict_entries; //!< Total number of entries in dictionary
uint32_t dictionary_size; //!< Total dictionary size in bytes
uint32_t num_dict_entries; //!< Dictionary entries in current fragment to add
uint32_t frag_dict_size;
EncColumnChunk ck;
EncColumnDesc col;
PageFragment frag;
volatile uint32_t scratch_red[32];
uint16_t frag_dict[MAX_PAGE_FRAGMENT_SIZE];
};
/**
* @brief Computes a 16-bit dictionary hash
**/
inline __device__ uint32_t uint32_hash16(uint32_t v) { return (v + (v >> 16)) & 0xffff; }
inline __device__ uint32_t uint64_hash16(uint64_t v)
{
return uint32_hash16((uint32_t)(v + (v >> 32)));
}
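/**
 * @brief Hashes a (possibly unaligned) byte string of length `len` down to a 16-bit dictionary hash
 **/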
inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len)
{
uint32_t hash = len;
if (len > 0) {
uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p);
const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p);
uint32_t ofs = align_p * 8;
uint32_t v;
while (len > 4) {
v = *p32++;
if (ofs) { v = __funnelshift_r(v, *p32, ofs); }
hash = __funnelshift_l(hash, hash, 5) + v;
len -= 4;
}
v = *p32;
if (ofs) { v = __funnelshift_r(v, (align_p + len > 4) ? p32[1] : 0, ofs); }
v &= ((2 << (len * 8 - 1)) - 1);
hash = __funnelshift_l(hash, hash, 5) + v;
}
return uint32_hash16(hash);
}
/**
* @brief Fetch a page fragment and its dictionary entries in row-ascending order
*
* @param[in,out] s dictionary state
* @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after
*fetching)
* @param[in] frag_start_row row position of current fragment
* @param[in] t thread id
**/
__device__ void FetchDictionaryFragment(dict_state_s *s,
uint32_t *dict_data,
uint32_t frag_start_row,
uint32_t t)
{
if (t < sizeof(PageFragment) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->frag)[t] =
reinterpret_cast<const uint32_t *>(s->cur_fragment)[t];
}
__syncthreads();
// Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list)
// It's easiest to do this here since we're only dealing with values all within a 5K-row window
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = dict_data[frag_start_row + i] - frag_start_row;
s->frag_dict[i] = r;
}
__syncthreads();
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = s->frag_dict[i];
dict_data[frag_start_row + r] = 0;
}
__syncthreads();
}
/// Generate dictionary indices in ascending row order
__device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t)
{
uint32_t *dict_index = s->col.dict_index;
uint32_t *dict_data = s->col.dict_data + s->ck.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t num_dict_entries = 0;
for (uint32_t i = 0; i < s->row_cnt; i += 1024) {
uint32_t row = s->ck.start_row + i + t;
uint32_t is_valid = (i + t < s->row_cnt && row < s->col.num_rows)
? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1
: 0;
uint32_t dict_idx = (is_valid) ? dict_index[row] : 0;
uint32_t is_unique =
(is_valid &&
dict_idx ==
row); // Any value that doesn't have bit31 set should have dict_idx=row at this point
uint32_t umask = BALLOT(is_unique);
uint32_t pos = num_dict_entries + __popc(umask & ((1 << (t & 0x1f)) - 1));
if (!(t & 0x1f)) { s->scratch_red[t >> 5] = __popc(umask); }
num_dict_entries += __syncthreads_count(is_unique);
if (t < 32) { s->scratch_red[t] = WarpReducePos32(s->scratch_red[t], t); }
__syncthreads();
if (t >= 32) { pos += s->scratch_red[(t - 32) >> 5]; }
if (is_valid && is_unique) {
dict_data[pos] = row;
dict_index[row] = pos;
}
__syncthreads();
if (is_valid && !is_unique) {
// NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for
// final dictionary duplicate elimination and once for re-ordering) (If something went wrong
// building the dictionary, it will likely hang or crash right here)
do {
dict_idx = dict_index[dict_idx & 0x7fffffff];
} while (dict_idx > 0x7fffffff);
dict_index[row] = dict_idx;
}
}
}
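/**
 * @brief Builds a per-chunk dictionary: hashes fragment values, chains duplicates through
 * dict_data/dict_index, and stops adding fragments once the dictionary would exceed 64K entries
 * or 512KB of dictionary data
 **/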
// blockDim(1024, 1, 1)
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch)
{
__shared__ __align__(8) dict_state_s state_g;
using warp_reduce = cub::WarpReduce<uint32_t>;
__shared__ typename warp_reduce::TempStorage temp_storage[block_size / 32];
dict_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t dtype, dtype_len, dtype_len_in;
if (t < sizeof(EncColumnChunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->ck)[t] =
reinterpret_cast<const uint32_t *>(&chunks[blockIdx.x])[t];
}
__syncthreads();
if (!s->ck.has_dictionary) { return; }
if (t < sizeof(EncColumnDesc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] =
reinterpret_cast<const uint32_t *>(s->ck.col_desc)[t];
}
__syncthreads();
if (!t) {
s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits);
s->row_cnt = 0;
s->cur_fragment = s->ck.fragments;
s->total_dict_entries = 0;
s->dictionary_size = 0;
s->ck.num_dict_fragments = 0;
}
dtype = s->col.physical_type;
dtype_len = (dtype == INT64 || dtype == DOUBLE) ? 8 : 4;
if (dtype == INT32) {
dtype_len_in = GetDtypeLogicalLen(s->col.converted_type);
} else {
dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len;
}
__syncthreads();
while (s->row_cnt < s->ck.num_rows) {
uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size;
FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t);
__syncthreads();
num_dict_entries = s->frag.num_dict_vals;
if (!t) {
s->num_dict_entries = 0;
s->frag_dict_size = 0;
}
for (uint32_t i = 0; i < num_dict_entries; i += 1024) {
bool is_valid = (i + t < num_dict_entries);
uint32_t len = 0;
uint32_t is_dupe = 0;
uint32_t row, hash, next, *next_addr;
uint32_t new_dict_entries;
if (is_valid) {
row = frag_start_row + s->frag_dict[i + t];
len = dtype_len;
if (dtype == BYTE_ARRAY) {
const char *ptr = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].ptr;
uint32_t count =
(uint32_t) reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].count;
len += count;
hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(ptr), count);
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
const char *ptr2 =
reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].ptr;
uint32_t count2 =
reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].count;
if (count2 == count && nvstr_is_equal(ptr, count, ptr2, count2)) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
} else {
uint64_t val;
if (dtype_len_in == 8) {
val = reinterpret_cast<const uint64_t *>(s->col.column_data_base)[row];
hash = uint64_hash16(val);
} else {
val = (dtype_len_in == 4)
? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[row]
: (dtype_len_in == 2)
? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[row]
: reinterpret_cast<const uint8_t *>(s->col.column_data_base)[row];
hash = uint32_hash16(val);
}
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
uint64_t val2 =
(dtype_len_in == 8)
? reinterpret_cast<const uint64_t *>(s->col.column_data_base)[next - 1]
: (dtype_len_in == 4)
? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[next - 1]
: (dtype_len_in == 2)
? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[next - 1]
: reinterpret_cast<const uint8_t *>(s->col.column_data_base)[next - 1];
if (val2 == val) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
}
}
// Count the non-duplicate entries
frag_dict_size = warp_reduce(temp_storage[t / 32]).Sum((is_valid && !is_dupe) ? len : 0);
if (!(t & 0x1f)) { s->scratch_red[t >> 5] = frag_dict_size; }
new_dict_entries = __syncthreads_count(is_valid && !is_dupe);
if (t < 32) {
frag_dict_size = warp_reduce(temp_storage[t / 32]).Sum(s->scratch_red[t]);
if (t == 0) {
s->frag_dict_size += frag_dict_size;
s->num_dict_entries += new_dict_entries;
}
}
if (is_valid) {
if (!is_dupe) {
s->col.dict_index[row] = row;
} else {
s->col.dict_index[row] = (next - 1) | (1u << 31);
}
}
__syncthreads();
// At this point, the dictionary order is non-deterministic, and we want insertion order
// Make sure that the non-duplicate entry corresponds to the lower row number
// (The entry in dict_data (next-1) used for duplicate elimination does not need
// to be the lowest row number)
bool reorder_check = (is_valid && is_dupe && next - 1 > row);
if (reorder_check) {
next = s->col.dict_index[next - 1];
while (next & (1u << 31)) { next = s->col.dict_index[next & 0x7fffffff]; }
}
if (__syncthreads_or(reorder_check)) {
if (reorder_check) { atomicMin(&s->col.dict_index[next], row); }
__syncthreads();
if (reorder_check && s->col.dict_index[next] == row) {
s->col.dict_index[next] = row | (1u << 31);
s->col.dict_index[row] = row;
}
__syncthreads();
}
}
__syncthreads();
num_dict_entries = s->num_dict_entries;
frag_dict_size = s->frag_dict_size;
if (s->total_dict_entries + num_dict_entries > 65536 ||
(s->dictionary_size != 0 && s->dictionary_size + frag_dict_size > 512 * 1024)) {
break;
}
__syncthreads();
if (!t) {
if (num_dict_entries != s->frag.num_dict_vals) {
s->cur_fragment->num_dict_vals = num_dict_entries;
}
if (frag_dict_size != s->frag.dict_data_size) { s->frag.dict_data_size = frag_dict_size; }
s->total_dict_entries += num_dict_entries;
s->dictionary_size += frag_dict_size;
s->row_cnt += s->frag.num_rows;
s->cur_fragment++;
s->ck.num_dict_fragments++;
}
__syncthreads();
}
__syncthreads();
GenerateDictionaryIndices(s, t);
if (!t) {
chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments;
chunks[blockIdx.x].dictionary_size = s->dictionary_size;
chunks[blockIdx.x].total_dict_entries = s->total_dict_entries;
}
}
/**
* @brief Launches kernel for building chunk dictionaries
*
* @param[in] chunks Column chunks
 * @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary)
 * @param[in] scratch_size Size of device scratch data, in bytes
* @param[in] num_chunks Number of column chunks
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t BuildChunkDictionaries(EncColumnChunk *chunks,
uint32_t *dev_scratch,
size_t scratch_size,
uint32_t num_chunks,
cudaStream_t stream)
{
if (num_chunks > 0 && scratch_size > 0) { // zero scratch size implies no dictionaries
CUDA_TRY(cudaMemsetAsync(dev_scratch, 0, scratch_size, stream));
gpuBuildChunkDictionaries<1024><<<num_chunks, 1024, 0, stream>>>(chunks, dev_scratch);
}
return cudaSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
164fd610b64649c52550e62b9cb11908827ae23a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include<limits.h>
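// sumRandC: copies the m x n input A into the top-left block of the (p x q)=(m+1 x n+1) output B,
// accumulates each row's sum into B's last column and each column's sum into B's last row,
// and seeds B[p*q-1] with INT_MAX for the later minimum search; each thread handles k elements.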
__global__ void sumRandC(int* A, int* B, int m, int n, int p, int q, int k)
{
int id=blockIdx.x*blockDim.x + threadIdx.x,idx;
if(id<((m*n)/k))
{
for(int i=0;i<k;i++)
{
idx = id+i*((m*n)/k);
B[idx+(idx/n)] = A[idx];
atomicAdd(&B[(((idx/n)+1)*n)+(idx/n)],A[idx]); // Adds elements to the row end
atomicAdd(&B[(m*n)+m+(idx%n)],A[idx]); // Adds elements to the column end
if(idx==0)
B[p*q-1] = INT_MAX;
}
}
}
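// findMIn: reduces all row sums and column sums into B[p*q-1] with atomicMin.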
__global__ void findMIn( int* A, int* B, int m, int n, int p, int q, int k)
{
int id=blockIdx.x*blockDim.x + threadIdx.x,idx;
if(id<((m*n)/k))
{
for(int i=0;i<k;i++)
{
idx=id+i*((m*n)/k);
atomicMin(&B[p*q-1],B[(((idx/n)+1)*n)+(idx/n)]); // Checks minimum of row end elements
atomicMin(&B[p*q-1],B[(m*n)+m+(idx%n)]); // Checks minimum of column end elements
}
}
}
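// updateMin: adds the minimum stored in B[p*q-1] to every element of B outside the last row and column.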
__global__ void updateMin( int* A, int* B, int m, int n, int p, int q, int k)
{
int id=blockIdx.x*blockDim.x + threadIdx.x,idx;
if(id<((m*n)/k))
{
for(int i=0;i<k;i++)
{
idx = id+i*((m*n)/k)+((id+i*((m*n)/k))/n);
if(idx%q!=n && idx/q!=m)
{
atomicAdd(&B[idx],B[p*q-1]); // Adds minimum to all the elements not in the last row and column
}
}
}
}
int main()
{
int M,N,k;
scanf( "%d %d %d", &M,&N,&k);
int *matrix,*matrix1, *hmatrix,*h1matrix;
hipMalloc(&matrix, (M) * (N) * sizeof(int));
hipMalloc(&matrix1, (M+1) * (N+1) * sizeof(int));
hmatrix = (int *)malloc(M * N * sizeof(int));
h1matrix = (int *)malloc((M+1) * (N+1) * sizeof(int));
for (int ii = 0; ii < M; ++ii)
{
for (int jj = 0; jj < N; ++jj)
{
scanf("%d",&hmatrix[ii*N+jj]);
}
}
hipMemcpy(matrix, hmatrix, M * N * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sumRandC), dim3(ceil((float)(M*N)/(k*1024))),dim3(1024), 0, 0, matrix,matrix1,M,N,M+1,N+1,k);
hipLaunchKernelGGL(( findMIn), dim3(ceil((float)(M*N)/(k*1024))),dim3(1024), 0, 0, matrix,matrix1,M,N,M+1,N+1,k);
hipLaunchKernelGGL(( updateMin), dim3(ceil((float)(M*N)/(k*1024))),dim3(1024), 0, 0, matrix,matrix1,M,N,M+1,N+1,k);
hipDeviceSynchronize();
hipMemcpy(h1matrix, matrix1, (M+1) * (N+1) * sizeof(int), hipMemcpyDeviceToHost);
for (int ii = 0; ii < M+1; ++ii)
{
for (int jj = 0; jj < N+1; ++jj)
{
printf("%d ",h1matrix[ii*(N+1)+jj]);
}
printf("\n");
}
return 0;
}
| 164fd610b64649c52550e62b9cb11908827ae23a.cu | #include<stdio.h>
#include<cuda.h>
#include<limits.h>
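// sumRandC: copies the m x n input A into the top-left block of the (p x q)=(m+1 x n+1) output B,
// accumulates each row's sum into B's last column and each column's sum into B's last row,
// and seeds B[p*q-1] with INT_MAX for the later minimum search; each thread handles k elements.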
__global__ void sumRandC(int* A, int* B, int m, int n, int p, int q, int k)
{
int id=blockIdx.x*blockDim.x + threadIdx.x,idx;
if(id<((m*n)/k))
{
for(int i=0;i<k;i++)
{
idx = id+i*((m*n)/k);
B[idx+(idx/n)] = A[idx];
atomicAdd(&B[(((idx/n)+1)*n)+(idx/n)],A[idx]); // Adds elements to the row end
atomicAdd(&B[(m*n)+m+(idx%n)],A[idx]); // Adds elements to the column end
if(idx==0)
B[p*q-1] = INT_MAX;
}
}
}
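// findMIn: reduces all row sums and column sums into B[p*q-1] with atomicMin.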
__global__ void findMIn( int* A, int* B, int m, int n, int p, int q, int k)
{
int id=blockIdx.x*blockDim.x + threadIdx.x,idx;
if(id<((m*n)/k))
{
for(int i=0;i<k;i++)
{
idx=id+i*((m*n)/k);
atomicMin(&B[p*q-1],B[(((idx/n)+1)*n)+(idx/n)]); // Checks minimum of row end elements
atomicMin(&B[p*q-1],B[(m*n)+m+(idx%n)]); // Checks minimum of column end elements
}
}
}
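// updateMin: adds the minimum stored in B[p*q-1] to every element of B outside the last row and column.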
__global__ void updateMin( int* A, int* B, int m, int n, int p, int q, int k)
{
int id=blockIdx.x*blockDim.x + threadIdx.x,idx;
if(id<((m*n)/k))
{
for(int i=0;i<k;i++)
{
idx = id+i*((m*n)/k)+((id+i*((m*n)/k))/n);
if(idx%q!=n && idx/q!=m)
{
atomicAdd(&B[idx],B[p*q-1]); // Adds minimum to all the elements not in the last row and column
}
}
}
}
int main()
{
int M,N,k;
scanf( "%d %d %d", &M,&N,&k);
int *matrix,*matrix1, *hmatrix,*h1matrix;
cudaMalloc(&matrix, (M) * (N) * sizeof(int));
cudaMalloc(&matrix1, (M+1) * (N+1) * sizeof(int));
hmatrix = (int *)malloc(M * N * sizeof(int));
h1matrix = (int *)malloc((M+1) * (N+1) * sizeof(int));
for (int ii = 0; ii < M; ++ii)
{
for (int jj = 0; jj < N; ++jj)
{
scanf("%d",&hmatrix[ii*N+jj]);
}
}
cudaMemcpy(matrix, hmatrix, M * N * sizeof(int), cudaMemcpyHostToDevice);
sumRandC<<<ceil((float)(M*N)/(k*1024)),1024>>>(matrix,matrix1,M,N,M+1,N+1,k);
findMIn<<<ceil((float)(M*N)/(k*1024)),1024>>>(matrix,matrix1,M,N,M+1,N+1,k);
updateMin<<<ceil((float)(M*N)/(k*1024)),1024>>>(matrix,matrix1,M,N,M+1,N+1,k);
cudaDeviceSynchronize();
cudaMemcpy(h1matrix, matrix1, (M+1) * (N+1) * sizeof(int), cudaMemcpyDeviceToHost);
for (int ii = 0; ii < M+1; ++ii)
{
for (int jj = 0; jj < N+1; ++jj)
{
printf("%d ",h1matrix[ii*(N+1)+jj]);
}
printf("\n");
}
return 0;
}
|
a3a10b555d9c197b8ba6284b7b4f9133b845829e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <math.h>
extern "C"
{
__device__ float rand_expon(float a, hiprandState_t *state)
{
return -log(hiprand_uniform(state))/a; // x is now random expon by inverse CDF
} // END rand_expo
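// Acceptance probability (Psi) for the exponential-proposal rejection step of Robert's method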
__device__ float psi_calc(float mu_minus, float alpha, float z)
{
float psi;
// Compute Psi
if(mu_minus < alpha){
    psi = expf( -0.5f*pow(alpha-z,2)); // note: -1/2 in integer arithmetic is 0
  }
  else {
    psi = expf( 0.5f*( pow(mu_minus-alpha,2) - pow(alpha-z,2) ) );
}
return psi;
}
__global__ void rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int mu_len, int sigma_len,
int lo_len, int hi_len,
int rng_seed_a, int rng_seed_b, int rng_seed_c,
int maxtries)
{
int accepted = 0;
int numtries = 0;
float x;
float u;
float alpha;
float psi;
float z;
float a;
float mu_minus;
int left_trunc = 0;
// Figure out which thread and block you are in and map these to a single index, "idx"
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Check: if index idx < n generate a sample, else in unneeded thread
if(idx<n){
// Setup the RNG:
hiprandState_t rng;
hiprand_init(rng_seed_a + idx*rng_seed_b, rng_seed_c, 0, &rng);
// Sample the truncated normal
// i.e. pick off mu and sigma corresponding to idx and generate a random sample, x
// if that random sample, x, is in the truncation region, update the return value to x, i.e. vals[idx]=x
// if x is not in the trunc region, try again until you get a sample in the trunc region or if more than maxtries,
// move on to Robert's approx method
while(accepted == 0 && numtries < maxtries){
numtries++; // Increment numtries
x = mu[idx] + sigma[idx]*hiprand_normal(&rng);
if(x >= lo[idx] && x <= hi[idx]){
accepted = 1;
vals[idx] = x;
}
}
// Robert's approx method
// We don't want to write both trunc algos for left and right tail truncations, just use
    // right tail truncation. If we want to sample from Y~N(mu, sigma, -Inf, b), we transform
    // first X~N(mu, sigma, -b+2*mu, Inf), use only right truncation, sample from the right
    // tail to get an X, then transform back Y=2*mu-X to get the left truncation sample if needed in Robert's method.
if(lo[idx] < mu[idx]) { // then left truncation
left_trunc = 1;
a = -1*hi[idx] + 2*mu[idx]; // flip up to right tail
}
else {
a = lo[idx]; // right truncation from a=lo[idx] to infinity
}
mu_minus = (a-mu[idx])/sigma[idx];
// need to find mu_minus but that depends on if lower trunc or upper trunc
alpha = (mu_minus + sqrtf(pow(mu_minus,2) + 4))/2;
numtries = 1; // If couldn't get sample naively, reset and try Robert
while(accepted == 0 && numtries < maxtries){
numtries++; // Increment numtries
      // Need a random exponential for Robert's method; there is no curand exponential sampler, so use the inverse CDF
// F(x) = 1-exp(-alpha*x) --> F^1(x) = -log(U)/alpha where U~Unif[0,1]
// u = hiprand_uniform(&rng);
// x = -1 * log(u)/alpha; // x is now random expon by inverse CDF
z = mu_minus + rand_expon(alpha, &rng);
// Compute Psi = probability of acceptance
psi = psi_calc(mu_minus, alpha, z);
// Check if Random Unif[0,1] < Psi, if so accept, else reject and try again
u = hiprand_uniform(&rng);
if (u < psi){
accepted = 1; // we now have our vals[idx]
if (left_trunc == 1){ // since originally left trunc, and flip back to left tail and final transform
vals[idx] = mu[idx] - sigma[idx]*z;
}
else { // right truncation originally so we're done after final transform
vals[idx] = mu[idx] + sigma[idx]*z;
}
}
}
if(accepted == 0){ // Just in case both naive and Roberts fail
vals[idx] = -999;
}
} // END if (idx<n)
return;
} // END rtruncnorm_kernel
} // END extern "C"
| a3a10b555d9c197b8ba6284b7b4f9133b845829e.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <math.h>
extern "C"
{
__device__ float rand_expon(float a, curandState *state)
{
return -log(curand_uniform(state))/a; // x is now random expon by inverse CDF
} // END rand_expo
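// Acceptance probability (Psi) for the exponential-proposal rejection step of Robert's method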
__device__ float psi_calc(float mu_minus, float alpha, float z)
{
float psi;
// Compute Psi
if(mu_minus < alpha){
    psi = expf( -0.5f*pow(alpha-z,2)); // note: -1/2 in integer arithmetic is 0
  }
  else {
    psi = expf( 0.5f*( pow(mu_minus-alpha,2) - pow(alpha-z,2) ) );
}
return psi;
}
__global__ void rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int mu_len, int sigma_len,
int lo_len, int hi_len,
int rng_seed_a, int rng_seed_b, int rng_seed_c,
int maxtries)
{
int accepted = 0;
int numtries = 0;
float x;
float u;
float alpha;
float psi;
float z;
float a;
float mu_minus;
int left_trunc = 0;
// Figure out which thread and block you are in and map these to a single index, "idx"
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Check: if index idx < n generate a sample, else in unneeded thread
if(idx<n){
// Setup the RNG:
curandState rng;
curand_init(rng_seed_a + idx*rng_seed_b, rng_seed_c, 0, &rng);
// Sample the truncated normal
// i.e. pick off mu and sigma corresponding to idx and generate a random sample, x
// if that random sample, x, is in the truncation region, update the return value to x, i.e. vals[idx]=x
// if x is not in the trunc region, try again until you get a sample in the trunc region or if more than maxtries,
// move on to Robert's approx method
while(accepted == 0 && numtries < maxtries){
numtries++; // Increment numtries
x = mu[idx] + sigma[idx]*curand_normal(&rng);
if(x >= lo[idx] && x <= hi[idx]){
accepted = 1;
vals[idx] = x;
}
}
// Robert's approx method
// We don't want to write both trunc algos for left and right tail truncations, just use
    // right tail truncation. If we want to sample from Y~N(mu, sigma, -Inf, b), we transform
    // first X~N(mu, sigma, -b+2*mu, Inf), use only right truncation, sample from the right
    // tail to get an X, then transform back Y=2*mu-X to get the left truncation sample if needed in Robert's method.
if(lo[idx] < mu[idx]) { // then left truncation
left_trunc = 1;
a = -1*hi[idx] + 2*mu[idx]; // flip up to right tail
}
else {
a = lo[idx]; // right truncation from a=lo[idx] to infinity
}
mu_minus = (a-mu[idx])/sigma[idx];
// need to find mu_minus but that depends on if lower trunc or upper trunc
alpha = (mu_minus + sqrtf(pow(mu_minus,2) + 4))/2;
numtries = 1; // If couldn't get sample naively, reset and try Robert
while(accepted == 0 && numtries < maxtries){
numtries++; // Increment numtries
      // Need a random exponential for Robert's method; there is no curand exponential sampler, so use the inverse CDF
// F(x) = 1-exp(-alpha*x) --> F^1(x) = -log(U)/alpha where U~Unif[0,1]
// u = curand_uniform(&rng);
// x = -1 * log(u)/alpha; // x is now random expon by inverse CDF
z = mu_minus + rand_expon(alpha, &rng);
// Compute Psi = probability of acceptance
psi = psi_calc(mu_minus, alpha, z);
// Check if Random Unif[0,1] < Psi, if so accept, else reject and try again
u = curand_uniform(&rng);
if (u < psi){
accepted = 1; // we now have our vals[idx]
if (left_trunc == 1){ // since originally left trunc, and flip back to left tail and final transform
vals[idx] = mu[idx] - sigma[idx]*z;
}
else { // right truncation originally so we're done after final transform
vals[idx] = mu[idx] + sigma[idx]*z;
}
}
}
if(accepted == 0){ // Just in case both naive and Roberts fail
vals[idx] = -999;
}
} // END if (idx<n)
return;
} // END rtruncnorm_kernel
} // END extern "C"
|
86ee5dc06f77059213719738462e5c37c9649f6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// these are just for timing measurments
#include <time.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int block_size = 4; // CUDA maximum is 1024 *total* threads in block
//const float A_val = 3.0f;
//const float B_val = 2.0f;
// matrix multiply (naive) kernel: C = A * B
__global__ void mmul(const float *A, const float *B, float *C, int r1,int c1, int r2, int c2) {
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create thread x index
int idy = threadIdx.y+blockDim.y*blockIdx.y; // create thread y index
  if ((idx < c2) && (idy < r1)){  // C is r1 x c2
float temp = 0;
for (int i = 0; i < c1; i++)
temp += A[idy*c1+i] * B[i*c2+idx]; // dot product of row and column
C[idy*c2+idx] = temp;
}
}
int main(){
float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
// these are just for timing
clock_t t0, t1, t2;
double t1sum=0.0;
double t2sum=0.0;
// start timing
t0 = clock();
//getting matrix from user
int r1,c1,r2,c2;
printf("Enter the row of 1st Matrix: ");
scanf("%d",&r1);
printf("Enter the column of 1st Matrix: ");
scanf("%d",&c1);
printf("Enter the row of 2st Matrix: ");
scanf("%d",&r2);
printf("Enter the column of 2st Matrix: ");
scanf("%d",&c2);
    if (c1!=r2){
        printf("Invalid Matrix dimensions: c1 must equal r2\n");
        return 0 ;
    }
h_A = new float[r1*c1];
h_B = new float[r2*c2];
h_C = new float[r1*c2];
FILE* matrixA;
matrixA = fopen("A.txt","r");
if(matrixA==NULL){
printf("Matrix A did not open \n");
return 0 ;
}
int i =0;
while(fscanf(matrixA,"%f", &h_A[i] )!= EOF){
i++;
}
FILE* matrixB;
matrixB = fopen("B.txt","r");
    if(matrixB==NULL){
printf("Matrix B did not open \n");
return 0;
}
i =0;
while(fscanf(matrixB,"%f",&h_B[i] )!= EOF){
i++;
}
    //Printing values on screen for debugging
/*
for (int i = 0; i < r1*c1; i++){
printf("%.1f ", h_A[i]);
}
printf("\n");
for (int i = 0; i < r2*c2; i++){
printf("%.1f ", h_B[i]);
}
*/
//If values was assigned in the program and not through input files
/*
for (int i = 0; i < r1*c1; i++){
h_A[i] = A_val;
}
for (int i = 0; i < r2*c2; i++){
h_B[i] = B_val;
}
for (int i = 0; i < r1*c2; i++){
h_C[i] = 0;}
*/
// Initialization timing
t1 = clock();
t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
printf("Init took %f seconds. Begin compute\n", t1sum);
// Allocate device memory and copy input data over to GPU
hipMalloc(&d_A, r1*c1*sizeof(float));
hipMalloc(&d_B, r2*c2*sizeof(float));
hipMalloc(&d_C, r1*c2*sizeof(float));
cudaCheckErrors("hipMalloc failure");
hipMemcpy(d_A, h_A, r1*c1*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, r2*c2*sizeof(float), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy H2D failure");
// Cuda processing sequence step 1 is complete
// Launch kernel
dim3 block(block_size, block_size); // dim3 variable holds 3 dimensions
    dim3 grid((c2+block.x-1)/block.x, (r1+block.y-1)/block.y); // cover the r1 x c2 output matrix
hipLaunchKernelGGL(( mmul), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, r1,c1,r2,c2 );
cudaCheckErrors("kernel launch failure");
// Cuda processing sequence step 2 is complete
// Copy results back to host
hipMemcpy(h_C, d_C, r1*c2*sizeof(float), hipMemcpyDeviceToHost);
for (int i = 0; i < r1*c1; i++){
printf("%.1f ", h_A[i]);
}
printf("\n");
for (int i = 0; i < r2*c2; i++){
printf("%.1f ", h_B[i]);
}
printf("\n");
// Verify results
for (int i = 0; i < r1*c2; i++){
printf("%.1f ", h_C[i]);
}
FILE* matrixC;
matrixC = fopen("C.txt","w");
if(matrixC==NULL){
printf("Matrix A did not open \n");
return 0 ;
}
for(int i =0; i < r1*c2; i++){
fprintf(matrixC, "%.1f ",h_C[i]);
}
fclose(matrixA);
fclose(matrixB);
fclose(matrixC);
// GPU timing
t2 = clock();
t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
printf ("Done. Compute took %f seconds\n", t2sum);
// Cuda processing sequence step 3 is complete
// Verify results
cudaCheckErrors("kernel execution failure or hipMemcpy H2D failure");
return 0;
}
| 86ee5dc06f77059213719738462e5c37c9649f6b.cu | #include <stdio.h>
// these are just for timing measurments
#include <time.h>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int block_size = 4; // CUDA maximum is 1024 *total* threads in block
//const float A_val = 3.0f;
//const float B_val = 2.0f;
// matrix multiply (naive) kernel: C = A * B
__global__ void mmul(const float *A, const float *B, float *C, int r1,int c1, int r2, int c2) {
int idx = threadIdx.x+blockDim.x*blockIdx.x; // create thread x index
int idy = threadIdx.y+blockDim.y*blockIdx.y; // create thread y index
  if ((idx < c2) && (idy < r1)){  // C is r1 x c2
float temp = 0;
for (int i = 0; i < c1; i++)
temp += A[idy*c1+i] * B[i*c2+idx]; // dot product of row and column
C[idy*c2+idx] = temp;
}
}
int main(){
float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
// these are just for timing
clock_t t0, t1, t2;
double t1sum=0.0;
double t2sum=0.0;
// start timing
t0 = clock();
//getting matrix from user
int r1,c1,r2,c2;
printf("Enter the row of 1st Matrix: ");
scanf("%d",&r1);
printf("Enter the column of 1st Matrix: ");
scanf("%d",&c1);
printf("Enter the row of 2st Matrix: ");
scanf("%d",&r2);
printf("Enter the column of 2st Matrix: ");
scanf("%d",&c2);
    if (c1!=r2){
        printf("Invalid Matrix dimensions: c1 must equal r2\n");
        return 0 ;
    }
h_A = new float[r1*c1];
h_B = new float[r2*c2];
h_C = new float[r1*c2];
FILE* matrixA;
matrixA = fopen("A.txt","r");
if(matrixA==NULL){
printf("Matrix A did not open \n");
return 0 ;
}
int i =0;
while(fscanf(matrixA,"%f", &h_A[i] )!= EOF){
i++;
}
FILE* matrixB;
matrixB = fopen("B.txt","r");
    if(matrixB==NULL){
printf("Matrix B did not open \n");
return 0;
}
i =0;
while(fscanf(matrixB,"%f",&h_B[i] )!= EOF){
i++;
}
    //Printing values on screen for debugging
/*
for (int i = 0; i < r1*c1; i++){
printf("%.1f ", h_A[i]);
}
printf("\n");
for (int i = 0; i < r2*c2; i++){
printf("%.1f ", h_B[i]);
}
*/
//If values was assigned in the program and not through input files
/*
for (int i = 0; i < r1*c1; i++){
h_A[i] = A_val;
}
for (int i = 0; i < r2*c2; i++){
h_B[i] = B_val;
}
for (int i = 0; i < r1*c2; i++){
h_C[i] = 0;}
*/
// Initialization timing
t1 = clock();
t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
printf("Init took %f seconds. Begin compute\n", t1sum);
// Allocate device memory and copy input data over to GPU
cudaMalloc(&d_A, r1*c1*sizeof(float));
cudaMalloc(&d_B, r2*c2*sizeof(float));
cudaMalloc(&d_C, r1*c2*sizeof(float));
cudaCheckErrors("cudaMalloc failure");
cudaMemcpy(d_A, h_A, r1*c1*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, r2*c2*sizeof(float), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy H2D failure");
// Cuda processing sequence step 1 is complete
// Launch kernel
dim3 block(block_size, block_size); // dim3 variable holds 3 dimensions
    dim3 grid((c2+block.x-1)/block.x, (r1+block.y-1)/block.y); // cover the r1 x c2 output matrix
mmul<<<grid, block>>>(d_A, d_B, d_C, r1,c1,r2,c2 );
cudaCheckErrors("kernel launch failure");
// Cuda processing sequence step 2 is complete
// Copy results back to host
cudaMemcpy(h_C, d_C, r1*c2*sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < r1*c1; i++){
printf("%.1f ", h_A[i]);
}
printf("\n");
for (int i = 0; i < r2*c2; i++){
printf("%.1f ", h_B[i]);
}
printf("\n");
// Verify results
for (int i = 0; i < r1*c2; i++){
printf("%.1f ", h_C[i]);
}
FILE* matrixC;
matrixC = fopen("C.txt","w");
if(matrixC==NULL){
printf("Matrix A did not open \n");
return 0 ;
}
for(int i =0; i < r1*c2; i++){
fprintf(matrixC, "%.1f ",h_C[i]);
}
fclose(matrixA);
fclose(matrixB);
fclose(matrixC);
// GPU timing
t2 = clock();
t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
printf ("Done. Compute took %f seconds\n", t2sum);
// Cuda processing sequence step 3 is complete
// Verify results
cudaCheckErrors("kernel execution failure or cudaMemcpy H2D failure");
return 0;
}
|
2a5d02d1c8192b8795d335eb3776b27e6811974c.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "NvtxPermissions.h"
#include <nvtx3/nvToolsExtMem.h>
#include <nvtx3/nvToolsExtMemCudaRt.h>
#include <hip/hip_runtime_api.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#define checkCudaErrors(Code) assert((Code) == hipSuccess)
#define checkCudaLaunch(...) checkCudaErrors((__VA_ARGS__, hipPeekAtLastError()))
__global__ void IncrementTwice(unsigned int* v)
{
unsigned int i = *v;
*v = i + 1u;
atomicAdd(v, 1u);
}
int main()
{
auto nvtxDomain = nvtxDomainCreateA("my-domain");
unsigned int* ptr;
checkCudaErrors(hipMalloc((void**)&ptr, sizeof(unsigned int)));
checkCudaErrors(hipMemset(ptr, 0, sizeof(unsigned int)));
// Success: allocation is readable and writable
hipLaunchKernelGGL(( checkCudaLaunch(IncrementTwice), dim3(1), dim3(1), 0, 0, ptr));
checkCudaErrors(hipDeviceSynchronize());
// Violation: 4 bytes written on a read-only allocation
NV::PermissionsAssign(nvtxDomain, ptr, NV::PERMISSIONS_READ);
hipLaunchKernelGGL(( checkCudaLaunch(IncrementTwice), dim3(1), dim3(1), 0, 0, ptr));
checkCudaErrors(hipDeviceSynchronize());
// Violation: 4 bytes read on a write-only allocation
NV::PermissionsAssign(nvtxDomain, ptr, NV::PERMISSIONS_WRITE);
hipLaunchKernelGGL(( checkCudaLaunch(IncrementTwice), dim3(1), dim3(1), 0, 0, ptr));
checkCudaErrors(hipDeviceSynchronize());
// Violation: 4 bytes read on a no-permissions allocation
NV::PermissionsAssign(nvtxDomain, ptr, NV::PERMISSIONS_NONE);
hipLaunchKernelGGL(( checkCudaLaunch(IncrementTwice), dim3(1), dim3(1), 0, 0, ptr));
checkCudaErrors(hipDeviceSynchronize());
// Violation: 4 bytes atomic operation on a no-atomic allocation
NV::PermissionsAssign(nvtxDomain, ptr, NV::PERMISSIONS_READ | NV::PERMISSIONS_WRITE);
hipLaunchKernelGGL(( checkCudaLaunch(IncrementTwice), dim3(1), dim3(1), 0, 0, ptr));
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(ptr));
}
| 2a5d02d1c8192b8795d335eb3776b27e6811974c.cu | /* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "NvtxPermissions.h"
#include <nvtx3/nvToolsExtMem.h>
#include <nvtx3/nvToolsExtMemCudaRt.h>
#include <cuda_runtime_api.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#define checkCudaErrors(Code) assert((Code) == cudaSuccess)
#define checkCudaLaunch(...) checkCudaErrors((__VA_ARGS__, cudaPeekAtLastError()))
__global__ void IncrementTwice(unsigned int* v)
{
unsigned int i = *v;
*v = i + 1u;
atomicAdd(v, 1u);
}
int main()
{
auto nvtxDomain = nvtxDomainCreateA("my-domain");
unsigned int* ptr;
checkCudaErrors(cudaMalloc((void**)&ptr, sizeof(unsigned int)));
checkCudaErrors(cudaMemset(ptr, 0, sizeof(unsigned int)));
// Success: allocation is readable and writable
checkCudaLaunch(IncrementTwice<<<1, 1>>>(ptr));
checkCudaErrors(cudaDeviceSynchronize());
// Violation: 4 bytes written on a read-only allocation
NV::PermissionsAssign(nvtxDomain, ptr, NV::PERMISSIONS_READ);
checkCudaLaunch(IncrementTwice<<<1, 1>>>(ptr));
checkCudaErrors(cudaDeviceSynchronize());
// Violation: 4 bytes read on a write-only allocation
NV::PermissionsAssign(nvtxDomain, ptr, NV::PERMISSIONS_WRITE);
checkCudaLaunch(IncrementTwice<<<1, 1>>>(ptr));
checkCudaErrors(cudaDeviceSynchronize());
// Violation: 4 bytes read on a no-permissions allocation
NV::PermissionsAssign(nvtxDomain, ptr, NV::PERMISSIONS_NONE);
checkCudaLaunch(IncrementTwice<<<1, 1>>>(ptr));
checkCudaErrors(cudaDeviceSynchronize());
// Violation: 4 bytes atomic operation on a no-atomic allocation
NV::PermissionsAssign(nvtxDomain, ptr, NV::PERMISSIONS_READ | NV::PERMISSIONS_WRITE);
checkCudaLaunch(IncrementTwice<<<1, 1>>>(ptr));
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(ptr));
}
|
4a489104dcbad6e14296ea99cce71e76a758ee7e.hip | // !!! This is a file automatically generated by hipify!!!
#include <pthread.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <malloc.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <time.h>
#define REPS 1
void SortChunkI(long ChunkSize, int Chunk_ID, signed int *ary);
void SortChunkL(long ChunkSize, int Chunk_ID, signed long long *ary);
void SortChunkF(long ChunkSize, int Chunk_ID, float *ary);
void SortChunkD(long ChunkSize, int Chunk_ID, double *ary);
void MergeSortI(signed int *list, long length);
void MergeSortL(signed long long *list, long length);
void MergeSortF(float *list, long length);
void MergeSortD(double *list, long length);
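// SortChunkG{I,L,F,D}: each GPU thread bubble-sorts its own contiguous ChunkSize-element slice of ary in place.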
__global__ void SortChunkGI(signed int ary[], long ChunkSize)
{
unsigned long i,j;
unsigned long sp;
int temp;
sp = (blockIdx.x * blockDim.x +threadIdx.x)*ChunkSize;
printf("\n start:%li end:%li Chunksize: %li S_ary: %i E_ary:%i\n",sp,ChunkSize+sp-1,ChunkSize,ary[sp],ary[ChunkSize+sp-1]);
for(i = 0; i< ChunkSize; i++)
{
for(j = sp; j< (ChunkSize+sp-1-i); j++)
{
if( (ary[j]) > (ary[j+1]))
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
}
__global__ void SortChunkGL(signed long long ary[], long ChunkSize)
{
unsigned long i,j;
unsigned long sp;
long long temp;
sp = (blockIdx.x * blockDim.x +threadIdx.x)*ChunkSize;
//printf("\n start:%li end:%li Chunksize: %li S_ary: %i E_ary:%i\n",sp,ChunkSize+sp-1,ChunkSize,ary[sp],ary[ChunkSize+sp-1]);
for(i = 0; i< ChunkSize; i++)
{
for(j = sp; j< (ChunkSize+sp-1-i); j++)
{
if( (ary[j]) > (ary[j+1]))
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
}
__global__ void SortChunkGF(float ary[], long ChunkSize)
{
unsigned long i,j;
unsigned long sp;
float temp;
sp = (blockIdx.x * blockDim.x +threadIdx.x)*ChunkSize;
printf("\n start:%li end:%li Chunksize: %li S_ary: %i E_ary:%i\n",sp,ChunkSize+sp-1,ChunkSize,ary[sp],ary[ChunkSize+sp-1]);
for(i = 0; i< ChunkSize; i++)
{
for(j = sp; j< (ChunkSize+sp-1-i); j++)
{
if( (ary[j]) > (ary[j+1]))
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
}
__global__ void SortChunkGD(double ary[], long ChunkSize)
{
unsigned long i,j;
unsigned long sp;
double temp;
sp = (blockIdx.x * blockDim.x +threadIdx.x)*ChunkSize;
printf("\n start:%li end:%li Chunksize: %li S_ary: %i E_ary:%i\n",sp,ChunkSize+sp-1,ChunkSize,ary[sp],ary[ChunkSize+sp-1]);
for(i = 0; i< ChunkSize; i++)
{
for(j = sp; j< (ChunkSize+sp-1-i); j++)
{
if( (ary[j]) > (ary[j+1]))
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
}
int main(int argc, char** argv)
{
//**************** HOST variable ******************
struct timeval t;
double StartTime, EndTime;
double TimeElapsed;
long a,i;
long NumOfChunk;
long HowMany;
long ChunkSize;
char Type;
signed int *InputArrayI,*SortedArrayI;
signed long long *InputArrayL,*SortedArrayL;
float *InputArrayF,*SortedArrayF;
double *InputArrayD,*SortedArrayD;
long BlockSize;
long NumOfBlock;
//**************** GPU variable ******************
signed int *InputArrayG_I,*SortedArrayG_I;
signed long long *InputArrayG_L,*SortedArrayG_L;
float *InputArrayG_F,*SortedArrayG_F;
double *InputArrayG_D,*SortedArrayG_D;
FILE *ff = fopen("BubbleSortResult.txt", "w");
FILE *fp = fopen("MergeSortResult.txt", "w");
if(argc != 5)
{
printf("\n Argument is not correct \n\n");
printf("Nothing executed ... Exiting ...\n\n");
exit(EXIT_FAILURE);
}
HowMany = atoi(argv[1]);
ChunkSize = atoi(argv[3]);
Type = toupper(argv[2][0]);
BlockSize = atoi(argv[4]);
NumOfChunk = HowMany/(ChunkSize);
NumOfBlock = HowMany/((ChunkSize * BlockSize));
printf("\nElement type : %c\n",Type);
printf("BlockSize : %li\n",BlockSize);
printf("\nNumberOfChunk : %li\n",NumOfChunk);
printf("Total Block : %li\n",NumOfBlock);
printf("Total Element : %li\n\n\n\n",NumOfBlock*ChunkSize*BlockSize);
srand(time(NULL));
	// HOST************* initialize random input numbers
switch(Type)
{
case 'I':
InputArrayI = (signed int *)malloc(HowMany * sizeof(signed int));
SortedArrayI = (signed int *)malloc(HowMany * sizeof(signed int));
for(i=0;i<HowMany;i++)
{
InputArrayI[i] = ( ((-1)^i)*rand() );
}
break;
//*******************************************
case 'L':
InputArrayL = (signed long long *)malloc(HowMany * sizeof(signed long long ));
SortedArrayL = (signed long long *)malloc(HowMany * sizeof(signed long long ));
for(i=0;i<HowMany;i++)
{
InputArrayL[i] =(long long )( ((-1)^i)*rand()<<32 | rand() );
}
break;
//*******************************************
case 'F':
InputArrayF = (float *)malloc(HowMany * sizeof(float));
SortedArrayF = (float *)malloc(HowMany * sizeof(float));
int my_random_int;
for(i=0;i<HowMany;i++)
{
my_random_int = ((-1)^i)*rand() ;
InputArrayF[i] = *(float*)&my_random_int;
if(isnan(InputArrayF[i])){i--;}
}
break;
//*******************************************
case 'D':
InputArrayD = (double *)malloc(HowMany * sizeof(double));
SortedArrayD = (double *)malloc(HowMany * sizeof(double));
long long int my_random_long;
for(i=0;i<HowMany;i++)
{
my_random_long = (long long )(( ((-1)^i)*rand()<<32) | rand() );
InputArrayD[i] = *(double*)&my_random_long;
if(isnan(InputArrayD[i])){i--;}
}
break;
}
	// GPU*********** initialize GPU buffers and transfer data HtoD
switch(Type)
{
case 'I':
hipMalloc ((signed int **)&InputArrayG_I, HowMany*sizeof(signed int));
hipMalloc ((signed int **)&SortedArrayG_I, HowMany*sizeof(signed int));
hipMemcpy (InputArrayG_I, InputArrayI, HowMany*sizeof(signed int), hipMemcpyHostToDevice);
hipMemcpy (SortedArrayG_I, InputArrayI, HowMany*sizeof(signed int), hipMemcpyHostToDevice);
break;
//*******************************************
case 'L':
hipMalloc ((signed long long **)&InputArrayG_L, HowMany* sizeof(signed long long ));
hipMalloc ((signed long long **)&SortedArrayG_L, HowMany* sizeof(signed long long ));
			hipMemcpy (InputArrayG_L, InputArrayL, HowMany*sizeof(signed long long), hipMemcpyHostToDevice);
			hipMemcpy (SortedArrayG_L, InputArrayL, HowMany*sizeof(signed long long), hipMemcpyHostToDevice);
break;
//*******************************************
case 'F':
			hipMalloc ((float **)&InputArrayG_F, HowMany*sizeof(float));
			hipMalloc ((float **)&SortedArrayG_F, HowMany*sizeof(float));
			hipMemcpy (InputArrayG_F, InputArrayF, HowMany*sizeof(float), hipMemcpyHostToDevice);
			hipMemcpy (SortedArrayG_F, InputArrayF, HowMany*sizeof(float), hipMemcpyHostToDevice);
break;
//*******************************************
case 'D':
			hipMalloc ((double **)&InputArrayG_D, HowMany*sizeof(double));
			hipMalloc ((double **)&SortedArrayG_D, HowMany*sizeof(double));
			hipMemcpy (InputArrayG_D, InputArrayD, HowMany*sizeof(double), hipMemcpyHostToDevice);
			hipMemcpy (SortedArrayG_D, InputArrayD, HowMany*sizeof(double), hipMemcpyHostToDevice);
break;
}
gettimeofday(&t, NULL);
StartTime = (double)t.tv_sec*1000000.0 + ((double)t.tv_usec);
//******************* sort ***************
for(a=0; a<REPS; a++)
{
switch(Type)
{
case 'I':
// tot block element per block
hipLaunchKernelGGL(( SortChunkGI), dim3(NumOfBlock), dim3(BlockSize), 0, 0, SortedArrayG_I,ChunkSize);
hipMemcpy (SortedArrayI, SortedArrayG_I, HowMany*sizeof(signed int), hipMemcpyDeviceToHost);
fprintf(ff, "Bubble sorted result of int done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(ff, "%i \n", SortedArrayI[i]);
if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
}
MergeSortI(SortedArrayI, HowMany);
break;
//*******************************************
case 'L':
// tot block element per block
hipLaunchKernelGGL(( SortChunkGL), dim3(NumOfBlock), dim3(BlockSize), 0, 0, SortedArrayG_L,ChunkSize);
hipMemcpy (SortedArrayL, SortedArrayG_L, HowMany*sizeof(signed long long), hipMemcpyDeviceToHost);
fprintf(ff, "Bubble sorted result of long done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(ff, "%lli \n", SortedArrayL[i]);
if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
}
MergeSortL(SortedArrayL, HowMany);
break;
//*******************************************
case 'F':
// tot block element per block
hipLaunchKernelGGL(( SortChunkGF), dim3(NumOfBlock), dim3(BlockSize), 0, 0, SortedArrayG_F,ChunkSize);
hipMemcpy (SortedArrayF, SortedArrayG_F, HowMany*sizeof(float), hipMemcpyDeviceToHost);
fprintf(ff, "Bubble sorted result of float done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(ff, "%f \n", SortedArrayF[i]);
if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
}
MergeSortF(SortedArrayF, HowMany);
break;
//*******************************************
case 'D':
// tot block element per block
hipLaunchKernelGGL(( SortChunkGD), dim3(NumOfBlock), dim3(BlockSize), 0, 0, SortedArrayG_D,ChunkSize);
hipMemcpy (SortedArrayD, SortedArrayG_D, HowMany*sizeof(double), hipMemcpyDeviceToHost);
fprintf(ff, "Bubble sorted result of double done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(ff, "%lf \n", InputArrayD[i]);
if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
}
MergeSortD(SortedArrayD, HowMany);
break;
}
}
gettimeofday(&t, NULL);
EndTime = (double)t.tv_sec*1000000.0 + ((double)t.tv_usec);
TimeElapsed=(EndTime-StartTime)/1000.00;
TimeElapsed/=(double)REPS;
printf("\n\nExecution time:%10.4f ms ",TimeElapsed);
// print result
switch(Type)
{
case 'I':
fprintf(fp, "Merge sorted result of int done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(fp, "%i \n", SortedArrayI[i]);
}
break;
//*******************************************
case 'L':
fprintf(fp, "Merge sorted result of long done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(fp, "%lli \n", SortedArrayL[i]);
}
break;
//*******************************************
case 'F':
fprintf(fp, "Merge sorted result of float done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(fp, "%f \n", SortedArrayF[i]);
}
break;
//******************************************
case 'D':
fprintf(fp, "Merge sorted result of double done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(fp, "%lf \n", SortedArrayD[i]);
}
break;
}
	//free memory
switch(Type)
{
case 'I':
free(InputArrayI);
free(SortedArrayI);
hipFree(InputArrayG_I);
hipFree(SortedArrayG_I);
break;
//*******************************************
case 'L':
free(InputArrayL);
free(SortedArrayL);
hipFree(InputArrayG_L);
hipFree(SortedArrayG_L);
break;
//*******************************************
case 'F':
free(InputArrayF);
free(SortedArrayF);
hipFree(InputArrayG_F);
hipFree(SortedArrayG_F);
break;
//*******************************************
case 'D':
free(InputArrayD);
free(SortedArrayD);
hipFree(InputArrayG_D);
hipFree(SortedArrayG_D);
break;
}
fclose(ff);
fclose(fp);
return (EXIT_SUCCESS);
}
//************** bubble sort HOST****************
void SortChunkI(long ChunkSize, int Chunk_ID, signed int *ary)
{
long i,j;
long sp;
int temp;
sp = Chunk_ID * ChunkSize;
for(i = 0; i< (ChunkSize-1); i++)
{
for(j = sp; j< ((ChunkSize+sp)-1-i); j++)
{
if(ary[j] > ary[j+1])
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
for(i = sp; i< (ChunkSize+sp); i++)
{
printf("\n %i",ary[i]);
}
printf("\n \n");
return;
}
void SortChunkL(long ChunkSize, int Chunk_ID, signed long long *ary)
{
long i,j;
long sp;
long long temp;
sp = Chunk_ID * ChunkSize;
for(i = 0; i< (ChunkSize-1); i++)
{
for(j = sp; j< ((ChunkSize+sp)-1-i); j++)
{
if(ary[j] > ary[j+1])
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
for(i = sp; i< (ChunkSize+sp); i++)
{
printf("\n %lli",ary[i]);
}
printf("\n \n");
return;
}
void SortChunkF(long ChunkSize, int Chunk_ID, float *ary)
{
long i,j;
long sp;
float temp;
sp = Chunk_ID * ChunkSize;
for(i = 0; i< (ChunkSize-1); i++)
{
for(j = sp; j< ((ChunkSize+sp)-1-i); j++)
{
if(ary[j] > ary[j+1])
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
for(i = sp; i< (ChunkSize+sp); i++)
{
printf("\n %f",ary[i]);
}
printf("\n \n");
return;
}
void SortChunkD(long ChunkSize, int Chunk_ID, double *ary)
{
long i,j;
long sp;
double temp;
sp = Chunk_ID * ChunkSize;
for(i = 0; i< (ChunkSize-1); i++)
{
for(j = sp; j< ((ChunkSize+sp)-1-i); j++)
{
if(ary[j] > ary[j+1])
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
for(i = sp; i< (ChunkSize+sp); i++)
{
printf("\n %f",ary[i]);
}
printf("\n \n");
return;
}
//************** merge sort HOST *****************
void MergeSortI(signed int *list, long length)
{
long i;
long left_min, left_max, right_min, right_max, next;
signed int *tmp = (int*)malloc(sizeof(int) * length);
if (tmp == NULL)
{
fputs("Error: out of memory\n", stderr);
abort();
}
for (i = 1; i < length; i *= 2)
{
for (left_min = 0; left_min < length - i; left_min = right_max)
{
right_min = left_max = left_min + i;
right_max = left_max + i;
if (right_max > length)
right_max = length;
next = 0;
while (left_min < left_max && right_min < right_max)
tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
while (left_min < left_max)
list[--right_min] = list[--left_max];
while (next > 0)
list[--right_min] = tmp[--next];
}
}
free(tmp);
return;
}
void MergeSortL(signed long long *list, long length)
{
long i;
long left_min, left_max, right_min, right_max, next;
signed long long *tmp = (long long *)malloc(sizeof(long long ) * length);
if (tmp == NULL)
{
fputs("Error: out of memory\n", stderr);
abort();
}
for (i = 1; i < length; i *= 2)
{
for (left_min = 0; left_min < length - i; left_min = right_max)
{
right_min = left_max = left_min + i;
right_max = left_max + i;
if (right_max > length)
right_max = length;
next = 0;
while (left_min < left_max && right_min < right_max)
tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
while (left_min < left_max)
list[--right_min] = list[--left_max];
while (next > 0)
list[--right_min] = tmp[--next];
}
}
free(tmp);
return;
}
void MergeSortF(float *list, long length)
{
long i;
long left_min, left_max, right_min, right_max, next;
float *tmp = (float *)malloc(sizeof(float ) * length);
if (tmp == NULL)
{
fputs("Error: out of memory\n", stderr);
abort();
}
for (i = 1; i < length; i *= 2)
{
for (left_min = 0; left_min < length - i; left_min = right_max)
{
right_min = left_max = left_min + i;
right_max = left_max + i;
if (right_max > length)
right_max = length;
next = 0;
while (left_min < left_max && right_min < right_max)
tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
while (left_min < left_max)
list[--right_min] = list[--left_max];
while (next > 0)
list[--right_min] = tmp[--next];
}
}
free(tmp);
return;
}
void MergeSortD(double *list, long length)
{
long i;
long left_min, left_max, right_min, right_max, next;
double *tmp = (double *)malloc(sizeof(double ) * length);
if (tmp == NULL)
{
fputs("Error: out of memory\n", stderr);
abort();
}
for (i = 1; i < length; i *= 2)
{
for (left_min = 0; left_min < length - i; left_min = right_max)
{
right_min = left_max = left_min + i;
right_max = left_max + i;
if (right_max > length)
right_max = length;
next = 0;
while (left_min < left_max && right_min < right_max)
tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
while (left_min < left_max)
list[--right_min] = list[--left_max];
while (next > 0)
list[--right_min] = tmp[--next];
}
}
free(tmp);
return;
} | 4a489104dcbad6e14296ea99cce71e76a758ee7e.cu | #include <pthread.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <malloc.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <time.h>
#define REPS 1
void SortChunkI(long ChunkSize, int Chunk_ID, signed int *ary);
void SortChunkL(long ChunkSize, int Chunk_ID, signed long long *ary);
void SortChunkF(long ChunkSize, int Chunk_ID, float *ary);
void SortChunkD(long ChunkSize, int Chunk_ID, double *ary);
void MergeSortI(signed int *list, long length);
void MergeSortL(signed long long *list, long length);
void MergeSortF(float *list, long length);
void MergeSortD(double *list, long length);
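// SortChunkG{I,L,F,D}: each GPU thread bubble-sorts its own contiguous ChunkSize-element slice of ary in place.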
__global__ void SortChunkGI(signed int ary[], long ChunkSize)
{
unsigned long i,j;
unsigned long sp;
int temp;
sp = (blockIdx.x * blockDim.x +threadIdx.x)*ChunkSize;
printf("\n start:%li end:%li Chunksize: %li S_ary: %i E_ary:%i\n",sp,ChunkSize+sp-1,ChunkSize,ary[sp],ary[ChunkSize+sp-1]);
for(i = 0; i< ChunkSize; i++)
{
for(j = sp; j< (ChunkSize+sp-1-i); j++)
{
if( (ary[j]) > (ary[j+1]))
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
}
__global__ void SortChunkGL(signed long long ary[], long ChunkSize)
{
unsigned long i,j;
unsigned long sp;
long long temp;
sp = (blockIdx.x * blockDim.x +threadIdx.x)*ChunkSize;
//printf("\n start:%li end:%li Chunksize: %li S_ary: %i E_ary:%i\n",sp,ChunkSize+sp-1,ChunkSize,ary[sp],ary[ChunkSize+sp-1]);
for(i = 0; i< ChunkSize; i++)
{
for(j = sp; j< (ChunkSize+sp-1-i); j++)
{
if( (ary[j]) > (ary[j+1]))
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
}
__global__ void SortChunkGF(float ary[], long ChunkSize)
{
unsigned long i,j;
unsigned long sp;
float temp;
sp = (blockIdx.x * blockDim.x +threadIdx.x)*ChunkSize;
printf("\n start:%li end:%li Chunksize: %li S_ary: %i E_ary:%i\n",sp,ChunkSize+sp-1,ChunkSize,ary[sp],ary[ChunkSize+sp-1]);
for(i = 0; i< ChunkSize; i++)
{
for(j = sp; j< (ChunkSize+sp-1-i); j++)
{
if( (ary[j]) > (ary[j+1]))
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
}
__global__ void SortChunkGD(double ary[], long ChunkSize)
{
unsigned long i,j;
unsigned long sp;
double temp;
sp = (blockIdx.x * blockDim.x +threadIdx.x)*ChunkSize;
printf("\n start:%li end:%li Chunksize: %li S_ary: %i E_ary:%i\n",sp,ChunkSize+sp-1,ChunkSize,ary[sp],ary[ChunkSize+sp-1]);
for(i = 0; i< ChunkSize; i++)
{
for(j = sp; j< (ChunkSize+sp-1-i); j++)
{
if( (ary[j]) > (ary[j+1]))
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
}
int main(int argc, char** argv)
{
//**************** HOST variable ******************
struct timeval t;
double StartTime, EndTime;
double TimeElapsed;
long a,i;
long NumOfChunk;
long HowMany;
long ChunkSize;
char Type;
signed int *InputArrayI,*SortedArrayI;
signed long long *InputArrayL,*SortedArrayL;
float *InputArrayF,*SortedArrayF;
double *InputArrayD,*SortedArrayD;
long BlockSize;
long NumOfBlock;
//**************** GPU variable ******************
signed int *InputArrayG_I,*SortedArrayG_I;
signed long long *InputArrayG_L,*SortedArrayG_L;
float *InputArrayG_F,*SortedArrayG_F;
double *InputArrayG_D,*SortedArrayG_D;
FILE *ff = fopen("BubbleSortResult.txt", "w");
FILE *fp = fopen("MergeSortResult.txt", "w");
if(argc != 5)
{
printf("\n Argument is not correct \n\n");
printf("Nothing executed ... Exiting ...\n\n");
exit(EXIT_FAILURE);
}
HowMany = atoi(argv[1]);
ChunkSize = atoi(argv[3]);
Type = toupper(argv[2][0]);
BlockSize = atoi(argv[4]);
NumOfChunk = HowMany/(ChunkSize);
NumOfBlock = HowMany/((ChunkSize * BlockSize));
printf("\nElement type : %c\n",Type);
printf("BlockSize : %li\n",BlockSize);
printf("\nNumberOfChunk : %li\n",NumOfChunk);
printf("Total Block : %li\n",NumOfBlock);
printf("Total Element : %li\n\n\n\n",NumOfBlock*ChunkSize*BlockSize);
srand(time(NULL));
	// HOST************* initialize random input numbers
switch(Type)
{
case 'I':
InputArrayI = (signed int *)malloc(HowMany * sizeof(signed int));
SortedArrayI = (signed int *)malloc(HowMany * sizeof(signed int));
for(i=0;i<HowMany;i++)
{
InputArrayI[i] = ( ((-1)^i)*rand() );
}
break;
//*******************************************
case 'L':
InputArrayL = (signed long long *)malloc(HowMany * sizeof(signed long long ));
SortedArrayL = (signed long long *)malloc(HowMany * sizeof(signed long long ));
for(i=0;i<HowMany;i++)
{
InputArrayL[i] = (i % 2 ? -1LL : 1LL) * (((long long)rand() << 32) | rand()); // build a 64-bit value; shifting a 32-bit int by 32 is undefined
}
break;
//*******************************************
case 'F':
InputArrayF = (float *)malloc(HowMany * sizeof(float));
SortedArrayF = (float *)malloc(HowMany * sizeof(float));
int my_random_int;
for(i=0;i<HowMany;i++)
{
my_random_int = (i % 2 ? -1 : 1) * rand();
InputArrayF[i] = *(float*)&my_random_int;
if(isnan(InputArrayF[i])){i--;}
}
break;
//*******************************************
case 'D':
InputArrayD = (double *)malloc(HowMany * sizeof(double));
SortedArrayD = (double *)malloc(HowMany * sizeof(double));
long long int my_random_long;
for(i=0;i<HowMany;i++)
{
my_random_long = (i % 2 ? -1LL : 1LL) * (((long long)rand() << 32) | rand());
InputArrayD[i] = *(double*)&my_random_long;
if(isnan(InputArrayD[i])){i--;}
}
break;
}
// GPU *********** initialize GPU buffers and transfer data HtoD
switch(Type)
{
case 'I':
cudaMalloc ((signed int **)&InputArrayG_I, HowMany*sizeof(signed int));
cudaMalloc ((signed int **)&SortedArrayG_I, HowMany*sizeof(signed int));
cudaMemcpy (InputArrayG_I, InputArrayI, HowMany*sizeof(signed int), cudaMemcpyHostToDevice);
cudaMemcpy (SortedArrayG_I, InputArrayI, HowMany*sizeof(signed int), cudaMemcpyHostToDevice);
break;
//*******************************************
case 'L':
cudaMalloc ((signed long long **)&InputArrayG_L, HowMany* sizeof(signed long long ));
cudaMalloc ((signed long long **)&SortedArrayG_L, HowMany* sizeof(signed long long ));
cudaMemcpy (InputArrayG_L, InputArrayL, HowMany*sizeof(signed long long), cudaMemcpyHostToDevice);
cudaMemcpy (SortedArrayG_L, InputArrayL, HowMany*sizeof(signed long long), cudaMemcpyHostToDevice);
break;
//*******************************************
case 'F':
cudaMalloc ((float **)&InputArrayG_F, HowMany*sizeof(float));
cudaMalloc ((float **)&SortedArrayG_F, HowMany*sizeof(float));
cudaMemcpy (InputArrayG_F, InputArrayF, HowMany*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy (SortedArrayG_F, InputArrayF, HowMany*sizeof(float), cudaMemcpyHostToDevice);
break;
//*******************************************
case 'D':
cudaMalloc ((double **)&InputArrayG_D, HowMany*sizeof(double));
cudaMalloc ((double **)&SortedArrayG_D, HowMany*sizeof(double));
cudaMemcpy (InputArrayG_D, InputArrayD, HowMany*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy (SortedArrayG_D, InputArrayD, HowMany*sizeof(double), cudaMemcpyHostToDevice);
break;
}
gettimeofday(&t, NULL);
StartTime = (double)t.tv_sec*1000000.0 + ((double)t.tv_usec);
//******************* sort ***************
for(a=0; a<REPS; a++)
{
switch(Type)
{
case 'I':
// launch config: <<< total blocks, threads per block >>>
SortChunkGI<<< NumOfBlock, BlockSize>>> (SortedArrayG_I,ChunkSize);
cudaMemcpy (SortedArrayI, SortedArrayG_I, HowMany*sizeof(signed int), cudaMemcpyDeviceToHost);
fprintf(ff, "Bubble sorted result of int done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(ff, "%i \n", SortedArrayI[i]);
if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
}
MergeSortI(SortedArrayI, HowMany);
break;
//*******************************************
case 'L':
// launch config: <<< total blocks, threads per block >>>
SortChunkGL<<< NumOfBlock, BlockSize>>> (SortedArrayG_L,ChunkSize);
cudaMemcpy (SortedArrayL, SortedArrayG_L, HowMany*sizeof(signed long long), cudaMemcpyDeviceToHost);
fprintf(ff, "Bubble sorted result of long done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(ff, "%lli \n", SortedArrayL[i]);
if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
}
MergeSortL(SortedArrayL, HowMany);
break;
//*******************************************
case 'F':
// launch config: <<< total blocks, threads per block >>>
SortChunkGF<<< NumOfBlock, BlockSize>>> (SortedArrayG_F,ChunkSize);
cudaMemcpy (SortedArrayF, SortedArrayG_F, HowMany*sizeof(float), cudaMemcpyDeviceToHost);
fprintf(ff, "Bubble sorted result of float done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(ff, "%f \n", SortedArrayF[i]);
if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
}
MergeSortF(SortedArrayF, HowMany);
break;
//*******************************************
case 'D':
// launch config: <<< total blocks, threads per block >>>
SortChunkGD<<< NumOfBlock, BlockSize>>> (SortedArrayG_D,ChunkSize);
cudaMemcpy (SortedArrayD, SortedArrayG_D, HowMany*sizeof(double), cudaMemcpyDeviceToHost);
fprintf(ff, "Bubble sorted result of double done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(ff, "%lf \n", InputArrayD[i]);
if((i+1)%ChunkSize ==0){fprintf(ff, " \n");}
}
MergeSortD(SortedArrayD, HowMany);
break;
}
}
gettimeofday(&t, NULL);
EndTime = (double)t.tv_sec*1000000.0 + ((double)t.tv_usec);
TimeElapsed=(EndTime-StartTime)/1000.00;
TimeElapsed/=(double)REPS;
printf("\n\nExecution time:%10.4f ms ",TimeElapsed);
// print result
switch(Type)
{
case 'I':
fprintf(fp, "Merge sorted result of int done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(fp, "%i \n", SortedArrayI[i]);
}
break;
//*******************************************
case 'L':
fprintf(fp, "Merge sorted result of long done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(fp, "%lli \n", SortedArrayL[i]);
}
break;
//*******************************************
case 'F':
fprintf(fp, "Merge sorted result of float done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(fp, "%f \n", SortedArrayF[i]);
}
break;
//******************************************
case 'D':
fprintf(fp, "Merge sorted result of double done by GPU\n***********************************\n");
for(i=0;i<HowMany;i++)
{
fprintf(fp, "%lf \n", SortedArrayD[i]);
}
break;
}
// free memory
switch(Type)
{
case 'I':
free(InputArrayI);
free(SortedArrayI);
cudaFree(InputArrayG_I);
cudaFree(SortedArrayG_I);
break;
//*******************************************
case 'L':
free(InputArrayL);
free(SortedArrayL);
cudaFree(InputArrayG_L);
cudaFree(SortedArrayG_L);
break;
//*******************************************
case 'F':
free(InputArrayF);
free(SortedArrayF);
cudaFree(InputArrayG_F);
cudaFree(SortedArrayG_F);
break;
//*******************************************
case 'D':
free(InputArrayD);
free(SortedArrayD);
cudaFree(InputArrayG_D);
cudaFree(SortedArrayG_D);
break;
}
fclose(ff);
fclose(fp);
return (EXIT_SUCCESS);
}
//************** bubble sort HOST****************
void SortChunkI(long ChunkSize, int Chunk_ID, signed int *ary)
{
long i,j;
long sp;
int temp;
sp = Chunk_ID * ChunkSize;
for(i = 0; i< (ChunkSize-1); i++)
{
for(j = sp; j< ((ChunkSize+sp)-1-i); j++)
{
if(ary[j] > ary[j+1])
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
for(i = sp; i< (ChunkSize+sp); i++)
{
printf("\n %i",ary[i]);
}
printf("\n \n");
return;
}
void SortChunkL(long ChunkSize, int Chunk_ID, signed long long *ary)
{
long i,j;
long sp;
long long temp;
sp = Chunk_ID * ChunkSize;
for(i = 0; i< (ChunkSize-1); i++)
{
for(j = sp; j< ((ChunkSize+sp)-1-i); j++)
{
if(ary[j] > ary[j+1])
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
for(i = sp; i< (ChunkSize+sp); i++)
{
printf("\n %lli",ary[i]);
}
printf("\n \n");
return;
}
void SortChunkF(long ChunkSize, int Chunk_ID, float *ary)
{
long i,j;
long sp;
float temp;
sp = Chunk_ID * ChunkSize;
for(i = 0; i< (ChunkSize-1); i++)
{
for(j = sp; j< ((ChunkSize+sp)-1-i); j++)
{
if(ary[j] > ary[j+1])
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
for(i = sp; i< (ChunkSize+sp); i++)
{
printf("\n %f",ary[i]);
}
printf("\n \n");
return;
}
void SortChunkD(long ChunkSize, int Chunk_ID, double *ary)
{
long i,j;
long sp;
double temp;
sp = Chunk_ID * ChunkSize;
for(i = 0; i< (ChunkSize-1); i++)
{
for(j = sp; j< ((ChunkSize+sp)-1-i); j++)
{
if(ary[j] > ary[j+1])
{
temp = ary[j+1];
ary[j+1] = ary[j];
ary[j] = temp;
}
}
}
for(i = sp; i< (ChunkSize+sp); i++)
{
printf("\n %f",ary[i]);
}
printf("\n \n");
return;
}
//************** merge sort HOST *****************
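// Bottom-up iterative merge sort: the run width doubles every pass
// (1, 2, 4, ...) and neighbouring runs are merged through the auxiliary
// buffer 'tmp'.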
void MergeSortI(signed int *list, long length)
{
long i;
long left_min, left_max, right_min, right_max, next;
signed int *tmp = (int*)malloc(sizeof(int) * length);
if (tmp == NULL)
{
fputs("Error: out of memory\n", stderr);
abort();
}
for (i = 1; i < length; i *= 2)
{
for (left_min = 0; left_min < length - i; left_min = right_max)
{
right_min = left_max = left_min + i;
right_max = left_max + i;
if (right_max > length)
right_max = length;
next = 0;
while (left_min < left_max && right_min < right_max)
tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
while (left_min < left_max)
list[--right_min] = list[--left_max];
while (next > 0)
list[--right_min] = tmp[--next];
}
}
free(tmp);
return;
}
void MergeSortL(signed long long *list, long length)
{
long i;
long left_min, left_max, right_min, right_max, next;
signed long long *tmp = (long long *)malloc(sizeof(long long ) * length);
if (tmp == NULL)
{
fputs("Error: out of memory\n", stderr);
abort();
}
for (i = 1; i < length; i *= 2)
{
for (left_min = 0; left_min < length - i; left_min = right_max)
{
right_min = left_max = left_min + i;
right_max = left_max + i;
if (right_max > length)
right_max = length;
next = 0;
while (left_min < left_max && right_min < right_max)
tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
while (left_min < left_max)
list[--right_min] = list[--left_max];
while (next > 0)
list[--right_min] = tmp[--next];
}
}
free(tmp);
return;
}
void MergeSortF(float *list, long length)
{
long i;
long left_min, left_max, right_min, right_max, next;
float *tmp = (float *)malloc(sizeof(float ) * length);
if (tmp == NULL)
{
fputs("Error: out of memory\n", stderr);
abort();
}
for (i = 1; i < length; i *= 2)
{
for (left_min = 0; left_min < length - i; left_min = right_max)
{
right_min = left_max = left_min + i;
right_max = left_max + i;
if (right_max > length)
right_max = length;
next = 0;
while (left_min < left_max && right_min < right_max)
tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
while (left_min < left_max)
list[--right_min] = list[--left_max];
while (next > 0)
list[--right_min] = tmp[--next];
}
}
free(tmp);
return;
}
void MergeSortD(double *list, long length)
{
long i;
long left_min, left_max, right_min, right_max, next;
double *tmp = (double *)malloc(sizeof(double ) * length);
if (tmp == NULL)
{
fputs("Error: out of memory\n", stderr);
abort();
}
for (i = 1; i < length; i *= 2)
{
for (left_min = 0; left_min < length - i; left_min = right_max)
{
right_min = left_max = left_min + i;
right_max = left_max + i;
if (right_max > length)
right_max = length;
next = 0;
while (left_min < left_max && right_min < right_max)
tmp[next++] = list[left_min] > list[right_min] ? list[right_min++] : list[left_min++];
while (left_min < left_max)
list[--right_min] = list[--left_max];
while (next > 0)
list[--right_min] = tmp[--next];
}
}
free(tmp);
return;
} |
29a8cad0333158bf66d5fb0db41da3d38f64b13b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ipsec.h"
#include <stdlib.h>
#include <time.h>
#define SHA 1
#define AES_ASSIGN 1
#define BODY 1
#define EIHDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + 2)
extern int *pkt_cnt;
extern unsigned char *pktBuf;
extern int *nbBoard;
extern int *statusBoard;
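// Single-block SHA-1 helper: resets the state to the standard SHA-1 IV, loads
// one 64-byte block as 16 big-endian words, extends it to the 80-word message
// schedule and runs the compression function; a 20-byte input (the inner
// digest) is zero-padded first.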
__device__ void sha1_kernel_global(unsigned char *data, sha1_gpu_context *ctx, uint32_t *extended, int len)
{
#if 1
/* Initialization vector for SHA-1 */
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xC3D2E1F0;
#endif
uint32_t temp, t;
/*
* Extend 32 block byte block into 80 byte block.
*/
// sh_kim 20.03.11 : when the data length is 20 bytes, pad it to a full block
if(len == 20)
{
memset(data + len - 1, 0, 44);
}
GET_UINT32_BE( extended[0], data, 0 );
GET_UINT32_BE( extended[1], data, 4 );
GET_UINT32_BE( extended[2], data, 8 );
GET_UINT32_BE( extended[3], data, 12 );
GET_UINT32_BE( extended[4], data, 16 );
GET_UINT32_BE( extended[5], data, 20 );
GET_UINT32_BE( extended[6], data, 24 );
GET_UINT32_BE( extended[7], data, 28 );
GET_UINT32_BE( extended[8], data, 32 );
GET_UINT32_BE( extended[9], data, 36 );
GET_UINT32_BE( extended[10], data, 40 );
GET_UINT32_BE( extended[11], data, 44 );
GET_UINT32_BE( extended[12], data, 48 );
GET_UINT32_BE( extended[13], data, 52 );
GET_UINT32_BE( extended[14], data, 56 );
GET_UINT32_BE( extended[15], data, 60 );
// Same as "blk(i)" macro in openssl source.
for (t = 16; t < 80; t++) {
temp = extended[t - 3] ^ extended[t - 8] ^ extended[t - 14] ^ extended[t - 16];
extended[t] = S(temp,1);
}
sha1_gpu_process(ctx, extended);
}
// CKJUNG, 18.10.26 [NF#2:IPSec]-------------------------------------
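// Persistent IPSec ESP kernel: all threads loop forever, polling nbBoard/
// statusBoard for new packet batches ("chapters"). THD_PER_PKT threads
// cooperate on one packet: the lead thread of each group appears to append the
// ESP trailer, derive an AES-CTR keystream block from the nonce and counter,
// shift the Ethernet/IP header to make room for the ESP header and fill in the
// SPI, sequence number and SHA-1 based digest, while the payload bytes are
// XORed with the keystream in the AES_ASSIGN section.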
__global__ void ipsec(unsigned char *pktBuf, int *nbBoard, int *statusBoard, int* pkt_cnt, unsigned char* d_nounce, unsigned int* d_key, unsigned char* d_sbox, unsigned char* d_GF2, int* head, unsigned int* seq)
{
unsigned char IV[16] = {0};
sha1_gpu_context octx;
sha1_gpu_context ictx;
uint32_t extended[80];
int ctr = 0;
unsigned int sha_count = 0;
__shared__ int nb;
unsigned char * buf = NULL;
__shared__ int curIdx;
uint8_t chapter_idx = 0;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int pktid = tid / THD_PER_PKT;
int dataid = tid % THD_PER_PKT;
if(threadIdx.x == 0){
nb = 0;
curIdx = -1;
}
__syncthreads();
while(true){ // Persistent Kernel (for every threads)
__syncthreads();
if(tid == 0 && statusBoard[chapter_idx] == -1){
if((*head) == -1 || statusBoard[(*head)] == nbBoard[(*head)] - 1){
atomicAdd(&statusBoard[(*head)], 1);
*head = chapter_idx;
chapter_idx++;
chapter_idx %= CHAPTER_NUM;
}
}
__syncthreads();
if(threadIdx.x == 0 && (*head) != curIdx){
nb = nbBoard[(*head)];
curIdx = (*head);
}
__syncthreads();
if(pktid < nb){
#if BODY
buf = &pktBuf[curIdx * PKT_BATCH_SIZE + pktid * PKT_SIZE];
sha_count = PKT_DATA_SIZE / 64 + ((PKT_DATA_SIZE % 64) != 0);
__syncthreads();
if(tid%THD_PER_PKT == 0){
buf[HEADROOM_SIZE + PKT_DATA_SIZE] = PAD_LEN; // padlen
buf[HEADROOM_SIZE + PKT_DATA_SIZE + 1] = IPPROTO_IPIP; // next-hdr (meaning "IP within IP")
/* For Reference...
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IPIP = 4
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPPROTO_ESP = 50
*/
ctr++; // same "ctr" value for grouped 3-threads. (counter) AES-CTR Mode
IV[15] = ctr & 0xFF;
IV[14] = (ctr >> 8) & 0xFF; // CKJUNG, split the 32-bit counter into bytes (1 byte = 8 bits)
IV[13] = (ctr >> 16) & 0xFF;
IV[12] = (ctr >> 24) & 0xFF;
for(int i = 0; i < 12; i++)
IV[i] = 0;
// Copy our state into private memory
unsigned char temp, temp2;
unsigned char overflow = 0;
char tmp[16];
for(int i = 15; i != -1; i--) {
temp = d_nounce[i];
temp2 = IV[i];
IV[i] += temp + overflow;
overflow = ((int)temp2 + (int)temp + (int)overflow > 255);
}
AddRoundKey(IV, &d_key[0]);
for(int i = 1; i < 10; i++){
SubBytes(IV, d_sbox);
ShiftRows(IV);
MixColumns(IV, d_GF2, tmp);
AddRoundKey(IV, &d_key[4 * i]);
}
SubBytes(IV, d_sbox);
ShiftRows(IV);
AddRoundKey(IV, &d_key[4 * 10]);
unsigned char temphdr[34];
memcpy(temphdr, buf + HEADROOM_SIZE, EIHDR_SIZE);
memcpy(buf + HEADROOM_SIZE - IPSECHEAD, temphdr, EIHDR_SIZE);
}
////////////////// Locating AES Encrypted parts into a pkt ///////////////////////////////
#if AES_ASSIGN
__syncthreads();
for(int i = 0; i < DATA_PER_THD; i++){
buf[HEADROOM_SIZE + sizeof(struct ethhdr) + dataid*DATA_PER_THD + i] ^= IV[i % 16];
}
__syncthreads();
#endif
if(tid%THD_PER_PKT == 0){
//////////// Proto_type = ESP set! ///////////
buf[HEADROOM_SIZE + 6] = IPPROTO_ESP; // IPPROTO_ESP = 50
//buf[HEADROOM_SIZE - IPSECHEAD + sizeof(struct ethhdr) + sizeof(struct iphdr)] = IPPROTO_ESP; // IPPROTO_ESP = 50
struct esphdr* esph;
esph = (struct esphdr *)((uint32_t *)&buf[HEADROOM_SIZE + 6]);
// SPI (Security Parameter Index)
uint32_t spi = 1085899777;
HTONS32(spi);
////////// Set ESP header SPI value ///////////////////
memcpy(&esph->spi, &spi, 4);
atomicAdd(seq, 1);
//////////// Set ESP header SEQ value //////////
memcpy(&esph->seq, seq, 4);
#if SHA
// CKJUNG, HMAC-SHA1 From here! /////////////////////////////
// RFC 2104, H(K XOR opad, H(K XOR ipad, text))
/**** Inner Digest ****/
// H(K XOR ipad, text) : 64 Bytes
int e_index = 0;
while(e_index < sha_count){
sha1_kernel_global(&buf[HEADROOM_SIZE + 6 + e_index*64], &ictx, extended, 64);
e_index++;
}
/**** Outer Digest ****/
// H(K XOR opad, H(K XOR ipad, text)) : 20 Bytes
sha1_kernel_global(&(ictx.c_state[0]), &octx, extended, 20);
memcpy(&buf[HEADROOM_SIZE + PKT_DATA_SIZE + 2], &(octx.c_state[0]), 20);
#endif
#endif
atomicAdd(&statusBoard[curIdx], 1);
atomicAdd(pkt_cnt, 1);
}
if(threadIdx.x == 0){
nb = 0;
}
}
}
}
__device__ void AddRoundKey(unsigned char *state, unsigned *w)
{
int i;
for(i = 0; i < BLOCK_SIZE; i++) { // column
state[i * 4 + 0] = state[i * 4 + 0] ^ ((w[i] >> (8 * 3)) & 0xFF);
state[i * 4 + 1] = state[i * 4 + 1] ^ ((w[i] >> (8 * 2)) & 0xFF);
state[i * 4 + 2] = state[i * 4 + 2] ^ ((w[i] >> (8 * 1)) & 0xFF);
state[i * 4 + 3] = state[i * 4 + 3] ^ ((w[i] >> (8 * 0)) & 0xFF);
}
}
__device__ void SubBytes(unsigned char *state, unsigned char* sbox) //state = 16 chars
{
int i;
for(i = 0; i < 4 * BLOCK_SIZE; i++) {
state[i] = sbox[state[i]];
}
}
__device__ void ShiftRows(unsigned char *state)
{
// NOTE: the AES standard lays out the state in column-major order:
// 0 1 2 3 --> 0 1 2 3 | 0 4 8 12 --> 0 4 8 12
// 0 1 2 3 --> 1 2 3 0 | 1 5 9 13 --> 5 9 13 1
// 0 1 2 3 --> 2 3 0 1 | 2 6 10 14 --> 10 14 2 6
// 0 1 2 3 --> 3 0 1 2 | 3 7 11 15 --> 15 3 7 11
unsigned char temp = state[1];
state[1] = state[5];
state[5] = state[9];
state[9] = state[13];
state[13] = temp;
temp = state[2];
state[2] = state[10];
state[10] = temp;
temp = state[6];
state[6] = state[14];
state[14] = temp;
temp = state[3];
state[3] = state[15];
state[15] = state[11];
state[11] = state[7];
state[7] = temp;
}
// See "Efficient Software Implementation of AES on 32-bit platforms"
__device__ void MixColumns(unsigned char *state, unsigned char* GF_2, char* s)
{
// [TODO] CKJUNG, 18.10.26: in-kernel malloc was the culprit here; a caller-provided scratch buffer 's' is used instead
memcpy(s, state, 4 * BLOCK_SIZE);
int i;
#if 1
for(i = 0; i < BLOCK_SIZE; i++) { // column
unsigned char * x = (unsigned char*)&s[i*4];
unsigned char * y = (unsigned char*)&state[i*4];
y[0] = x[1] ^ x[2] ^ x[3];
y[1] = x[0] ^ x[2] ^ x[3];
y[2] = x[0] ^ x[1] ^ x[3];
y[3] = x[0] ^ x[1] ^ x[2];
x[0] = GF_2[x[0]];
x[1] = GF_2[x[1]];
x[2] = GF_2[x[2]];
x[3] = GF_2[x[3]];
y[0] ^= x[0] ^ x[1];
y[1] ^= x[1] ^ x[2];
y[2] ^= x[2] ^ x[3];
y[3] ^= x[3] ^ x[0];
}
#endif
}
/**
* Initialize new context
*
* @param context SHA1-Context
*/
/*
* Process extended block.
*/
__device__ void sha1_gpu_process (sha1_gpu_context *ctx, uint32_t W[80])
{
uint32_t A, B, C, D, E;
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
E = ctx->state[4];
#define P(a,b,c,d,e,x)\
{\
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30);\
}
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, W[0] );
P( E, A, B, C, D, W[1] );
P( D, E, A, B, C, W[2] );
P( C, D, E, A, B, W[3] );
P( B, C, D, E, A, W[4] );
P( A, B, C, D, E, W[5] );
P( E, A, B, C, D, W[6] );
P( D, E, A, B, C, W[7] );
P( C, D, E, A, B, W[8] );
P( B, C, D, E, A, W[9] );
P( A, B, C, D, E, W[10] );
P( E, A, B, C, D, W[11] );
P( D, E, A, B, C, W[12] );
P( C, D, E, A, B, W[13] );
P( B, C, D, E, A, W[14] );
P( A, B, C, D, E, W[15] );
P( E, A, B, C, D, W[16] );
P( D, E, A, B, C, W[17] );
P( C, D, E, A, B, W[18] );
P( B, C, D, E, A, W[19] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, W[20] );
P( E, A, B, C, D, W[21] );
P( D, E, A, B, C, W[22] );
P( C, D, E, A, B, W[23] );
P( B, C, D, E, A, W[24] );
P( A, B, C, D, E, W[25] ); // w[25] is the problem.
P( E, A, B, C, D, W[26] );
P( D, E, A, B, C, W[27] );
P( C, D, E, A, B, W[28] );
P( B, C, D, E, A, W[29] );
P( A, B, C, D, E, W[30] );
P( E, A, B, C, D, W[31] );
P( D, E, A, B, C, W[32] );
P( C, D, E, A, B, W[33] );
P( B, C, D, E, A, W[34] );
P( A, B, C, D, E, W[35] );
P( E, A, B, C, D, W[36] );
P( D, E, A, B, C, W[37] );
P( C, D, E, A, B, W[38] );
P( B, C, D, E, A, W[39] );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, W[40] );
P( E, A, B, C, D, W[41] );
P( D, E, A, B, C, W[42] );
P( C, D, E, A, B, W[43] );
P( B, C, D, E, A, W[44] );
P( A, B, C, D, E, W[45] );
P( E, A, B, C, D, W[46] );
P( D, E, A, B, C, W[47] );
P( C, D, E, A, B, W[48] );
P( B, C, D, E, A, W[49] );
P( A, B, C, D, E, W[50] );
P( E, A, B, C, D, W[51] );
P( D, E, A, B, C, W[52] );
P( C, D, E, A, B, W[53] );
P( B, C, D, E, A, W[54] );
P( A, B, C, D, E, W[55] );
P( E, A, B, C, D, W[56] );
P( D, E, A, B, C, W[57] );
P( C, D, E, A, B, W[58] );
P( B, C, D, E, A, W[59] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, W[60] );
P( E, A, B, C, D, W[61] );
P( D, E, A, B, C, W[62] );
P( C, D, E, A, B, W[63] );
P( B, C, D, E, A, W[64] );
P( A, B, C, D, E, W[65] );
P( E, A, B, C, D, W[66] );
P( D, E, A, B, C, W[67] );
P( C, D, E, A, B, W[68] );
P( B, C, D, E, A, W[69] );
P( A, B, C, D, E, W[70] );
P( E, A, B, C, D, W[71] );
P( D, E, A, B, C, W[72] );
P( C, D, E, A, B, W[73] );
P( B, C, D, E, A, W[74] );
P( A, B, C, D, E, W[75] );
P( E, A, B, C, D, W[76] );
P( D, E, A, B, C, W[77] );
P( C, D, E, A, B, W[78] );
P( B, C, D, E, A, W[79] );
#undef K
#undef F
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
ctx->state[4] += E;
}
unsigned int SubWord(unsigned int w) {
unsigned int i = (sbox[(w >> 24) & 0xFF] << 24) | (sbox[(w >> 16) & 0xFF] << 16);
i |= (sbox[(w >> 8) & 0xFF] << 8) | sbox[w & 0xFF];
return i;
}
unsigned int RotWord(unsigned int w) {
unsigned char temp = (w >> 24) & 0xFF;
return ((w << 8) | temp);
}
void KeyExpansion(unsigned char* key, unsigned int* w) {
unsigned int temp;
int i = 0;
for(i = 0; i < KEY_SIZE; i++) {
w[i] = (key[4*i] << 24) | (key[4*i + 1] << 16) | (key[4*i + 2] << 8) | key[4*i + 3];
}
for(; i < BLOCK_SIZE * (NUM_ROUNDS + 1); i++) {
temp = w[i - 1];
if(i % KEY_SIZE == 0) {
temp = SubWord(RotWord(temp)) ^ Rcon[i / KEY_SIZE];
}
w[i] = w[i - KEY_SIZE] ^ temp;
}
}
extern "C"
void initialize_ipsec(void)
{
// CKJUNG, 18.10.25 [NF #2: IPSec] Setting initial_counter, key /////////////////////////
unsigned char nounce[16];
FILE* fnounce = fopen("./apps/lib/test.ctr", "rb");
fread(&nounce, 1, 16, fnounce);
fclose(fnounce);
int num_keys = BLOCK_SIZE * (NUM_ROUNDS + 1);
unsigned char key[16];
unsigned int* expanded_key = (unsigned int*)malloc(num_keys * sizeof(int));
FILE* fkey = fopen("./apps/lib/test.key", "rb");
fread(&key, 1, 16, fkey);
fclose(fkey);
KeyExpansion(key, expanded_key);
unsigned char *d_nounce;
unsigned int *d_key;
unsigned char *d_sbox;
unsigned char *d_GF2;
unsigned int *d_seq; // 20.02.02. CKJUNG
printf("____[Initialize]__NF #2__IPSec__\n");
ASSERTRT(hipMalloc((void**)&d_nounce, 16*sizeof(unsigned char)));
ASSERTRT(hipMemset(d_nounce, 0, 16*sizeof(unsigned char)));
ASSERTRT(hipMalloc((void**)&d_key, num_keys*sizeof(unsigned int)));
ASSERTRT(hipMemset(d_key, 0, num_keys*sizeof(unsigned int)));
ASSERTRT(hipMalloc((void**)&d_sbox, 256*sizeof(unsigned char)));
ASSERTRT(hipMemset(d_sbox, 0, 256*sizeof(unsigned char)));
ASSERTRT(hipMalloc((void**)&d_GF2, 256*sizeof(unsigned char)));
ASSERTRT(hipMemset(d_GF2, 0, 256*sizeof(unsigned char)));
ASSERTRT(hipMalloc((void**)&d_seq, sizeof(unsigned int)));
ASSERTRT(hipMemset(d_seq, 0, sizeof(unsigned int)));
hipError_t nounce_err = hipMemcpy(d_nounce, nounce, 16*sizeof(unsigned char), hipMemcpyHostToDevice);
hipError_t key_err = hipMemcpy(d_key, expanded_key, num_keys*sizeof(unsigned int), hipMemcpyHostToDevice);
hipError_t sbox_err = hipMemcpy(d_sbox, sbox, 256*sizeof(unsigned char), hipMemcpyHostToDevice);
hipError_t GF2_err = hipMemcpy(d_GF2, GF_2, 256*sizeof(unsigned char), hipMemcpyHostToDevice);
if(nounce_err != hipSuccess || key_err != hipSuccess || sbox_err != hipSuccess || GF2_err != hipSuccess)
{
START_RED
printf("[Error] hipMemcpy for \"nounce\" or \"key\" or \"sbox\" or \"GF2\" has failed.\n");
END
}else{
START_GRN
printf("[IPSec] Nounce, Expanded keys, SBOX, and GF2 are ready.\n");
END
}
int * head;
ASSERTRT(hipMalloc((void**)&head, sizeof(int)));
ASSERTRT(hipMemset(head, -1 , sizeof(int)));
hipStream_t cuda_stream3;
ASSERT_CUDA(hipStreamCreateWithFlags(&cuda_stream3,hipStreamNonBlocking));
printf("NF#2: IPsec\n");
/*
* ipsec for 64B pkt
* 1 pkt needs 1 GPU thread.
* 512 x 1 = 512 threads. (OK)
* 384 threads per TB; 512 = 1 * 512; each TB manages 512 pkts; 128 * 1 = 512 Desc
*/
hipLaunchKernelGGL(( ipsec), dim3(NF_TB_NUM), dim3(NF_T_NUM), 0, cuda_stream3 , pktBuf, nbBoard, statusBoard, pkt_cnt, d_nounce, d_key, d_sbox, d_GF2, head, d_seq);
START_GRN
printf("[Done]____[Initialize]__NF #2__IPSec__\n");
printf("[IPSEC] %s\n", hipGetErrorName(hipGetLastError()));
END
free(expanded_key);
// ~ CKJUNG /////////////////////////////////////////////////////////////////////////////
}
| 29a8cad0333158bf66d5fb0db41da3d38f64b13b.cu | #include "ipsec.h"
#include <stdlib.h>
#include <time.h>
#define SHA 1
#define AES_ASSIGN 1
#define BODY 1
#define EIHDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + 2)
extern int *pkt_cnt;
extern unsigned char *pktBuf;
extern int *nbBoard;
extern int *statusBoard;
__device__ void sha1_kernel_global(unsigned char *data, sha1_gpu_context *ctx, uint32_t *extended, int len)
{
#if 1
/* Initialization vector for SHA-1 */
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xC3D2E1F0;
#endif
uint32_t temp, t;
/*
* Extend 32 block byte block into 80 byte block.
*/
// sh_kim 20.03.11 : when the data length is 20 bytes, pad it to a full block
if(len == 20)
{
memset(data + len - 1, 0, 44);
}
GET_UINT32_BE( extended[0], data, 0 );
GET_UINT32_BE( extended[1], data, 4 );
GET_UINT32_BE( extended[2], data, 8 );
GET_UINT32_BE( extended[3], data, 12 );
GET_UINT32_BE( extended[4], data, 16 );
GET_UINT32_BE( extended[5], data, 20 );
GET_UINT32_BE( extended[6], data, 24 );
GET_UINT32_BE( extended[7], data, 28 );
GET_UINT32_BE( extended[8], data, 32 );
GET_UINT32_BE( extended[9], data, 36 );
GET_UINT32_BE( extended[10], data, 40 );
GET_UINT32_BE( extended[11], data, 44 );
GET_UINT32_BE( extended[12], data, 48 );
GET_UINT32_BE( extended[13], data, 52 );
GET_UINT32_BE( extended[14], data, 56 );
GET_UINT32_BE( extended[15], data, 60 );
// Same as "blk(i)" macro in openssl source.
for (t = 16; t < 80; t++) {
temp = extended[t - 3] ^ extended[t - 8] ^ extended[t - 14] ^ extended[t - 16];
extended[t] = S(temp,1);
}
sha1_gpu_process(ctx, extended);
}
// CKJUNG, 18.10.26 [NF#2:IPSec]-------------------------------------
__global__ void ipsec(unsigned char *pktBuf, int *nbBoard, int *statusBoard, int* pkt_cnt, unsigned char* d_nounce, unsigned int* d_key, unsigned char* d_sbox, unsigned char* d_GF2, int* head, unsigned int* seq)
{
unsigned char IV[16] = {0};
sha1_gpu_context octx;
sha1_gpu_context ictx;
uint32_t extended[80];
int ctr = 0;
unsigned int sha_count = 0;
__shared__ int nb;
unsigned char * buf = NULL;
__shared__ int curIdx;
uint8_t chapter_idx = 0;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int pktid = tid / THD_PER_PKT;
int dataid = tid % THD_PER_PKT;
if(threadIdx.x == 0){
nb = 0;
curIdx = -1;
}
__syncthreads();
while(true){ // Persistent Kernel (for every threads)
__syncthreads();
if(tid == 0 && statusBoard[chapter_idx] == -1){
if((*head) == -1 || statusBoard[(*head)] == nbBoard[(*head)] - 1){
atomicAdd(&statusBoard[(*head)], 1);
*head = chapter_idx;
chapter_idx++;
chapter_idx %= CHAPTER_NUM;
}
}
__syncthreads();
if(threadIdx.x == 0 && (*head) != curIdx){
nb = nbBoard[(*head)];
curIdx = (*head);
}
__syncthreads();
if(pktid < nb){
#if BODY
buf = &pktBuf[curIdx * PKT_BATCH_SIZE + pktid * PKT_SIZE];
sha_count = PKT_DATA_SIZE / 64 + ((PKT_DATA_SIZE % 64) != 0);
__syncthreads();
if(tid%THD_PER_PKT == 0){
buf[HEADROOM_SIZE + PKT_DATA_SIZE] = PAD_LEN; // padlen
buf[HEADROOM_SIZE + PKT_DATA_SIZE + 1] = IPPROTO_IPIP; // next-hdr (meaning "IP within IP")
/* For Reference...
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IPIP = 4
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPPROTO_ESP = 50
*/
ctr++; // same "ctr" value for grouped 3-threads. (counter) AES-CTR Mode
IV[15] = ctr & 0xFF;
IV[14] = (ctr >> 8) & 0xFF; // CKJUNG, split the 32-bit counter into bytes (1 byte = 8 bits)
IV[13] = (ctr >> 16) & 0xFF;
IV[12] = (ctr >> 24) & 0xFF;
for(int i = 0; i < 12; i++)
IV[i] = 0;
// Copy our state into private memory
unsigned char temp, temp2;
unsigned char overflow = 0;
char tmp[16];
for(int i = 15; i != -1; i--) {
temp = d_nounce[i];
temp2 = IV[i];
IV[i] += temp + overflow;
overflow = ((int)temp2 + (int)temp + (int)overflow > 255);
}
AddRoundKey(IV, &d_key[0]);
for(int i = 1; i < 10; i++){
SubBytes(IV, d_sbox);
ShiftRows(IV);
MixColumns(IV, d_GF2, tmp);
AddRoundKey(IV, &d_key[4 * i]);
}
SubBytes(IV, d_sbox);
ShiftRows(IV);
AddRoundKey(IV, &d_key[4 * 10]);
unsigned char temphdr[34];
memcpy(temphdr, buf + HEADROOM_SIZE, EIHDR_SIZE);
memcpy(buf + HEADROOM_SIZE - IPSECHEAD, temphdr, EIHDR_SIZE);
}
////////////////// Locating AES Encrypted parts into a pkt ///////////////////////////////
#if AES_ASSIGN
__syncthreads();
for(int i = 0; i < DATA_PER_THD; i++){
buf[HEADROOM_SIZE + sizeof(struct ethhdr) + dataid*DATA_PER_THD + i] ^= IV[i % 16];
}
__syncthreads();
#endif
if(tid%THD_PER_PKT == 0){
//////////// Proto_type = ESP set! ///////////
buf[HEADROOM_SIZE + 6] = IPPROTO_ESP; // IPPROTO_ESP = 50
//buf[HEADROOM_SIZE - IPSECHEAD + sizeof(struct ethhdr) + sizeof(struct iphdr)] = IPPROTO_ESP; // IPPROTO_ESP = 50
struct esphdr* esph;
esph = (struct esphdr *)((uint32_t *)&buf[HEADROOM_SIZE + 6]);
// SPI (Security Parameter Index)
uint32_t spi = 1085899777;
HTONS32(spi);
////////// Set ESP header SPI value ///////////////////
memcpy(&esph->spi, &spi, 4);
atomicAdd(seq, 1);
//////////// Set ESP header SEQ value //////////
memcpy(&esph->seq, seq, 4);
#if SHA
// CKJUNG, HMAC-SHA1 From here! /////////////////////////////
// RFC 2104, H(K XOR opad, H(K XOR ipad, text))
/**** Inner Digest ****/
// H(K XOR ipad, text) : 64 Bytes
int e_index = 0;
while(e_index < sha_count){
sha1_kernel_global(&buf[HEADROOM_SIZE + 6 + e_index*64], &ictx, extended, 64);
e_index++;
}
/**** Outer Digest ****/
// H(K XOR opad, H(K XOR ipad, text)) : 20 Bytes
sha1_kernel_global(&(ictx.c_state[0]), &octx, extended, 20);
memcpy(&buf[HEADROOM_SIZE + PKT_DATA_SIZE + 2], &(octx.c_state[0]), 20);
#endif
#endif
atomicAdd(&statusBoard[curIdx], 1);
atomicAdd(pkt_cnt, 1);
}
if(threadIdx.x == 0){
nb = 0;
}
}
}
}
__device__ void AddRoundKey(unsigned char *state, unsigned *w)
{
int i;
for(i = 0; i < BLOCK_SIZE; i++) { // column
state[i * 4 + 0] = state[i * 4 + 0] ^ ((w[i] >> (8 * 3)) & 0xFF);
state[i * 4 + 1] = state[i * 4 + 1] ^ ((w[i] >> (8 * 2)) & 0xFF);
state[i * 4 + 2] = state[i * 4 + 2] ^ ((w[i] >> (8 * 1)) & 0xFF);
state[i * 4 + 3] = state[i * 4 + 3] ^ ((w[i] >> (8 * 0)) & 0xFF);
}
}
__device__ void SubBytes(unsigned char *state, unsigned char* sbox) //state = 16 chars
{
int i;
for(i = 0; i < 4 * BLOCK_SIZE; i++) {
state[i] = sbox[state[i]];
}
}
__device__ void ShiftRows(unsigned char *state)
{
// NOTE: the AES standard lays out the state in column-major order:
// 0 1 2 3 --> 0 1 2 3 | 0 4 8 12 --> 0 4 8 12
// 0 1 2 3 --> 1 2 3 0 | 1 5 9 13 --> 5 9 13 1
// 0 1 2 3 --> 2 3 0 1 | 2 6 10 14 --> 10 14 2 6
// 0 1 2 3 --> 3 0 1 2 | 3 7 11 15 --> 15 3 7 11
unsigned char temp = state[1];
state[1] = state[5];
state[5] = state[9];
state[9] = state[13];
state[13] = temp;
temp = state[2];
state[2] = state[10];
state[10] = temp;
temp = state[6];
state[6] = state[14];
state[14] = temp;
temp = state[3];
state[3] = state[15];
state[15] = state[11];
state[11] = state[7];
state[7] = temp;
}
// See "Efficient Software Implementation of AES on 32-bit platforms"
__device__ void MixColumns(unsigned char *state, unsigned char* GF_2, char* s)
{
// [TODO] CKJUNG, 18.10.26: in-kernel malloc was the culprit here; a caller-provided scratch buffer 's' is used instead
memcpy(s, state, 4 * BLOCK_SIZE);
int i;
#if 1
for(i = 0; i < BLOCK_SIZE; i++) { // column
unsigned char * x = (unsigned char*)&s[i*4];
unsigned char * y = (unsigned char*)&state[i*4];
y[0] = x[1] ^ x[2] ^ x[3];
y[1] = x[0] ^ x[2] ^ x[3];
y[2] = x[0] ^ x[1] ^ x[3];
y[3] = x[0] ^ x[1] ^ x[2];
x[0] = GF_2[x[0]];
x[1] = GF_2[x[1]];
x[2] = GF_2[x[2]];
x[3] = GF_2[x[3]];
y[0] ^= x[0] ^ x[1];
y[1] ^= x[1] ^ x[2];
y[2] ^= x[2] ^ x[3];
y[3] ^= x[3] ^ x[0];
}
#endif
}
/**
* Initialize new context
*
* @param context SHA1-Context
*/
/*
* Process extended block.
*/
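// SHA-1 compression function: 80 fully unrolled rounds in four groups of 20,
// each group with its own round function F and round constant K, updating the
// five 32-bit state words A..E.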
__device__ void sha1_gpu_process (sha1_gpu_context *ctx, uint32_t W[80])
{
uint32_t A, B, C, D, E;
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
E = ctx->state[4];
#define P(a,b,c,d,e,x)\
{\
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30);\
}
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, W[0] );
P( E, A, B, C, D, W[1] );
P( D, E, A, B, C, W[2] );
P( C, D, E, A, B, W[3] );
P( B, C, D, E, A, W[4] );
P( A, B, C, D, E, W[5] );
P( E, A, B, C, D, W[6] );
P( D, E, A, B, C, W[7] );
P( C, D, E, A, B, W[8] );
P( B, C, D, E, A, W[9] );
P( A, B, C, D, E, W[10] );
P( E, A, B, C, D, W[11] );
P( D, E, A, B, C, W[12] );
P( C, D, E, A, B, W[13] );
P( B, C, D, E, A, W[14] );
P( A, B, C, D, E, W[15] );
P( E, A, B, C, D, W[16] );
P( D, E, A, B, C, W[17] );
P( C, D, E, A, B, W[18] );
P( B, C, D, E, A, W[19] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, W[20] );
P( E, A, B, C, D, W[21] );
P( D, E, A, B, C, W[22] );
P( C, D, E, A, B, W[23] );
P( B, C, D, E, A, W[24] );
P( A, B, C, D, E, W[25] ); // w[25] is the problem.
P( E, A, B, C, D, W[26] );
P( D, E, A, B, C, W[27] );
P( C, D, E, A, B, W[28] );
P( B, C, D, E, A, W[29] );
P( A, B, C, D, E, W[30] );
P( E, A, B, C, D, W[31] );
P( D, E, A, B, C, W[32] );
P( C, D, E, A, B, W[33] );
P( B, C, D, E, A, W[34] );
P( A, B, C, D, E, W[35] );
P( E, A, B, C, D, W[36] );
P( D, E, A, B, C, W[37] );
P( C, D, E, A, B, W[38] );
P( B, C, D, E, A, W[39] );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, W[40] );
P( E, A, B, C, D, W[41] );
P( D, E, A, B, C, W[42] );
P( C, D, E, A, B, W[43] );
P( B, C, D, E, A, W[44] );
P( A, B, C, D, E, W[45] );
P( E, A, B, C, D, W[46] );
P( D, E, A, B, C, W[47] );
P( C, D, E, A, B, W[48] );
P( B, C, D, E, A, W[49] );
P( A, B, C, D, E, W[50] );
P( E, A, B, C, D, W[51] );
P( D, E, A, B, C, W[52] );
P( C, D, E, A, B, W[53] );
P( B, C, D, E, A, W[54] );
P( A, B, C, D, E, W[55] );
P( E, A, B, C, D, W[56] );
P( D, E, A, B, C, W[57] );
P( C, D, E, A, B, W[58] );
P( B, C, D, E, A, W[59] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, W[60] );
P( E, A, B, C, D, W[61] );
P( D, E, A, B, C, W[62] );
P( C, D, E, A, B, W[63] );
P( B, C, D, E, A, W[64] );
P( A, B, C, D, E, W[65] );
P( E, A, B, C, D, W[66] );
P( D, E, A, B, C, W[67] );
P( C, D, E, A, B, W[68] );
P( B, C, D, E, A, W[69] );
P( A, B, C, D, E, W[70] );
P( E, A, B, C, D, W[71] );
P( D, E, A, B, C, W[72] );
P( C, D, E, A, B, W[73] );
P( B, C, D, E, A, W[74] );
P( A, B, C, D, E, W[75] );
P( E, A, B, C, D, W[76] );
P( D, E, A, B, C, W[77] );
P( C, D, E, A, B, W[78] );
P( B, C, D, E, A, W[79] );
#undef K
#undef F
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
ctx->state[4] += E;
}
unsigned int SubWord(unsigned int w) {
unsigned int i = (sbox[(w >> 24) & 0xFF] << 24) | (sbox[(w >> 16) & 0xFF] << 16);
i |= (sbox[(w >> 8) & 0xFF] << 8) | sbox[w & 0xFF];
return i;
}
unsigned int RotWord(unsigned int w) {
unsigned char temp = (w >> 24) & 0xFF;
return ((w << 8) | temp);
}
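// AES-128 key schedule: expands the 16-byte cipher key into
// BLOCK_SIZE * (NUM_ROUNDS + 1) 32-bit round-key words using RotWord, SubWord
// and the Rcon constants.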
void KeyExpansion(unsigned char* key, unsigned int* w) {
unsigned int temp;
int i = 0;
for(i = 0; i < KEY_SIZE; i++) {
w[i] = (key[4*i] << 24) | (key[4*i + 1] << 16) | (key[4*i + 2] << 8) | key[4*i + 3];
}
for(; i < BLOCK_SIZE * (NUM_ROUNDS + 1); i++) {
temp = w[i - 1];
if(i % KEY_SIZE == 0) {
temp = SubWord(RotWord(temp)) ^ Rcon[i / KEY_SIZE];
}
w[i] = w[i - KEY_SIZE] ^ temp;
}
}
extern "C"
void initialize_ipsec(void)
{
// CKJUNG, 18.10.25 [NF #2: IPSec] Setting initial_counter, key /////////////////////////
unsigned char nounce[16];
FILE* fnounce = fopen("./apps/lib/test.ctr", "rb");
fread(&nounce, 1, 16, fnounce);
fclose(fnounce);
int num_keys = BLOCK_SIZE * (NUM_ROUNDS + 1);
unsigned char key[16];
unsigned int* expanded_key = (unsigned int*)malloc(num_keys * sizeof(int));
FILE* fkey = fopen("./apps/lib/test.key", "rb");
fread(&key, 1, 16, fkey);
fclose(fkey);
KeyExpansion(key, expanded_key);
unsigned char *d_nounce;
unsigned int *d_key;
unsigned char *d_sbox;
unsigned char *d_GF2;
unsigned int *d_seq; // 20.02.02. CKJUNG
printf("____[Initialize]__NF #2__IPSec__\n");
ASSERTRT(cudaMalloc((void**)&d_nounce, 16*sizeof(unsigned char)));
ASSERTRT(cudaMemset(d_nounce, 0, 16*sizeof(unsigned char)));
ASSERTRT(cudaMalloc((void**)&d_key, num_keys*sizeof(unsigned int)));
ASSERTRT(cudaMemset(d_key, 0, num_keys*sizeof(unsigned int)));
ASSERTRT(cudaMalloc((void**)&d_sbox, 256*sizeof(unsigned char)));
ASSERTRT(cudaMemset(d_sbox, 0, 256*sizeof(unsigned char)));
ASSERTRT(cudaMalloc((void**)&d_GF2, 256*sizeof(unsigned char)));
ASSERTRT(cudaMemset(d_GF2, 0, 256*sizeof(unsigned char)));
ASSERTRT(cudaMalloc((void**)&d_seq, sizeof(unsigned int)));
ASSERTRT(cudaMemset(d_seq, 0, sizeof(unsigned int)));
cudaError_t nounce_err = cudaMemcpy(d_nounce, nounce, 16*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaError_t key_err = cudaMemcpy(d_key, expanded_key, num_keys*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaError_t sbox_err = cudaMemcpy(d_sbox, sbox, 256*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaError_t GF2_err = cudaMemcpy(d_GF2, GF_2, 256*sizeof(unsigned char), cudaMemcpyHostToDevice);
if(nounce_err != cudaSuccess || key_err != cudaSuccess || sbox_err != cudaSuccess || GF2_err != cudaSuccess)
{
START_RED
printf("[Error] cudaMemcpy for \"nounce\" or \"key\" or \"sbox\" or \"GF2\" has failed.\n");
END
}else{
START_GRN
printf("[IPSec] Nounce, Expanded keys, SBOX, and GF2 are ready.\n");
END
}
int * head;
ASSERTRT(cudaMalloc((void**)&head, sizeof(int)));
ASSERTRT(cudaMemset(head, -1 , sizeof(int)));
cudaStream_t cuda_stream3;
ASSERT_CUDA(cudaStreamCreateWithFlags(&cuda_stream3,cudaStreamNonBlocking));
printf("NF#2: IPsec\n");
/*
* ipsec for 64B pkt
* 1 pkt needs 1 GPU thread.
* 512 x 1 = 512 threads. (OK)
* 384 threads per TB; 512 = 1 * 512; each TB manages 512 pkts; 128 * 1 = 512 Desc
*/
ipsec<<< NF_TB_NUM, NF_T_NUM, 0, cuda_stream3 >>> (pktBuf, nbBoard, statusBoard, pkt_cnt, d_nounce, d_key, d_sbox, d_GF2, head, d_seq);
START_GRN
printf("[Done]____[Initialize]__NF #2__IPSec__\n");
printf("[IPSEC] %s\n", cudaGetErrorName(cudaGetLastError()));
END
free(expanded_key);
// ~ CKJUNG /////////////////////////////////////////////////////////////////////////////
}
|
2eab53e168e8e427abadad4808fde4f308e81419.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//M and N number of threads (grid and block)
__global__ void multiply( const int a[] ,const int b[], int c[] , const int sqrt_dim,const int thread_number)
{
int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x;
//for an element in matrix[i][j] , its coordinate k in array[] is i+j*sqrt(size_array)
int index_i = index < sqrt_dim ? index : (int)index%sqrt_dim;
int index_j = (index-index_i)/sqrt_dim;
int dim=sqrt_dim*sqrt_dim;
if(index<dim){
c[index]=0;
if(dim<=thread_number){ //if more threads than array size
printf("Thread %i; Modifying value of index %i\n ", index, index);
for(int i=0; i<sqrt_dim;i++){ //row of first matrix
c[index]+=a[i+index_j * sqrt_dim ]*b[index_i+ i* sqrt_dim];
//printf("c[%i]+=a[%i]*b[%i]\n", index,i+index_j * sqrt_dim ,index_i+ i* sqrt_dim );
}
}
else{ //if less threads than array size
if(index!=thread_number-1){//if not last thread deal with size_array/thread_nb array entries
for(int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){
printf("Thread %i; Modifying value of index %i \n", index, i);
index_i = (int)i%sqrt_dim;
index_j = (i-index_i)/sqrt_dim;
for(int j=0; j<sqrt_dim;j++){ //row of first matrix
c[i]+=a[j+index_j * sqrt_dim ]*b[index_i+ j* sqrt_dim];
}
}
}
else{ //if last thread deal with all remaining array entries
for(int i=index*(int)(dim/thread_number); i< dim; i++){
printf("Thread %i; Modifying value of index %i\n",index, i );
index_i = (int)i%sqrt_dim;
index_j = (i-index_i)/sqrt_dim;
for(int j=0;j<sqrt_dim;j++){ //row of first matrix
c[i]+=a[j+index_j * sqrt_dim ]*b[index_i+ j* sqrt_dim];
}
}
}
}
}
}
int main(int argc, char *argv[]){
//Measure time
clock_t time_begin;
time_begin=clock();
// pointers to host & device arrays
int *d_array1 = 0,*d_array2 = 0,*d_array3 = 0;
int *h_array1 = 0,*h_array2 = 0,*h_array3 = 0;
int size_array=9; // here, size_array has to be a perfect square
int N=3;
if(argc == 3){
size_array=atoi(argv[1]) * atoi(argv[1]) ;
N=atoi(argv[2]);
}
// malloc columns of host arrays
h_array1 = (int*)malloc( size_array * sizeof(int));
h_array2 = (int*)malloc( size_array * sizeof(int));
h_array3 = (int*)malloc( size_array * sizeof(int));
for(int i=0; i<size_array; i++){
h_array1[i]=1;//rand()%10;
h_array2[i]=1;//rand()%10;
// printf("%i|%i\t", h_array1[i], h_array2[i]);
//if((i+1)%(int)sqrt((float)size_array)==0)
//printf("\n");
}
//printf("\n");
// hipMalloc a device array
hipMalloc(&d_array1,size_array * sizeof(int));
hipMalloc(&d_array2,size_array * sizeof(int));
hipMalloc(&d_array3,size_array * sizeof(int));
// download and inspect the result on the host:
hipMemcpy(d_array1, h_array1, sizeof(int)*size_array, hipMemcpyHostToDevice);
hipMemcpy(d_array2, h_array2, sizeof(int)*size_array, hipMemcpyHostToDevice);
dim3 bloque(N,N); // two-dimensional block of N*N threads
dim3 grid(1,1); // two-dimensional grid of M*M blocks
int thread_number= N*N;
hipLaunchKernelGGL(( multiply), dim3(grid), dim3(bloque), 0, 0, d_array1, d_array2 , d_array3,sqrt((float)size_array), thread_number);
hipDeviceSynchronize();
// download and inspect the result on the host:
hipMemcpy(h_array3, d_array3, sizeof(int)*size_array, hipMemcpyDeviceToHost);
for(int i=0; i<size_array; i++){
printf("%i\t", h_array3[i]);
if((i+1)%(int)(sqrt((float)size_array))==0)
printf("\n");
}
printf("\n");
// deallocate memory
free(h_array3); free(h_array2); free(h_array1);
hipFree(d_array3);hipFree(d_array2);hipFree(d_array1);
printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.18s
}
| 2eab53e168e8e427abadad4808fde4f308e81419.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//M and N number of threads (grid and block)
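// Each thread computes entries of C = A*B for square matrices stored as flat
// row-major arrays of sqrt_dim*sqrt_dim elements. With at least as many
// threads as elements, thread 'index' computes the single element at row
// index_j, column index_i; with fewer threads, each thread handles a
// contiguous range of output elements and the last thread picks up the
// remainder.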
__global__ void multiply( const int a[] ,const int b[], int c[] , const int sqrt_dim,const int thread_number)
{
int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x;
//for an element in matrix[i][j] , its coordinate k in array[] is i+j*sqrt(size_array)
int index_i = index < sqrt_dim ? index : (int)index%sqrt_dim;
int index_j = (index-index_i)/sqrt_dim;
int dim=sqrt_dim*sqrt_dim;
if(index<dim){
c[index]=0;
if(dim<=thread_number){ //if more threads than array size
printf("Thread %i; Modifying value of index %i\n ", index, index);
for(int i=0; i<sqrt_dim;i++){ //row of first matrix
c[index]+=a[i+index_j * sqrt_dim ]*b[index_i+ i* sqrt_dim];
//printf("c[%i]+=a[%i]*b[%i]\n", index,i+index_j * sqrt_dim ,index_i+ i* sqrt_dim );
}
}
else{ //if less threads than array size
if(index!=thread_number-1){//if not last thread deal with size_array/thread_nb array entries
for(int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){
printf("Thread %i; Modifying value of index %i \n", index, i);
index_i = (int)i%sqrt_dim;
index_j = (i-index_i)/sqrt_dim;
for(int j=0; j<sqrt_dim;j++){ //row of first matrix
c[i]+=a[j+index_j * sqrt_dim ]*b[index_i+ j* sqrt_dim];
}
}
}
else{ //if last thread deal with all remaining array entries
for(int i=index*(int)(dim/thread_number); i< dim; i++){
printf("Thread %i; Modifying value of index %i\n",index, i );
index_i = (int)i%sqrt_dim;
index_j = (i-index_i)/sqrt_dim;
for(int j=0;j<sqrt_dim;j++){ //row of first matrix
c[i]+=a[j+index_j * sqrt_dim ]*b[index_i+ j* sqrt_dim];
}
}
}
}
}
}
int main(int argc, char *argv[]){
//Measure time
clock_t time_begin;
time_begin=clock();
// pointers to host & device arrays
int *d_array1 = 0,*d_array2 = 0,*d_array3 = 0;
int *h_array1 = 0,*h_array2 = 0,*h_array3 = 0;
int size_array=9; // here, size_array has to be a perfect square
int N=3;
if(argc == 3){
size_array=atoi(argv[1]) * atoi(argv[1]) ;
N=atoi(argv[2]);
}
// malloc columns of host arrays
h_array1 = (int*)malloc( size_array * sizeof(int));
h_array2 = (int*)malloc( size_array * sizeof(int));
h_array3 = (int*)malloc( size_array * sizeof(int));
for(int i=0; i<size_array; i++){
h_array1[i]=1;//rand()%10;
h_array2[i]=1;//rand()%10;
// printf("%i|%i\t", h_array1[i], h_array2[i]);
//if((i+1)%(int)sqrt((float)size_array)==0)
//printf("\n");
}
//printf("\n");
// cudaMalloc a device array
cudaMalloc(&d_array1,size_array * sizeof(int));
cudaMalloc(&d_array2,size_array * sizeof(int));
cudaMalloc(&d_array3,size_array * sizeof(int));
// download and inspect the result on the host:
cudaMemcpy(d_array1, h_array1, sizeof(int)*size_array, cudaMemcpyHostToDevice);
cudaMemcpy(d_array2, h_array2, sizeof(int)*size_array, cudaMemcpyHostToDevice);
dim3 bloque(N,N); // two-dimensional block of N*N threads
dim3 grid(1,1); // two-dimensional grid of M*M blocks
int thread_number= N*N;
multiply<<<grid, bloque>>>(d_array1, d_array2 , d_array3,sqrt((float)size_array), thread_number);
cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
// download and inspect the result on the host:
cudaMemcpy(h_array3, d_array3, sizeof(int)*size_array, cudaMemcpyDeviceToHost);
for(int i=0; i<size_array; i++){
printf("%i\t", h_array3[i]);
if((i+1)%(int)(sqrt((float)size_array))==0)
printf("\n");
}
printf("\n");
// deallocate memory
free(h_array3); free(h_array2); free(h_array1);
cudaFree(d_array3);cudaFree(d_array2);cudaFree(d_array1);
printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.18s
}
|
b49238244627f6580d4e7ee188802bae92fd3376.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
using namespace std;
__global__ void task1(){
printf("Hello World! I am thread %d.\n",threadIdx.x);
}
int main(){
hipLaunchKernelGGL(( task1), dim3(1),dim3(4), 0, 0, );
hipDeviceSynchronize();
return 0;
}
| b49238244627f6580d4e7ee188802bae92fd3376.cu | #include <iostream>
using namespace std;
__global__ void task1(){
printf("Hello World! I am thread %d.\n",threadIdx.x);
}
int main(){
task1<<<1,4>>>();
cudaDeviceSynchronize();
return 0;
}
|
8216dfbe1d2b69e4358814c28aac5d1b810b6a8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/cuda_interface.h"
#include "src/laplaceSolver.h"
#include "src/ellipticalGrid.h"
#include "engine/hcImage.h"
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//
// CUDA Kernels
//
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
/**********************************************************************************************************************
* compute lower boundary
*
* this is correct for the general curvilinear case and the spheric case
**********************************************************************************************************************/
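// One thread per (theta,phi) cell of the innermost shell (i = 0): the
// magnetogram value from d_image is scaled by 1/sin(theta) to a radial field
// estimate B_r, and psi at the boundary is set from the two grid points above
// it with a second-order one-sided stencil, which appears to enforce the
// photospheric Neumann condition dpsi/dr = B_r (up to the sign convention).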
__global__ void
__launch_bounds__(1024, 1)
kernel_computeLowerBoundary(LaplaceSolver &solver)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
SphericalGrid &gr = *solver.grid;
uint numR = gr.numR;
uint numTheta = gr.numTheta;
uint numPhi = gr.numPhi;
// compute position in grid
uint k = idx % numPhi;
uint j = (idx - k) / numPhi;
uint i = 0;
uint ind = k * numR * numTheta + j * numR + i;
uint texInd = (numTheta - 1 - j) * numPhi + k;
if(texInd < numTheta * numPhi)
{
Vec3D pos = gr.pos[ind];
Vec3D pos_p = gr.pos[ind+1];
Vec3D pos_pp = gr.pos[ind+2];
hcFloat psi_p = gr.psi[ind+1];
hcFloat psi_pp = gr.psi[ind+2];
hcFloat B_l = solver.d_image[texInd];
hcFloat B_r = B_l / __sinf(pos[1]);
hcFloat dp = pos_p[0] - pos[0];
hcFloat dpp = pos_pp[0] - pos[0];
#ifdef RSCALE
dp /= r_sol;
dpp /= r_sol;
#endif
hcFloat a = 1 / (dpp*dpp - dp*dp);
hcFloat psi = a * (B_r * (dp*dpp*dpp - dpp*dp*dp) + psi_p*dpp*dpp - psi_pp*dp*dp);
gr.setPsi(ind, psi);
}
}
/**********************************************************************************************************************
* iterate spheric grid
*
* this version is specifically tailored to the spheric case. It is faster than the general-purpose curvilinear
* version.
**********************************************************************************************************************/
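// Jacobi-style sweep of the finite-difference Laplace equation in spherical
// coordinates on a non-uniform grid. The outer shell (i = numR-1) is held at
// psi = 0 (presumably the source-surface condition), neighbours across the
// poles are replaced by the phi-average of the adjacent ring, and the updated
// value goes into the temp array (committed later by kernel_setPsi) together
// with the relative change used as the convergence measure.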
__global__ void
__launch_bounds__(1024, 1)
kernel_iterate_spheric(LaplaceSolver &solver, uint prevBlocks)
{
const hcFloat PI = 3.1415926535897;
uint idx = (blockIdx.x + prevBlocks) * blockDim.x + threadIdx.x;
SphericalGrid &gr = *solver.grid;
uint numR = gr.numR;
uint numT = gr.numTheta;
uint numP = gr.numPhi;
Vec3D *pos = gr.getPosArray();
hcFloat *psi = gr.getPsiArray();
// compute position in grid
uint i = idx % numR;
uint j = ((idx - i) / numR) % numT;
uint k = ((idx-i - j * numR) / numR) / numT;
uint ind = k * numR * numT + j * numR + i;
if(idx < numT * numP * numR && i > 0)
{
if(i==numR-1)
{
gr.setTemp(ind, 0.0);
gr.setRelError(ind, 0.0);
}
else
{
uint ind_r_m = i==0 ? gr.getIndex(i,j,k) : gr.getIndex(i-1,j,k);
uint ind_r_p = i==numR-1 ? gr.getIndex(i,j,k) : gr.getIndex(i+1,j,k);
uint ind_t_m = j==0 ? 0 : gr.getIndex(i,j-1,k);
uint ind_t_p = j==numT-1 ? 0 : gr.getIndex(i,j+1,k);
uint ind_p_m = k==0 ? gr.getIndex(i,j,numP-1) : gr.getIndex(i,j,k-1);
uint ind_p_p = k==numP-1 ? gr.getIndex(i,j,0) : gr.getIndex(i,j,k+1);
hcFloat dr_p = pos[ind_r_p][0] - pos[ind][0];
hcFloat dr_m = pos[ind][0] - pos[ind_r_m][0];
hcFloat dt_p = (j==numT-1 ? PI - pos[ind][1] : pos[ind_t_p][1] - pos[ind][1]);
hcFloat dt_m = (j==0 ? pos[ind][1] : pos[ind][1] - pos[ind_t_m][1]);
hcFloat dp_p = (k==numP-1 ? pos[ind_p_p][2] + 2*PI - pos[ind][2] : pos[ind_p_p][2] - pos[ind][2]);
hcFloat dp_m = (k==0 ? 2*PI - pos[ind_p_m][2] + pos[ind][2] : pos[ind][2] - pos[ind_p_m][2]);
hcFloat r = pos[ind][0];
hcFloat theta = pos[ind][1];
hcFloat sin2 = __sinf(theta) * __sinf(theta);
#ifdef RSCALE
dr_p /= r_sol;
dr_m /= r_sol;
r /= r_sol;
#endif
hcFloat psi_pole= 0.0;
for(uint l=0; l<numP; ++l)
{
uint ind_pole = gr.getIndex(i,j,l);
psi_pole += psi[ind_pole];
}
psi_pole /= numP;
hcFloat psi_ipjk = psi[ind_r_p];
hcFloat psi_imjk = psi[ind_r_m];
hcFloat psi_ijpk = ( j==numT-1 ? psi_pole : psi[ind_t_p]);
hcFloat psi_ijmk = ( j==0 ? psi_pole : psi[ind_t_m]);
hcFloat psi_ijkp = psi[ind_p_p];
hcFloat psi_ijkm = psi[ind_p_m];
/*-----
//----- first version, working but not as accurate as below
//-----
hcFloat h_r = dr_p + dr_m;
hcFloat h_t = dt_p + dt_m;
hcFloat h_p = dp_p + dp_m;
hcFloat A_r = 1 / (dr_p * dr_m * h_r);
hcFloat A_theta = 1 / (dt_p * dt_m * h_t);
hcFloat A_phi = 1 / (dp_p * dp_m * h_p);
hcFloat d = 2*A_r / r * (dr_m*dr_m - dr_p*dr_p)
+ 2*A_r * (dr_m + dr_p)
+ A_theta / (r*r) * cos(theta)/sin(theta) * (dt_m*dt_m - dt_p*dt_p)
+ 2*A_theta / (r*r) * (dt_m + dt_p)
+ 2*A_phi / (r*r*sin2)* (dp_m + dp_p);
hcFloat rhs = 2 * A_r / r * (dr_m*dr_m*psi_ipjk - dr_p*dr_p*psi_imjk)
+ 2*A_r * (dr_m * psi_ipjk + dr_p * psi_imjk)
+ A_theta / (r*r) * cos(theta) / sin(theta) * (dt_m*dt_m * psi_ijpk - dt_p*dt_p * psi_ijmk)
+ 2*A_theta / (r*r) * (dt_m * psi_ijpk + dt_p * psi_ijmk)
+ 2*A_phi / (r*r*sin2)* (dp_m * psi_ijkp + dp_p * psi_ijkm);
//-----
////-- higher accuracy here (but numerically unstable due to two "interlocked" solutions)
//-----
hcFloat f_r = - dr_m*dr_pp / (dr_p*dr_p * (dr_pp + dr_p))
+ dr_m*dr_m / (dr_p*dr_p * h_r)
- 2 / h_r
+ dr_p*dr_p / (dr_m*dr_m * h_r)
- dr_p*dr_mm / (dr_m*dr_m * (dr_mm + dr_m));
hcFloat f_t = - dt_m*dt_pp / (dt_p*dt_p * (dt_pp + dt_p))
+ dt_m*dt_m / (dt_p*dt_p * h_t)
- 2 / h_t
+ dt_p*dt_p / (dt_m*dt_m * h_t)
- dt_p*dt_mm / (dt_m*dt_m * (dt_mm + dt_m));
hcFloat f_p = - dp_m*dp_pp / (dp_p*dp_p * (dp_pp + dp_p))
+ dp_m*dp_m / (dp_p*dp_p * h_p)
- 2 / h_p
+ dp_p*dp_p / (dp_m*dp_m * h_p)
- dp_p*dp_mm / (dp_m*dp_m * (dp_mm + dp_m));
hcFloat d = 2 / (r*h_r) * (dr_p/dr_m - dr_m/dr_p)
+ cos(theta)/ (r*r*sin(theta)*h_t) * (dt_p/dt_m - dt_m/dt_p)
+ f_r / h_r
+ f_t / (r*r*h_t)
+ f_p / (r*r*sin2*h_p);
d *= -1;
hcFloat rhs = 2 / (r*h_r) * ( psi_ipjk * dr_m/dr_p - psi_imjk * dr_p/dr_m)
+ 1 / h_r * ( (psi_ippjk * dr_m/dr_pp + psi_ipjk * (dr_m*dr_pp/(dr_p*dr_p) - dr_m/dr_pp)) / (dr_pp + dr_p)
+ (psi_immjk * dr_p/dr_mm + psi_imjk * (dr_p*dr_mm/(dr_m*dr_m) - dr_p/dr_mm)) / (dr_mm + dr_m)
- (psi_ipjk * (dr_m*dr_m/(dr_p*dr_p) - 1) + psi_imjk * (dr_p*dr_p/(dr_m*dr_m) - 1)) / h_r)
+ 1 / (r*r*h_t) * ( cos(theta)/sin(theta) * (dt_m/dt_p * psi_ijpk - dt_p/dt_m * psi_ijmk)
+ (psi_ijppk * dt_m/dt_pp + psi_ijpk * (dt_m*dt_pp/(dt_p*dt_p) - dt_m/dt_pp)) / (dt_pp + dt_p)
+ (psi_ijmmk * dt_p/dt_mm + psi_ijmk * (dt_p*dt_mm/(dt_m*dt_m) - dt_p/dt_mm)) / (dt_mm + dt_m)
- (psi_ijpk * (dt_m*dt_m/(dt_p*dt_p) - 1) + psi_ijmk * (dt_p*dt_p/(dt_m*dt_m) - 1)) / h_t)
+ 1 / (r*r*sin2*h_p)* ( (psi_ijkpp * dp_m/dp_pp + psi_ijkp * (dp_m*dp_pp/(dp_p*dp_p) - dp_m/dp_pp)) / (dp_pp + dp_p)
+ (psi_ijkmm * dp_p/dp_mm + psi_ijkm * (dp_p*dp_mm/(dp_m*dp_m) - dp_p/dp_mm)) / (dp_mm + dp_m)
- (psi_ijkp * (dp_m*dp_m/(dp_p*dp_p) - 1) + psi_ijkm * (dp_p*dp_p/(dp_m*dp_m) - 1)) / h_p);
//-----
/*////-- higher accuracy here
//-----
hcFloat ar = 1 / (dr_p*dr_m*dr_m + dr_p*dr_p*dr_m);
hcFloat at = 1 / (dt_p*dt_m*dt_m + dt_p*dt_p*dt_m);
hcFloat ap = 1 / (dp_p*dp_m*dp_m + dp_p*dp_p*dp_m);
hcFloat fr = 2 * ar * (dr_p*dr_p - dr_m*dr_m) / r - 2 * ar * (dr_p + dr_m);
hcFloat ft = __cosf(theta) * at * (dt_p*dt_p - dt_m*dt_m) / (__sinf(theta) * r*r) - 2 * at * (dt_p + dt_m) / (r*r);
hcFloat fp = - 2 * ap * (dp_p + dp_m) / (r*r*sin2);
hcFloat d = -(fr + ft + fp);
hcFloat rhs = 2 * ar * (psi_imjk * dr_p * (1 - dr_p / r) + psi_ipjk * dr_m * (1 + dr_m / r) )
+ at/(r*r) * (psi_ijmk * dt_p * (2 - __cosf(theta)*dt_p / __sinf(theta)) + psi_ijpk * dt_m * (2 + __cosf(theta)*dt_m / __sinf(theta)) )
+ 2*ap/(r*r*sin2) * (psi_ijkm * dp_p + psi_ijkp * dp_m );//*/
//*/
gr.setTemp(ind, rhs / d);
gr.setRelError(ind, fabsf((gr.getTemp(ind) - gr.getPsi(ind)) / gr.getTemp(ind)));
}
}
}
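// Commits one Jacobi sweep: copies the temp buffer written by the iterate
// kernels back into psi for every shell except the outermost one.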
__global__ void
__launch_bounds__(1024, 1)
kernel_setPsi(LaplaceSolver &solver, uint prevBlocks)
{
uint idx = (blockIdx.x + prevBlocks) * blockDim.x + threadIdx.x;
uint numR = solver.grid->numR;
uint numTheta = solver.grid->numTheta;
uint numPhi = solver.grid->numPhi;
// compute position in grid
uint i = idx%numR;
uint j = ((idx-i)/numR)%numTheta;
uint k = ((idx-i - j * numR) / numR) / numTheta;
if(idx<numTheta*numPhi*numR && i<numR-1)
{
uint ind = solver.grid->getIndex(i,j,k);
hcFloat tmp = solver.grid->getTemp(ind);
solver.grid->setPsi(ind, tmp);
}
}
/**********************************************************************************************************************
* iterate elliptic
**********************************************************************************************************************/
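// Same Jacobi update for the elliptical/curvilinear grid: here the Laplacian
// is evaluated with the precomputed 19-point stencil coefficients s_* stored
// in the grid, with the same phi-averaged pole treatment and fixed outer
// shell as in the spheric kernel.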
__global__ void
__launch_bounds__(1024, 1)
kernel_iterate_elliptic(LaplaceSolver &solver, uint prevBlocks)
{
uint idx = (blockIdx.x + prevBlocks) * blockDim.x + threadIdx.x;
SphericalGrid &gr = *solver.grid;
uint numR = gr.numR;
uint numT = gr.numTheta;
uint numP = gr.numPhi;
Vec3D *pos = gr.getPosArray();
hcFloat *psi = gr.getPsiArray();
// compute position in grid
uint i = idx % numR;
uint j = ((idx - i) / numR) % numT;
uint k = ((idx-i - j * numR) / numR) / numT;
uint ind = k * numR * numT + j * numR + i;
if(idx < numT * numP * numR && i > 0)
{
if(i==numR-1)
{
gr.setTemp(ind, 0.0);
gr.setRelError(ind, 0.0);
}
else
{
uint km = k==0 ? numP-1: k-1;
uint kp = k==numP-1 ? 0 : k+1;
uint ind_imjk = gr.getIndex(i-1, j, k );
uint ind_ipjk = gr.getIndex(i+1, j, k );
uint ind_ijmk = j==0 ? 0 : gr.getIndex(i, j-1, k );
uint ind_ijpk = j==numT-1 ? 0 : gr.getIndex(i, j+1, k );
uint ind_ijkm = gr.getIndex(i, j, km);
uint ind_ijkp = gr.getIndex(i, j, kp);
uint ind_imjmk = j==0 ? 0 : gr.getIndex(i-1, j-1, k );
uint ind_imjpk = j==numT-1 ? 0 : gr.getIndex(i-1, j+1, k );
uint ind_ipjmk = j==0 ? 0 : gr.getIndex(i+1, j-1, k );
uint ind_ipjpk = j==numT-1 ? 0 : gr.getIndex(i+1, j+1, k );
uint ind_imjkm = gr.getIndex(i-1, j, km);
uint ind_imjkp = gr.getIndex(i-1, j, kp);
uint ind_ipjkm = gr.getIndex(i+1, j, km);
uint ind_ipjkp = gr.getIndex(i+1, j, kp);
uint ind_ijmkm = j==0 ? 0 : gr.getIndex(i, j-1, km);
uint ind_ijmkp = j==0 ? 0 : gr.getIndex(i, j-1, kp);
uint ind_ijpkm = j==numT-1 ? 0 : gr.getIndex(i, j+1, km);
uint ind_ijpkp = j==numT-1 ? 0 : gr.getIndex(i, j+1, kp);
hcFloat psi_imjk = psi[ind_imjk];
hcFloat psi_ipjk = psi[ind_ipjk];
hcFloat psi_pole = 0.0;
hcFloat psi_pole_m = 0.0;
hcFloat psi_pole_p = 0.0;
for(uint l=0; l<numP; ++l)
{
uint ind_pole = l * numR * numT + j * numR + i;
uint ind_pole_m = l * numR * numT + j * numR + i-1;
uint ind_pole_p = l * numR * numT + j * numR + i+1;
psi_pole += psi[ind_pole];
psi_pole_m += psi[ind_pole_m];
psi_pole_p += psi[ind_pole_p];
}
psi_pole /= numP;
psi_pole_m /= numP;
psi_pole_p /= numP;
hcFloat psi_ijmk = j==0 ? psi_pole : psi[ind_ijmk];
hcFloat psi_ijpk = j==numT-1 ? psi_pole : psi[ind_ijpk];
hcFloat psi_ijkm = psi[ind_ijkm];
hcFloat psi_ijkp = psi[ind_ijkp];
hcFloat psi_imjmk = j==0 ? psi_pole_m : psi[ind_imjmk];
hcFloat psi_imjpk = j==numT-1 ? psi_pole_m : psi[ind_imjpk];
hcFloat psi_ipjmk = j==0 ? psi_pole_p : psi[ind_ipjmk];
hcFloat psi_ipjpk = j==numT-1 ? psi_pole_p : psi[ind_ipjpk];
hcFloat psi_imjkm = psi[ind_imjkm];
hcFloat psi_imjkp = psi[ind_imjkp];
hcFloat psi_ipjkm = psi[ind_ipjkm];
hcFloat psi_ipjkp = psi[ind_ipjkp];
hcFloat psi_ijmkm = j==0 ? psi_pole : psi[ind_ijmkm];
hcFloat psi_ijmkp = j==0 ? psi_pole : psi[ind_ijmkp];
hcFloat psi_ijpkm = j==numT-1 ? psi_pole : psi[ind_ijpkm];
hcFloat psi_ijpkp = j==numT-1 ? psi_pole : psi[ind_ijpkp];
hcFloat s_ijk = gr.s_ijk[ind];
hcFloat s_imjk = gr.s_imjk[ind];
hcFloat s_ipjk = gr.s_ipjk[ind];
hcFloat s_ijmk = gr.s_ijmk[ind];
hcFloat s_ijpk = gr.s_ijpk[ind];
hcFloat s_ijkm = gr.s_ijkm[ind];
hcFloat s_ijkp = gr.s_ijkp[ind];
hcFloat s_imjmk = gr.s_imjmk[ind];
hcFloat s_imjpk = gr.s_imjpk[ind];
hcFloat s_ipjmk = gr.s_ipjmk[ind];
hcFloat s_ipjpk = gr.s_ipjpk[ind];
hcFloat s_imjkm = gr.s_imjkm[ind];
hcFloat s_imjkp = gr.s_imjkp[ind];
hcFloat s_ipjkm = gr.s_ipjkm[ind];
hcFloat s_ipjkp = gr.s_ipjkp[ind];
hcFloat s_ijmkm = gr.s_ijmkm[ind];
hcFloat s_ijmkp = gr.s_ijmkp[ind];
hcFloat s_ijpkm = gr.s_ijpkm[ind];
hcFloat s_ijpkp = gr.s_ijpkp[ind];
hcFloat rhs = psi_imjmk * s_imjmk + psi_imjpk * s_imjpk
+ psi_ipjmk * s_ipjmk + psi_ipjpk * s_ipjpk
+ psi_imjkm * s_imjkm + psi_imjkp * s_imjkp
+ psi_ipjkm * s_ipjkm + psi_ipjkp * s_ipjkp
+ psi_ijmkm * s_ijmkm + psi_ijmkp * s_ijmkp
+ psi_ijpkm * s_ijpkm + psi_ijpkp * s_ijpkp
+ psi_imjk * s_imjk + psi_ipjk * s_ipjk
+ psi_ijmk * s_ijmk + psi_ijpk * s_ijpk
+ psi_ijkm * s_ijkm + psi_ijkp * s_ijkp;
/* // relaxation
hcFloat newVal = -1/s_ijk * rhs;
hcFloat psi = gr.getPsi(ind);
hcFloat relax = 0.2; // smaller than 1 -> under relaxation, otherwise -> overrelaxation
hcFloat retval = (1.0-relax)*psi + relax * newVal;
/*/
hcFloat retval = -1/s_ijk * rhs;//*/
gr.setTemp(ind, retval);
gr.setRelError(ind, fabsf((gr.getTemp(ind) - gr.getPsi(ind)) / gr.getTemp(ind)));
if (isnan(gr.getTemp(ind)) || isinf(gr.getTemp(ind)))
printf("\nNaN/INF idx: %u, i/j/k: %u(%u)/%u(%u)/%u(%u)", idx, i, numR, j, numT, k, numP);
}
}
}
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//
// LaplaceSolver member functions
//
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
void LaplaceSolver::initNULL_GPU(){
d_image = NULL;
d_surfaceShapeLookup = NULL;
}
void LaplaceSolver::clear_GPU(){
if(d_image != NULL) hipFree(d_image);
if(d_surfaceShapeLookup != NULL) hipFree(d_surfaceShapeLookup);
if(grid != NULL) grid->clear_GPU();
checkCUDAerror("LaplaceSolver::clear_GPU");
onDevice = false;
initNULL_GPU();
}
bool LaplaceSolver::pushToDevice(hcImageFITS &photBoundary)
{
// TODO test if there is enough memory available
clear_GPU();
hipMalloc((void **)&d_solver, sizeof(LaplaceSolver));
hipMemcpy(d_solver, this, sizeof(LaplaceSolver), hipMemcpyHostToDevice);
hcFloat *imgData = new hcFloat[photBoundary.width * photBoundary.height];
for(uint x=0; x<photBoundary.width; ++x)
for(uint y=0; y<photBoundary.height; ++y)
imgData[y*photBoundary.width +x] = photBoundary(x,y);
hipMalloc((void **)&d_image, photBoundary.width * photBoundary.height * sizeof(hcFloat ));
hipMemcpy(d_image, imgData, photBoundary.width * photBoundary.height * sizeof(hcFloat ), hipMemcpyHostToDevice);
hipMemcpy(&(d_solver->d_image), &d_image, sizeof(hcFloat *), hipMemcpyHostToDevice);
grid->pushToGPU();
if(!grid->isElliptical()) hipMemcpy(&(d_solver->grid), &(grid->d_memStruct), sizeof(SphericalGrid*), hipMemcpyHostToDevice);
else hipMemcpy(&(d_solver->grid), &(((EllipticalGrid*)grid)->d_memStruct), sizeof(EllipticalGrid*), hipMemcpyHostToDevice);
uint null = 0;
hipMemcpy(&(d_solver->d_solver), &null, sizeof(uint *), hipMemcpyHostToDevice);
onDevice = true;
checkCUDAerror("LaplaceSolver::pushToDevice");
return true;
}
void LaplaceSolver::computeLowerBoundaryPsi_GPU()
{
uint TPB = MAX_TPB;
uint numBlocks_part1 = grid->numTheta * grid->numPhi / TPB + ((grid->numTheta * grid->numPhi)%TPB == 0 ? 0 : 1);
hipLaunchKernelGGL(( kernel_computeLowerBoundary), dim3(numBlocks_part1), dim3(TPB), 0, 0, *this->d_solver);
hipDeviceSynchronize();
checkCUDAerror("Kernel_computeLowerBoundary!\n");
}
void LaplaceSolver::iterate_GPU()
{
uint TPB = MAX_TPB;
uint BPI = MAX_NUM_BLOCK;
uint numGridPoints = grid->numTheta * grid->numPhi * grid->numR;
uint numBlocks = numGridPoints / TPB + (numGridPoints%TPB == 0 ? 0 : 1);
uint numSubdiv = numBlocks / BPI + (numBlocks%BPI == 0 ? 0 : 1);
for(uint num=0;num<numSubdiv;++num)
{
checkCUDAerror(__FILE__, __LINE__);
#ifdef SPHERICUNITVEC
if(!grid->isElliptical())hipLaunchKernelGGL(( kernel_iterate_spheric), dim3(BPI), dim3(TPB), 0, 0, *this->d_solver, num * BPI);
else
#endif
hipLaunchKernelGGL(( kernel_iterate_elliptic), dim3(BPI), dim3(TPB), 0, 0, *this->d_solver, num * BPI);
hipDeviceSynchronize();
checkCUDAerror(__FILE__, __LINE__);
hipLaunchKernelGGL(( kernel_setPsi), dim3(BPI), dim3(TPB), 0, 0, *this->d_solver, num * BPI);
hipDeviceSynchronize();
checkCUDAerror(__FILE__, __LINE__);
}
}
int LaplaceSolver::computeSolution_GPU(uint maxNumIterations, float errorThreshold, hcImageFITS &photBoundary, bool verbose)
{
printStdOutMess(__FILE__, __LINE__, "CUDA-version of PFSS solver started");
grid->clearValues();
// first, compute the lower boundary potential via B_r = - dPsi / dr -> Psi_0 = Psi_1 + dr * B_r
// => Psi_0(t=0) = dr * B_l * csc(theta)
pushToDevice(photBoundary);
computeLowerBoundaryPsi_GPU();
grid->extract_relError();
// now perform the main loop. Compute next value for Psi and abort
// if the difference to the step before is below threshold
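// The relative error is only extracted from the device every threshCheckSteps
// iterations to avoid a device-to-host copy on every sweep.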
hcFloat maxError = errorThreshold;
uint counter = 0;
uint threshCheckSteps = 100;
uint loopnum = 0;
while(loopnum < maxNumIterations)
{
++counter;
iterate_GPU();
computeLowerBoundaryPsi_GPU();
if(counter == threshCheckSteps)
{
grid->extract_relError();
counter = 0;
maxError = 0.0;
for(uint i=1;i<grid->numR;++i)
for(uint j=0;j<grid->numTheta;++j)
for(uint k=0;k<grid->numPhi;++k)
{
uint ind = grid->getIndex(i,j,k);
if(grid->getRelError(ind) > maxError) maxError = grid->getRelError(ind);
}
}
#ifdef VERBOSE
cout << "\r \r";
cout << "\tstep " << loopnum << " / " << maxNumIterations << ", error / threshold: " << toStr(maxError) << " / " << toStr(errorThreshold);
#endif
if(maxError < errorThreshold)
{
#ifdef VERBOSE
cout << "\n";
#endif
break;
}
++loopnum;
}
grid->pullFromGPU();
for(uint i=0;i<grid->numR;++i)
for(uint j=0;j<grid->numTheta;++j)
for(uint k=0;k<grid->numPhi;++k)
{
uint ind = grid->getIndex(i,j,k);
Vec3D pos = grid->pos[ind];
grid->B[ind] = grid->getBFromPsi(i,j,k);
}
clear_GPU();
solutionComputed = true;
return loopnum;
}
| 8216dfbe1d2b69e4358814c28aac5d1b810b6a8f.cu | #include "src/cuda_interface.h"
#include "src/laplaceSolver.h"
#include "src/ellipticalGrid.h"
#include "engine/hcImage.h"
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//
// CUDA Kernels
//
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
/**********************************************************************************************************************
* compute lower boundary
*
* this is correct for the general curvilinear case and the spheric case
**********************************************************************************************************************/
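// Psi at the innermost shell (i=0) is reconstructed from the photospheric B_r
// (read from d_image) and the next two radial grid points via a one-sided
// finite-difference approximation of dPsi/dr.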
__global__ void
__launch_bounds__(1024, 1)
kernel_computeLowerBoundary(LaplaceSolver &solver)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
SphericalGrid &gr = *solver.grid;
uint numR = gr.numR;
uint numTheta = gr.numTheta;
uint numPhi = gr.numPhi;
// compute position in grid
uint k = idx % numPhi;
uint j = (idx - k) / numPhi;
uint i = 0;
uint ind = k * numR * numTheta + j * numR + i;
uint texInd = (numTheta - 1 - j) * numPhi + k;
if(texInd < numTheta * numPhi)
{
Vec3D pos = gr.pos[ind];
Vec3D pos_p = gr.pos[ind+1];
Vec3D pos_pp = gr.pos[ind+2];
hcFloat psi_p = gr.psi[ind+1];
hcFloat psi_pp = gr.psi[ind+2];
hcFloat B_l = solver.d_image[texInd];
hcFloat B_r = B_l / __sinf(pos[1]);
hcFloat dp = pos_p[0] - pos[0];
hcFloat dpp = pos_pp[0] - pos[0];
#ifdef RSCALE
dp /= r_sol;
dpp /= r_sol;
#endif
hcFloat a = 1 / (dpp*dpp - dp*dp);
hcFloat psi = a * (B_r * (dp*dpp*dpp - dpp*dp*dp) + psi_p*dpp*dpp - psi_pp*dp*dp);
gr.setPsi(ind, psi);
}
}
/**********************************************************************************************************************
* iterate spheric grid
*
* this version is specifically tailored to the spheric case. It is faster than the general purpose curvilinear
* version.
**********************************************************************************************************************/
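// Non-uniform central differences in r, theta and phi are used below; the theta
// boundaries (poles) are handled by averaging psi over all phi samples of the
// neighbouring ring (psi_pole).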
__global__ void
__launch_bounds__(1024, 1)
kernel_iterate_spheric(LaplaceSolver &solver, uint prevBlocks)
{
const hcFloat PI = 3.1415926535897;
uint idx = (blockIdx.x + prevBlocks) * blockDim.x + threadIdx.x;
SphericalGrid &gr = *solver.grid;
uint numR = gr.numR;
uint numT = gr.numTheta;
uint numP = gr.numPhi;
Vec3D *pos = gr.getPosArray();
hcFloat *psi = gr.getPsiArray();
// compute position in grid
uint i = idx % numR;
uint j = ((idx - i) / numR) % numT;
uint k = ((idx-i - j * numR) / numR) / numT;
uint ind = k * numR * numT + j * numR + i;
if(idx < numT * numP * numR && i > 0)
{
if(i==numR-1)
{
gr.setTemp(ind, 0.0);
gr.setRelError(ind, 0.0);
}
else
{
uint ind_r_m = i==0 ? gr.getIndex(i,j,k) : gr.getIndex(i-1,j,k);
uint ind_r_p = i==numR-1 ? gr.getIndex(i,j,k) : gr.getIndex(i+1,j,k);
uint ind_t_m = j==0 ? 0 : gr.getIndex(i,j-1,k);
uint ind_t_p = j==numT-1 ? 0 : gr.getIndex(i,j+1,k);
uint ind_p_m = k==0 ? gr.getIndex(i,j,numP-1) : gr.getIndex(i,j,k-1);
uint ind_p_p = k==numP-1 ? gr.getIndex(i,j,0) : gr.getIndex(i,j,k+1);
hcFloat dr_p = pos[ind_r_p][0] - pos[ind][0];
hcFloat dr_m = pos[ind][0] - pos[ind_r_m][0];
hcFloat dt_p = (j==numT-1 ? PI - pos[ind][1] : pos[ind_t_p][1] - pos[ind][1]);
hcFloat dt_m = (j==0 ? pos[ind][1] : pos[ind][1] - pos[ind_t_m][1]);
hcFloat dp_p = (k==numP-1 ? pos[ind_p_p][2] + 2*PI - pos[ind][2] : pos[ind_p_p][2] - pos[ind][2]);
hcFloat dp_m = (k==0 ? 2*PI - pos[ind_p_m][2] + pos[ind][2] : pos[ind][2] - pos[ind_p_m][2]);
hcFloat r = pos[ind][0];
hcFloat theta = pos[ind][1];
hcFloat sin2 = __sinf(theta) * __sinf(theta);
#ifdef RSCALE
dr_p /= r_sol;
dr_m /= r_sol;
r /= r_sol;
#endif
hcFloat psi_pole= 0.0;
for(uint l=0; l<numP; ++l)
{
uint ind_pole = gr.getIndex(i,j,l);
psi_pole += psi[ind_pole];
}
psi_pole /= numP;
hcFloat psi_ipjk = psi[ind_r_p];
hcFloat psi_imjk = psi[ind_r_m];
hcFloat psi_ijpk = ( j==numT-1 ? psi_pole : psi[ind_t_p]);
hcFloat psi_ijmk = ( j==0 ? psi_pole : psi[ind_t_m]);
hcFloat psi_ijkp = psi[ind_p_p];
hcFloat psi_ijkm = psi[ind_p_m];
/*-----
//----- first version, working but not as accurate as below
//-----
hcFloat h_r = dr_p + dr_m;
hcFloat h_t = dt_p + dt_m;
hcFloat h_p = dp_p + dp_m;
hcFloat A_r = 1 / (dr_p * dr_m * h_r);
hcFloat A_theta = 1 / (dt_p * dt_m * h_t);
hcFloat A_phi = 1 / (dp_p * dp_m * h_p);
hcFloat d = 2*A_r / r * (dr_m*dr_m - dr_p*dr_p)
+ 2*A_r * (dr_m + dr_p)
+ A_theta / (r*r) * cos(theta)/sin(theta) * (dt_m*dt_m - dt_p*dt_p)
+ 2*A_theta / (r*r) * (dt_m + dt_p)
+ 2*A_phi / (r*r*sin2)* (dp_m + dp_p);
hcFloat rhs = 2 * A_r / r * (dr_m*dr_m*psi_ipjk - dr_p*dr_p*psi_imjk)
+ 2*A_r * (dr_m * psi_ipjk + dr_p * psi_imjk)
+ A_theta / (r*r) * cos(theta) / sin(theta) * (dt_m*dt_m * psi_ijpk - dt_p*dt_p * psi_ijmk)
+ 2*A_theta / (r*r) * (dt_m * psi_ijpk + dt_p * psi_ijmk)
+ 2*A_phi / (r*r*sin2)* (dp_m * psi_ijkp + dp_p * psi_ijkm);
//-----
////-- higher accuracy here (but numerically unstable due to two "interlocked" solutions)
//-----
hcFloat f_r = - dr_m*dr_pp / (dr_p*dr_p * (dr_pp + dr_p))
+ dr_m*dr_m / (dr_p*dr_p * h_r)
- 2 / h_r
+ dr_p*dr_p / (dr_m*dr_m * h_r)
- dr_p*dr_mm / (dr_m*dr_m * (dr_mm + dr_m));
hcFloat f_t = - dt_m*dt_pp / (dt_p*dt_p * (dt_pp + dt_p))
+ dt_m*dt_m / (dt_p*dt_p * h_t)
- 2 / h_t
+ dt_p*dt_p / (dt_m*dt_m * h_t)
- dt_p*dt_mm / (dt_m*dt_m * (dt_mm + dt_m));
hcFloat f_p = - dp_m*dp_pp / (dp_p*dp_p * (dp_pp + dp_p))
+ dp_m*dp_m / (dp_p*dp_p * h_p)
- 2 / h_p
+ dp_p*dp_p / (dp_m*dp_m * h_p)
- dp_p*dp_mm / (dp_m*dp_m * (dp_mm + dp_m));
hcFloat d = 2 / (r*h_r) * (dr_p/dr_m - dr_m/dr_p)
+ cos(theta)/ (r*r*sin(theta)*h_t) * (dt_p/dt_m - dt_m/dt_p)
+ f_r / h_r
+ f_t / (r*r*h_t)
+ f_p / (r*r*sin2*h_p);
d *= -1;
hcFloat rhs = 2 / (r*h_r) * ( psi_ipjk * dr_m/dr_p - psi_imjk * dr_p/dr_m)
+ 1 / h_r * ( (psi_ippjk * dr_m/dr_pp + psi_ipjk * (dr_m*dr_pp/(dr_p*dr_p) - dr_m/dr_pp)) / (dr_pp + dr_p)
+ (psi_immjk * dr_p/dr_mm + psi_imjk * (dr_p*dr_mm/(dr_m*dr_m) - dr_p/dr_mm)) / (dr_mm + dr_m)
- (psi_ipjk * (dr_m*dr_m/(dr_p*dr_p) - 1) + psi_imjk * (dr_p*dr_p/(dr_m*dr_m) - 1)) / h_r)
+ 1 / (r*r*h_t) * ( cos(theta)/sin(theta) * (dt_m/dt_p * psi_ijpk - dt_p/dt_m * psi_ijmk)
+ (psi_ijppk * dt_m/dt_pp + psi_ijpk * (dt_m*dt_pp/(dt_p*dt_p) - dt_m/dt_pp)) / (dt_pp + dt_p)
+ (psi_ijmmk * dt_p/dt_mm + psi_ijmk * (dt_p*dt_mm/(dt_m*dt_m) - dt_p/dt_mm)) / (dt_mm + dt_m)
- (psi_ijpk * (dt_m*dt_m/(dt_p*dt_p) - 1) + psi_ijmk * (dt_p*dt_p/(dt_m*dt_m) - 1)) / h_t)
+ 1 / (r*r*sin2*h_p)* ( (psi_ijkpp * dp_m/dp_pp + psi_ijkp * (dp_m*dp_pp/(dp_p*dp_p) - dp_m/dp_pp)) / (dp_pp + dp_p)
+ (psi_ijkmm * dp_p/dp_mm + psi_ijkm * (dp_p*dp_mm/(dp_m*dp_m) - dp_p/dp_mm)) / (dp_mm + dp_m)
- (psi_ijkp * (dp_m*dp_m/(dp_p*dp_p) - 1) + psi_ijkm * (dp_p*dp_p/(dp_m*dp_m) - 1)) / h_p);
//-----
/*////-- higher accuracy here
//-----
hcFloat ar = 1 / (dr_p*dr_m*dr_m + dr_p*dr_p*dr_m);
hcFloat at = 1 / (dt_p*dt_m*dt_m + dt_p*dt_p*dt_m);
hcFloat ap = 1 / (dp_p*dp_m*dp_m + dp_p*dp_p*dp_m);
hcFloat fr = 2 * ar * (dr_p*dr_p - dr_m*dr_m) / r - 2 * ar * (dr_p + dr_m);
hcFloat ft = __cosf(theta) * at * (dt_p*dt_p - dt_m*dt_m) / (__sinf(theta) * r*r) - 2 * at * (dt_p + dt_m) / (r*r);
hcFloat fp = - 2 * ap * (dp_p + dp_m) / (r*r*sin2);
hcFloat d = -(fr + ft + fp);
hcFloat rhs = 2 * ar * (psi_imjk * dr_p * (1 - dr_p / r) + psi_ipjk * dr_m * (1 + dr_m / r) )
+ at/(r*r) * (psi_ijmk * dt_p * (2 - __cosf(theta)*dt_p / __sinf(theta)) + psi_ijpk * dt_m * (2 + __cosf(theta)*dt_m / __sinf(theta)) )
+ 2*ap/(r*r*sin2) * (psi_ijkm * dp_p + psi_ijkp * dp_m );//*/
//*/
gr.setTemp(ind, rhs / d);
gr.setRelError(ind, fabsf((gr.getTemp(ind) - gr.getPsi(ind)) / gr.getTemp(ind)));
}
}
}
__global__ void
__launch_bounds__(1024, 1)
kernel_setPsi(LaplaceSolver &solver, uint prevBlocks)
{
uint idx = (blockIdx.x + prevBlocks) * blockDim.x + threadIdx.x;
uint numR = solver.grid->numR;
uint numTheta = solver.grid->numTheta;
uint numPhi = solver.grid->numPhi;
// compute position in grid
uint i = idx%numR;
uint j = ((idx-i)/numR)%numTheta;
uint k = ((idx-i - j * numR) / numR) / numTheta;
if(idx<numTheta*numPhi*numR && i<numR-1)
{
uint ind = solver.grid->getIndex(i,j,k);
hcFloat tmp = solver.grid->getTemp(ind);
solver.grid->setPsi(ind, tmp);
}
}
/**********************************************************************************************************************
* iterate elliptic
**********************************************************************************************************************/
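// Same structure as the spheric kernel, but the update applies a 19-point stencil
// with precomputed coefficients (gr.s_ijk ... gr.s_ijpkp) stored in the grid.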
__global__ void
__launch_bounds__(1024, 1)
kernel_iterate_elliptic(LaplaceSolver &solver, uint prevBlocks)
{
uint idx = (blockIdx.x + prevBlocks) * blockDim.x + threadIdx.x;
SphericalGrid &gr = *solver.grid;
uint numR = gr.numR;
uint numT = gr.numTheta;
uint numP = gr.numPhi;
Vec3D *pos = gr.getPosArray();
hcFloat *psi = gr.getPsiArray();
// compute position in grid
uint i = idx % numR;
uint j = ((idx - i) / numR) % numT;
uint k = ((idx-i - j * numR) / numR) / numT;
uint ind = k * numR * numT + j * numR + i;
if(idx < numT * numP * numR && i > 0)
{
if(i==numR-1)
{
gr.setTemp(ind, 0.0);
gr.setRelError(ind, 0.0);
}
else
{
uint km = k==0 ? numP-1: k-1;
uint kp = k==numP-1 ? 0 : k+1;
uint ind_imjk = gr.getIndex(i-1, j, k );
uint ind_ipjk = gr.getIndex(i+1, j, k );
uint ind_ijmk = j==0 ? 0 : gr.getIndex(i, j-1, k );
uint ind_ijpk = j==numT-1 ? 0 : gr.getIndex(i, j+1, k );
uint ind_ijkm = gr.getIndex(i, j, km);
uint ind_ijkp = gr.getIndex(i, j, kp);
uint ind_imjmk = j==0 ? 0 : gr.getIndex(i-1, j-1, k );
uint ind_imjpk = j==numT-1 ? 0 : gr.getIndex(i-1, j+1, k );
uint ind_ipjmk = j==0 ? 0 : gr.getIndex(i+1, j-1, k );
uint ind_ipjpk = j==numT-1 ? 0 : gr.getIndex(i+1, j+1, k );
uint ind_imjkm = gr.getIndex(i-1, j, km);
uint ind_imjkp = gr.getIndex(i-1, j, kp);
uint ind_ipjkm = gr.getIndex(i+1, j, km);
uint ind_ipjkp = gr.getIndex(i+1, j, kp);
uint ind_ijmkm = j==0 ? 0 : gr.getIndex(i, j-1, km);
uint ind_ijmkp = j==0 ? 0 : gr.getIndex(i, j-1, kp);
uint ind_ijpkm = j==numT-1 ? 0 : gr.getIndex(i, j+1, km);
uint ind_ijpkp = j==numT-1 ? 0 : gr.getIndex(i, j+1, kp);
hcFloat psi_imjk = psi[ind_imjk];
hcFloat psi_ipjk = psi[ind_ipjk];
hcFloat psi_pole = 0.0;
hcFloat psi_pole_m = 0.0;
hcFloat psi_pole_p = 0.0;
for(uint l=0; l<numP; ++l)
{
uint ind_pole = l * numR * numT + j * numR + i;
uint ind_pole_m = l * numR * numT + j * numR + i-1;
uint ind_pole_p = l * numR * numT + j * numR + i+1;
psi_pole += psi[ind_pole];
psi_pole_m += psi[ind_pole_m];
psi_pole_p += psi[ind_pole_p];
}
psi_pole /= numP;
psi_pole_m /= numP;
psi_pole_p /= numP;
hcFloat psi_ijmk = j==0 ? psi_pole : psi[ind_ijmk];
hcFloat psi_ijpk = j==numT-1 ? psi_pole : psi[ind_ijpk];
hcFloat psi_ijkm = psi[ind_ijkm];
hcFloat psi_ijkp = psi[ind_ijkp];
hcFloat psi_imjmk = j==0 ? psi_pole_m : psi[ind_imjmk];
hcFloat psi_imjpk = j==numT-1 ? psi_pole_m : psi[ind_imjpk];
hcFloat psi_ipjmk = j==0 ? psi_pole_p : psi[ind_ipjmk];
hcFloat psi_ipjpk = j==numT-1 ? psi_pole_p : psi[ind_ipjpk];
hcFloat psi_imjkm = psi[ind_imjkm];
hcFloat psi_imjkp = psi[ind_imjkp];
hcFloat psi_ipjkm = psi[ind_ipjkm];
hcFloat psi_ipjkp = psi[ind_ipjkp];
hcFloat psi_ijmkm = j==0 ? psi_pole : psi[ind_ijmkm];
hcFloat psi_ijmkp = j==0 ? psi_pole : psi[ind_ijmkp];
hcFloat psi_ijpkm = j==numT-1 ? psi_pole : psi[ind_ijpkm];
hcFloat psi_ijpkp = j==numT-1 ? psi_pole : psi[ind_ijpkp];
hcFloat s_ijk = gr.s_ijk[ind];
hcFloat s_imjk = gr.s_imjk[ind];
hcFloat s_ipjk = gr.s_ipjk[ind];
hcFloat s_ijmk = gr.s_ijmk[ind];
hcFloat s_ijpk = gr.s_ijpk[ind];
hcFloat s_ijkm = gr.s_ijkm[ind];
hcFloat s_ijkp = gr.s_ijkp[ind];
hcFloat s_imjmk = gr.s_imjmk[ind];
hcFloat s_imjpk = gr.s_imjpk[ind];
hcFloat s_ipjmk = gr.s_ipjmk[ind];
hcFloat s_ipjpk = gr.s_ipjpk[ind];
hcFloat s_imjkm = gr.s_imjkm[ind];
hcFloat s_imjkp = gr.s_imjkp[ind];
hcFloat s_ipjkm = gr.s_ipjkm[ind];
hcFloat s_ipjkp = gr.s_ipjkp[ind];
hcFloat s_ijmkm = gr.s_ijmkm[ind];
hcFloat s_ijmkp = gr.s_ijmkp[ind];
hcFloat s_ijpkm = gr.s_ijpkm[ind];
hcFloat s_ijpkp = gr.s_ijpkp[ind];
hcFloat rhs = psi_imjmk * s_imjmk + psi_imjpk * s_imjpk
+ psi_ipjmk * s_ipjmk + psi_ipjpk * s_ipjpk
+ psi_imjkm * s_imjkm + psi_imjkp * s_imjkp
+ psi_ipjkm * s_ipjkm + psi_ipjkp * s_ipjkp
+ psi_ijmkm * s_ijmkm + psi_ijmkp * s_ijmkp
+ psi_ijpkm * s_ijpkm + psi_ijpkp * s_ijpkp
+ psi_imjk * s_imjk + psi_ipjk * s_ipjk
+ psi_ijmk * s_ijmk + psi_ijpk * s_ijpk
+ psi_ijkm * s_ijkm + psi_ijkp * s_ijkp;
/* // relaxation
hcFloat newVal = -1/s_ijk * rhs;
hcFloat psi = gr.getPsi(ind);
hcFloat relax = 0.2; // smaller than 1 -> under relaxation, otherwise -> overrelaxation
hcFloat retval = (1.0-relax)*psi + relax * newVal;
/*/
hcFloat retval = -1/s_ijk * rhs;//*/
gr.setTemp(ind, retval);
gr.setRelError(ind, fabsf((gr.getTemp(ind) - gr.getPsi(ind)) / gr.getTemp(ind)));
if (isnan(gr.getTemp(ind)) || isinf(gr.getTemp(ind)))
printf("\nNaN/INF idx: %u, i/j/k: %u(%u)/%u(%u)/%u(%u)", idx, i, numR, j, numT, k, numP);
}
}
}
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//
// LaplaceSolver member functions
//
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
//------------------------------------------------------------------------------------------------------------
void LaplaceSolver::initNULL_GPU(){
d_image = NULL;
d_surfaceShapeLookup = NULL;
}
void LaplaceSolver::clear_GPU(){
if(d_image != NULL) cudaFree(d_image);
if(d_surfaceShapeLookup != NULL) cudaFree(d_surfaceShapeLookup);
if(grid != NULL) grid->clear_GPU();
checkCUDAerror("LaplaceSolver::clear_GPU");
onDevice = false;
initNULL_GPU();
}
bool LaplaceSolver::pushToDevice(hcImageFITS &photBoundary)
{
// TODO test if there is enough memory available
clear_GPU();
cudaMalloc((void **)&d_solver, sizeof(LaplaceSolver));
cudaMemcpy(d_solver, this, sizeof(LaplaceSolver), cudaMemcpyHostToDevice);
hcFloat *imgData = new hcFloat[photBoundary.width * photBoundary.height];
for(uint x=0; x<photBoundary.width; ++x)
for(uint y=0; y<photBoundary.height; ++y)
imgData[y*photBoundary.width +x] = photBoundary(x,y);
cudaMalloc((void **)&d_image, photBoundary.width * photBoundary.height * sizeof(hcFloat ));
cudaMemcpy(d_image, imgData, photBoundary.width * photBoundary.height * sizeof(hcFloat ), cudaMemcpyHostToDevice);
cudaMemcpy(&(d_solver->d_image), &d_image, sizeof(hcFloat *), cudaMemcpyHostToDevice);
grid->pushToGPU();
if(!grid->isElliptical()) cudaMemcpy(&(d_solver->grid), &(grid->d_memStruct), sizeof(SphericalGrid*), cudaMemcpyHostToDevice);
else cudaMemcpy(&(d_solver->grid), &(((EllipticalGrid*)grid)->d_memStruct), sizeof(EllipticalGrid*), cudaMemcpyHostToDevice);
uint null = 0;
cudaMemcpy(&(d_solver->d_solver), &null, sizeof(uint *), cudaMemcpyHostToDevice);
onDevice = true;
checkCUDAerror("LaplaceSolver::pushToDevice");
return true;
}
void LaplaceSolver::computeLowerBoundaryPsi_GPU()
{
uint TPB = MAX_TPB;
uint numBlocks_part1 = grid->numTheta * grid->numPhi / TPB + ((grid->numTheta * grid->numPhi)%TPB == 0 ? 0 : 1);
kernel_computeLowerBoundary<<<numBlocks_part1, TPB, 0>>>(*this->d_solver);
cudaDeviceSynchronize();
checkCUDAerror("Kernel_computeLowerBoundary!\n");
}
void LaplaceSolver::iterate_GPU()
{
uint TPB = MAX_TPB;
uint BPI = MAX_NUM_BLOCK;
uint numGridPoints = grid->numTheta * grid->numPhi * grid->numR;
uint numBlocks = numGridPoints / TPB + (numGridPoints%TPB == 0 ? 0 : 1);
uint numSubdiv = numBlocks / BPI + (numBlocks%BPI == 0 ? 0 : 1);
for(uint num=0;num<numSubdiv;++num)
{
checkCUDAerror(__FILE__, __LINE__);
#ifdef SPHERICUNITVEC
if(!grid->isElliptical()) kernel_iterate_spheric<<< BPI, TPB, 0>>>(*this->d_solver, num * BPI);
else
#endif
kernel_iterate_elliptic<<<BPI, TPB, 0>>>(*this->d_solver, num * BPI);
cudaDeviceSynchronize();
checkCUDAerror(__FILE__, __LINE__);
kernel_setPsi<<<BPI, TPB, 0>>>(*this->d_solver, num * BPI);
cudaDeviceSynchronize();
checkCUDAerror(__FILE__, __LINE__);
}
}
int LaplaceSolver::computeSolution_GPU(uint maxNumIterations, float errorThreshold, hcImageFITS &photBoundary, bool verbose)
{
printStdOutMess(__FILE__, __LINE__, "CUDA-version of PFSS solver started");
grid->clearValues();
// first, compute the lower boundary potential via B_r = - dPsi / dr -> Psi_0 = Psi_1 + dr * B_r
// => Psi_0(t=0) = dr * B_l * csc(theta)
pushToDevice(photBoundary);
computeLowerBoundaryPsi_GPU();
grid->extract_relError();
// now perform the main loop. Compute next value for Psi and abort
// if the difference to the step before is below threshold
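// The relative error is only extracted from the device every threshCheckSteps
// iterations to avoid a device-to-host copy on every sweep.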
hcFloat maxError = errorThreshold;
uint counter = 0;
uint threshCheckSteps = 100;
uint loopnum = 0;
while(loopnum < maxNumIterations)
{
++counter;
iterate_GPU();
computeLowerBoundaryPsi_GPU();
if(counter == threshCheckSteps)
{
grid->extract_relError();
counter = 0;
maxError = 0.0;
for(uint i=1;i<grid->numR;++i)
for(uint j=0;j<grid->numTheta;++j)
for(uint k=0;k<grid->numPhi;++k)
{
uint ind = grid->getIndex(i,j,k);
if(grid->getRelError(ind) > maxError) maxError = grid->getRelError(ind);
}
}
#ifdef VERBOSE
cout << "\r \r";
cout << "\tstep " << loopnum << " / " << maxNumIterations << ", error / threshold: " << toStr(maxError) << " / " << toStr(errorThreshold);
#endif
if(maxError < errorThreshold)
{
#ifdef VERBOSE
cout << "\n";
#endif
break;
}
++loopnum;
}
grid->pullFromGPU();
for(uint i=0;i<grid->numR;++i)
for(uint j=0;j<grid->numTheta;++j)
for(uint k=0;k<grid->numPhi;++k)
{
uint ind = grid->getIndex(i,j,k);
Vec3D pos = grid->pos[ind];
grid->B[ind] = grid->getBFromPsi(i,j,k);
}
clear_GPU();
solutionComputed = true;
return loopnum;
}
|
8d4f96a2a1b1568f1e222600a1006ba90a0668db.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantHelper.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <execution/LaunchContext.h>
#include <ops/specials.h>
#include <helpers/logger.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <execution/AffinityManager.h>
#include <array/PrimaryPointerDeallocator.h>
#define CONSTANT_LIMIT 49152
__constant__ char deviceConstantMemory[CONSTANT_LIMIT];
namespace sd {
static void* getConstantSpace() {
Nd4jPointer dConstAddr;
auto dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw cuda_exception::build("hipGetSymbolAddress(...) failed", dZ);
return dConstAddr;
}
int ConstantHelper::getCurrentDevice() {
return AffinityManager::currentDeviceId();
}
int ConstantHelper::getNumberOfDevices() {
return AffinityManager::numberOfDevices();
}
ConstantHelper::ConstantHelper() {
auto initialDevice = getCurrentDevice();
auto numDevices = getNumberOfDevices();
_devicePointers.resize(numDevices);
_deviceOffsets.resize(numDevices);
_cache.resize(numDevices);
_counters.resize(numDevices);
// filling all pointers
for (int e = 0; e < numDevices; e++) {
auto res = hipSetDevice(e);
if (res != 0)
throw cuda_exception::build("hipSetDevice failed", res);
auto constant = getConstantSpace();
MAP_IMPL<ConstantDescriptor, ConstantHolder*> devCache;
_devicePointers[e] = constant;
_deviceOffsets[e] = 0;
_cache[e] = devCache;
_counters[e] = 0L;
}
//
auto res = hipSetDevice(initialDevice);
if (res != 0)
throw cuda_exception::build("Final hipSetDevice failed", res);
}
ConstantHelper::~ConstantHelper() {
for (const auto &v:_cache) {
for (const auto &c:v) {
delete c.second;
}
}
}
ConstantHelper& ConstantHelper::getInstance() {
static ConstantHelper instance;
return instance;
}
void* ConstantHelper::replicatePointer(void *src, size_t numBytes, memory::Workspace *workspace) {
std::lock_guard<std::mutex> lock(_mutex);
auto deviceId = getCurrentDevice();
Nd4jPointer constantPtr = nullptr;
Nd4jLong constantOffset = 0L;
if (_devicePointers[deviceId] == 0) {
auto constant = getConstantSpace();
// filling default ptr, which will be 0 probably
_devicePointers[deviceId] = constant;
_deviceOffsets[deviceId] = 0;
constantPtr = constant;
} else {
constantPtr = _devicePointers[deviceId];
constantOffset = _deviceOffsets[deviceId];
}
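// if the request no longer fits into the remaining __constant__ space, fall back
// to an ordinary device allocation instead of constant memory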
if (constantOffset + numBytes >= CONSTANT_LIMIT) {
int8_t *ptr = nullptr;
ALLOCATE_SPECIAL(ptr, workspace, numBytes, int8_t);
auto res = hipMemcpy(ptr, src, numBytes, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("hipMemcpy failed", res);
return ptr;
} else {
auto originalBytes = numBytes;
auto rem = numBytes % 8;
if (rem != 0)
numBytes += 8 - rem;
_deviceOffsets[deviceId] += numBytes;
auto res = hipMemcpyToSymbol(deviceConstantMemory, const_cast<const void *>(src), originalBytes, constantOffset, hipMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("hipMemcpyToSymbol failed", res);
return reinterpret_cast<int8_t *>(constantPtr) + constantOffset;
}
}
ConstantDataBuffer* ConstantHelper::constantBuffer(const ConstantDescriptor &descriptor, sd::DataType dataType) {
const auto deviceId = getCurrentDevice();
// all cache modifications are synchronous
_mutexHolder.lock();
if (_cache[deviceId].count(descriptor) == 0) {
_cache[deviceId][descriptor] = new ConstantHolder();
}
auto holder = _cache[deviceId][descriptor];
// release cache lock
_mutexHolder.unlock();
ConstantDataBuffer* result;
// access to this holder instance is synchronous
std::lock_guard<std::mutex> lock(*holder->mutex());
if (holder->hasBuffer(dataType)) {
result = holder->getConstantDataBuffer(dataType);
} else {
auto numBytes = descriptor.length() * DataTypeUtils::sizeOf(dataType);
auto cbuff = std::make_shared<PointerWrapper>(new int8_t[numBytes], std::make_shared<PrimaryPointerDeallocator>());
_counters[deviceId] += numBytes;
// create buffer with this dtype
if (descriptor.isFloat()) {
BUILD_DOUBLE_SELECTOR(sd::DataType::DOUBLE, dataType, sd::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<double *>(descriptor.floatValues().data()), descriptor.length(), cbuff->pointer()), (sd::DataType::DOUBLE, double), LIBND4J_TYPES);
} else if (descriptor.isInteger()) {
BUILD_DOUBLE_SELECTOR(sd::DataType::INT64, dataType, sd::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<Nd4jLong *>(descriptor.integerValues().data()), descriptor.length(), cbuff->pointer()), (sd::DataType::INT64, Nd4jLong), LIBND4J_TYPES);
}
// we don't have deallocator here.
// TODO: we probably want to make use deallocator here, if we're not using constant memory
auto dbuff = std::make_shared<PointerWrapper>(replicatePointer(cbuff->pointer(), descriptor.length() * DataTypeUtils::sizeOf(dataType)));
ConstantDataBuffer dataBuffer(cbuff, dbuff, descriptor.length(), dataType);
holder->addBuffer(dataBuffer, dataType);
result = holder->getConstantDataBuffer(dataType);
}
return result;
}
Nd4jLong ConstantHelper::getCachedAmount(int deviceId) {
int numDevices = getNumberOfDevices();
if (deviceId > numDevices || deviceId < 0)
return 0L;
else
return _counters[deviceId];
}
} | 8d4f96a2a1b1568f1e222600a1006ba90a0668db.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
* Copyright (c) 2019 Konduit K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantHelper.h>
#include <array/DataTypeUtils.h>
#include <helpers/shape.h>
#include <execution/LaunchContext.h>
#include <ops/specials.h>
#include <helpers/logger.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <execution/AffinityManager.h>
#include <array/PrimaryPointerDeallocator.h>
#define CONSTANT_LIMIT 49152
__constant__ char deviceConstantMemory[CONSTANT_LIMIT];
namespace sd {
static void* getConstantSpace() {
Nd4jPointer dConstAddr;
auto dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0)
throw cuda_exception::build("cudaGetSymbolAddress(...) failed", dZ);
return dConstAddr;
}
int ConstantHelper::getCurrentDevice() {
return AffinityManager::currentDeviceId();
}
int ConstantHelper::getNumberOfDevices() {
return AffinityManager::numberOfDevices();
}
ConstantHelper::ConstantHelper() {
auto initialDevice = getCurrentDevice();
auto numDevices = getNumberOfDevices();
_devicePointers.resize(numDevices);
_deviceOffsets.resize(numDevices);
_cache.resize(numDevices);
_counters.resize(numDevices);
// filling all pointers
for (int e = 0; e < numDevices; e++) {
auto res = cudaSetDevice(e);
if (res != 0)
throw cuda_exception::build("cudaSetDevice failed", res);
auto constant = getConstantSpace();
MAP_IMPL<ConstantDescriptor, ConstantHolder*> devCache;
_devicePointers[e] = constant;
_deviceOffsets[e] = 0;
_cache[e] = devCache;
_counters[e] = 0L;
}
//
auto res = cudaSetDevice(initialDevice);
if (res != 0)
throw cuda_exception::build("Final cudaSetDevice failed", res);
}
ConstantHelper::~ConstantHelper() {
for (const auto &v:_cache) {
for (const auto &c:v) {
delete c.second;
}
}
}
ConstantHelper& ConstantHelper::getInstance() {
static ConstantHelper instance;
return instance;
}
void* ConstantHelper::replicatePointer(void *src, size_t numBytes, memory::Workspace *workspace) {
std::lock_guard<std::mutex> lock(_mutex);
auto deviceId = getCurrentDevice();
Nd4jPointer constantPtr = nullptr;
Nd4jLong constantOffset = 0L;
if (_devicePointers[deviceId] == 0) {
auto constant = getConstantSpace();
// filling default ptr, which will be 0 probably
_devicePointers[deviceId] = constant;
_deviceOffsets[deviceId] = 0;
constantPtr = constant;
} else {
constantPtr = _devicePointers[deviceId];
constantOffset = _deviceOffsets[deviceId];
}
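// if the request no longer fits into the remaining __constant__ space, fall back
// to an ordinary device allocation instead of constant memory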
if (constantOffset + numBytes >= CONSTANT_LIMIT) {
int8_t *ptr = nullptr;
ALLOCATE_SPECIAL(ptr, workspace, numBytes, int8_t);
auto res = cudaMemcpy(ptr, src, numBytes, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("cudaMemcpy failed", res);
return ptr;
} else {
auto originalBytes = numBytes;
auto rem = numBytes % 8;
if (rem != 0)
numBytes += 8 - rem;
_deviceOffsets[deviceId] += numBytes;
auto res = cudaMemcpyToSymbol(deviceConstantMemory, const_cast<const void *>(src), originalBytes, constantOffset, cudaMemcpyHostToDevice);
if (res != 0)
throw cuda_exception::build("cudaMemcpyToSymbol failed", res);
return reinterpret_cast<int8_t *>(constantPtr) + constantOffset;
}
}
ConstantDataBuffer* ConstantHelper::constantBuffer(const ConstantDescriptor &descriptor, sd::DataType dataType) {
const auto deviceId = getCurrentDevice();
// all cache modifications are synchronous
_mutexHolder.lock();
if (_cache[deviceId].count(descriptor) == 0) {
_cache[deviceId][descriptor] = new ConstantHolder();
}
auto holder = _cache[deviceId][descriptor];
// release cache lock
_mutexHolder.unlock();
ConstantDataBuffer* result;
// access to this holder instance is synchronous
std::lock_guard<std::mutex> lock(*holder->mutex());
if (holder->hasBuffer(dataType)) {
result = holder->getConstantDataBuffer(dataType);
} else {
auto numBytes = descriptor.length() * DataTypeUtils::sizeOf(dataType);
auto cbuff = std::make_shared<PointerWrapper>(new int8_t[numBytes], std::make_shared<PrimaryPointerDeallocator>());
_counters[deviceId] += numBytes;
// create buffer with this dtype
if (descriptor.isFloat()) {
BUILD_DOUBLE_SELECTOR(sd::DataType::DOUBLE, dataType, sd::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<double *>(descriptor.floatValues().data()), descriptor.length(), cbuff->pointer()), (sd::DataType::DOUBLE, double), LIBND4J_TYPES);
} else if (descriptor.isInteger()) {
BUILD_DOUBLE_SELECTOR(sd::DataType::INT64, dataType, sd::SpecialTypeConverter::convertGeneric, (nullptr, const_cast<Nd4jLong *>(descriptor.integerValues().data()), descriptor.length(), cbuff->pointer()), (sd::DataType::INT64, Nd4jLong), LIBND4J_TYPES);
}
// we don't have deallocator here.
// TODO: we probably want to make use deallocator here, if we're not using constant memory
auto dbuff = std::make_shared<PointerWrapper>(replicatePointer(cbuff->pointer(), descriptor.length() * DataTypeUtils::sizeOf(dataType)));
ConstantDataBuffer dataBuffer(cbuff, dbuff, descriptor.length(), dataType);
holder->addBuffer(dataBuffer, dataType);
result = holder->getConstantDataBuffer(dataType);
}
return result;
}
Nd4jLong ConstantHelper::getCachedAmount(int deviceId) {
int numDevices = getNumberOfDevices();
if (deviceId > numDevices || deviceId < 0)
return 0L;
else
return _counters[deviceId];
}
} |
38e8580983a095811204a1b7b3a3c316a07d66df.hip | // !!! This is a file automatically generated by hipify!!!
#include "fft.h"
void Tfft(float *t,int l,int bat,hipfftComplex *tf){ //t: input data, l: tube length, bat = a*b: number of tubes, tf: result
hipfftComplex *t_f = new hipfftComplex[l*bat]; //temporary buffer holding the data converted to complex form
for(int i = 0;i<bat;i++)
for(int j = 0;j<l;j++){
t_f[i*l+j].x = t[j*bat+i]; //t is stored row-major
t_f[i*l+j].y = 0; //C2C transform: represent the tensor as complex numbers and rearrange it into mode-3 tubes
}
hipfftComplex *d_f;
hipMalloc((void**)&d_f,l*bat*sizeof(hipfftComplex)); //allocate device memory
hipMemcpy(d_f,t_f,l*bat*sizeof(hipfftComplex),hipMemcpyHostToDevice); //copy the data to be transformed to the device
hipfftHandle plan = 0; //create the FFT plan handle
hipfftPlan1d(&plan,l,HIPFFT_C2C,bat);
hipfftExecC2C(plan,(hipfftComplex *)d_f,(hipfftComplex *)d_f,HIPFFT_FORWARD); //execute the forward transform
hipDeviceSynchronize();
hipMemcpy(t_f,d_f,l*bat*sizeof(hipfftComplex),hipMemcpyDeviceToHost); //copy the result back to the host
hipfftDestroy(plan);//destroy the plan
hipFree(d_f);
//t_f now holds the Fourier-transformed tensor; next, convert it back to the original layout
for(int i =0;i<bat;i++)
for(int j = 0;j<l;j++){
tf[j*bat+i] = t_f[i*l+j];
}
delete[] t_f; //free t_f
t_f = nullptr;
}
void Tifft(float *t,int l,int bat,hipfftComplex *tf){ //t receives the result, tf holds the Fourier-domain data to invert
hipfftComplex *t_f = new hipfftComplex[l*bat]; //temporary buffer holding the complex data to be transformed
for(int i =0;i<bat;i++)
for(int j = 0;j<l;j++){
t_f[i*l+j]= tf[j*bat+i]; //rearrange into mode-3 layout
}
hipfftComplex *d_f;
hipMalloc((void **)&d_f,sizeof(hipfftComplex)*l*bat); //allocate device memory
hipMemcpy(d_f,t_f,l*bat*sizeof(hipfftComplex),hipMemcpyHostToDevice); //copy the data to the device buffer
hipfftHandle plan = 1; //plan handle
hipfftPlan1d(&plan,l,HIPFFT_C2C,bat);
hipfftExecC2C(plan,(hipfftComplex *)d_f,(hipfftComplex *)d_f,HIPFFT_BACKWARD);
hipDeviceSynchronize();
hipMemcpy(t_f,d_f,sizeof(hipfftComplex)*bat*l,hipMemcpyDeviceToHost);
hipfftDestroy(plan);
hipFree(d_f);
//convert the complex-valued result back into t (normalized by l)
for(int i = 0;i<bat;i++)
for(int j = 0;j<l;j++){
t[j*bat+i] = t_f[i*l+j].x/l;
}
delete[] t_f; //free
t_f = nullptr;
}
void Ttranspose(float *t,float *temp,int row,int col,int tube){ //tensor transpose; the arguments give the tensor and its dimensions
/*for(int i= 0;i<row*col*tube;i++){
temp[i] = t1[i];
}*/
for(int i = 0;i<row;i++)
for(int j = 0;j<col;j++){
temp[j*row+i] = t[i*col+j]; //right-hand side is a slice of the original tensor; the first frontal slice is transposed directly
}
for(int k = 1;k<tube;k++)
for(int i = 0;i<row;i++)
for(int j = 0;j<col;j++){
temp[k*col*row+j*row+i] = t[(tube-k)*col*row+i*col+j] ; //
}
//cout<<"entered tensor transpose"<<endl;
}
void mul_pro(hipfftComplex *a,hipfftComplex *b,hipfftComplex *c,int row,int col,int rank){
//plain matrix multiply via cublas: a*b = c, where a is row*rank, b is rank*col, c is row*col
//note: the operands are passed as b*a so that the result copied back to the CPU is c in row-major order
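// hipBLAS expects column-major storage, so calling the gemm with the operands and
// the m/n arguments swapped produces C^T in column-major order, which is exactly
// the row-major C expected on the host; no explicit transpose is needed.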
hipfftComplex *alpha = new hipfftComplex[1];
for(int i = 0;i<1;i++){
alpha[i].x=1; alpha[i].y=0;
}
hipfftComplex *beta = new hipfftComplex[1];
for(int i = 0;i<1;i++){
beta[i].x=0; beta[i].y=0;
}
hipfftComplex *d_a;
hipfftComplex *d_b;
hipfftComplex *d_c;
hipMalloc((void**)&d_a,row*rank*sizeof(hipfftComplex));
hipMalloc((void**)&d_b,rank*col*sizeof(hipfftComplex));
hipMalloc((void**)&d_c,row*col*sizeof(hipfftComplex)); //allocate device memory
hipMemcpy(d_a,a,row*rank*sizeof(hipfftComplex),hipMemcpyHostToDevice);
hipMemcpy(d_b,b,rank*col*sizeof(hipfftComplex),hipMemcpyHostToDevice); //copy the inputs to the device
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasCgemm(
handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
col,
row,
rank,
alpha,
d_b,
col,
d_a,
rank,
beta,
d_c,
col);
hipMemcpy(c,d_c,row*col*sizeof(hipfftComplex),hipMemcpyDeviceToHost); //copy the result back
hipblasDestroy(handle);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c); //free device memory
delete[] alpha;
alpha = nullptr;
delete[] beta;
beta = nullptr;
}
void mulvec_pro(float *a,float *b,float *c,int m,int n,int k){
//dimensions: a is m*k, b is k*n, c is m*n (m, n, k play the roles of row, col, rank above)
float alpha = 1;
float beta = 0;
float *d_a;
float *d_b;
float *d_c;
hipMalloc((void**)&d_a,m*k*sizeof(float));
hipMalloc((void**)&d_b,k*n*sizeof(float));
hipMalloc((void**)&d_c,m*n*sizeof(float)); //allocate device memory
hipMemcpy(d_a,a,m*k*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(d_b,b,k*n*sizeof(float),hipMemcpyHostToDevice); //copy the inputs to the device
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSgemm(
handle,
HIPBLAS_OP_N,
HIPBLAS_OP_N,
n,
m,
k,
&alpha,
d_b,
n,
d_a,
k,
&beta,
d_c,
n);
hipMemcpy(c,d_c,m*n*sizeof(float),hipMemcpyDeviceToHost); //copy the result back
hipblasDestroy(handle);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c); //free device memory
}
void printTensor(int m,int n,int k,float *t){
for(int i = 0;i<k;i++){
for(int j = 0;j<m;j++){
for(int l = 0;l<n;l++){
cout<<t[i*m*n+j*n+l]<<" ";
}
cout<<endl;
cout<<endl;
}
cout<<"______________________________________________________________________"<<endl;
}
}
void printfTensor(int m,int n,int k,hipfftComplex *t){
for(int i = 0;i<k;i++){
for(int j = 0;j<m;j++){
for(int l = 0;l<n;l++){
cout<<t[i*m*n+j*n+l].x<<"+"<<t[i*m*n+j*n+l].y<<"i"<<" ";
}
cout<<endl;
cout<<endl;
}
cout<<"_____________________________________________________________________"<<endl;
}
}
void Tftranspose(hipfftComplex *tf,hipfftComplex *temp,int row,int col,int tube){
//conjugate transpose of a complex tensor; temp receives the result
for(int i = 0;i<tube;i++)
for(int j = 0;j<row;j++)
for(int k = 0;k<col;k++){
temp[i*col*row+k*row+j].x = tf[i*col*row+j*col+k].x;
temp[i*col*row+k*row+j].y = 0 - tf[i*col*row+j*col+k].y;
}
}
| 38e8580983a095811204a1b7b3a3c316a07d66df.cu | #include "fft.h"
void Tfft(float *t,int l,int bat,cufftComplex *tf){ //t: input data, l: tube length, bat = a*b: number of tubes, tf: result
cufftComplex *t_f = new cufftComplex[l*bat]; //temporary buffer holding the data converted to complex form
for(int i = 0;i<bat;i++)
for(int j = 0;j<l;j++){
t_f[i*l+j].x = t[j*bat+i]; //t is stored row-major
t_f[i*l+j].y = 0; //C2C transform: represent the tensor as complex numbers and rearrange it into mode-3 tubes
}
cufftComplex *d_f;
cudaMalloc((void**)&d_f,l*bat*sizeof(cufftComplex)); //allocate device memory
cudaMemcpy(d_f,t_f,l*bat*sizeof(cufftComplex),cudaMemcpyHostToDevice); //copy the data to be transformed to the device
cufftHandle plan = 0; //create the cuFFT plan handle
cufftPlan1d(&plan,l,CUFFT_C2C,bat);
cufftExecC2C(plan,(cufftComplex *)d_f,(cufftComplex *)d_f,CUFFT_FORWARD); //execute the forward transform
cudaDeviceSynchronize();
cudaMemcpy(t_f,d_f,l*bat*sizeof(cufftComplex),cudaMemcpyDeviceToHost); //copy the result back to the host
cufftDestroy(plan);//destroy the plan
cudaFree(d_f);
//t_f now holds the Fourier-transformed tensor; next, convert it back to the original layout
for(int i =0;i<bat;i++)
for(int j = 0;j<l;j++){
tf[j*bat+i] = t_f[i*l+j];
}
delete[] t_f; //free t_f
t_f = nullptr;
}
void Tifft(float *t,int l,int bat,cufftComplex *tf){ //t receives the result, tf holds the Fourier-domain data to invert
cufftComplex *t_f = new cufftComplex[l*bat]; //temporary buffer holding the complex data to be transformed
for(int i =0;i<bat;i++)
for(int j = 0;j<l;j++){
t_f[i*l+j]= tf[j*bat+i]; //rearrange into mode-3 layout
}
cufftComplex *d_f;
cudaMalloc((void **)&d_f,sizeof(cufftComplex)*l*bat); //allocate device memory
cudaMemcpy(d_f,t_f,l*bat*sizeof(cufftComplex),cudaMemcpyHostToDevice); //copy the data to the device buffer
cufftHandle plan = 1; //plan handle
cufftPlan1d(&plan,l,CUFFT_C2C,bat);
cufftExecC2C(plan,(cufftComplex *)d_f,(cufftComplex *)d_f,CUFFT_INVERSE);
cudaDeviceSynchronize();
cudaMemcpy(t_f,d_f,sizeof(cufftComplex)*bat*l,cudaMemcpyDeviceToHost);
cufftDestroy(plan);
cudaFree(d_f);
//convert the complex-valued result back into t (normalized by l)
for(int i = 0;i<bat;i++)
for(int j = 0;j<l;j++){
t[j*bat+i] = t_f[i*l+j].x/l;
}
delete[] t_f; //free
t_f = nullptr;
}
void Ttranspose(float *t,float *temp,int row,int col,int tube){ //tensor transpose; the arguments give the tensor and its dimensions
/*for(int i= 0;i<row*col*tube;i++){
temp[i] = t1[i];
}*/
for(int i = 0;i<row;i++)
for(int j = 0;j<col;j++){
temp[j*row+i] = t[i*col+j]; //right-hand side is a slice of the original tensor; the first frontal slice is transposed directly
}
for(int k = 1;k<tube;k++)
for(int i = 0;i<row;i++)
for(int j = 0;j<col;j++){
temp[k*col*row+j*row+i] = t[(tube-k)*col*row+i*col+j] ; //
}
//cout<<"entered tensor transpose"<<endl;
}
void mul_pro(cufftComplex *a,cufftComplex *b,cufftComplex *c,int row,int col,int rank){
//plain matrix multiply via cublas: a*b = c, where a is row*rank, b is rank*col, c is row*col
//note: the operands are passed as b*a so that the result copied back to the CPU is c in row-major order
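// cuBLAS expects column-major storage, so calling the gemm with the operands and
// the m/n arguments swapped produces C^T in column-major order, which is exactly
// the row-major C expected on the host; no explicit transpose is needed.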
cufftComplex *alpha = new cufftComplex[1];
for(int i = 0;i<1;i++){
alpha[i].x=1; alpha[i].y=0;
}
cufftComplex *beta = new cufftComplex[1];
for(int i = 0;i<1;i++){
beta[i].x=0; beta[i].y=0;
}
cufftComplex *d_a;
cufftComplex *d_b;
cufftComplex *d_c;
cudaMalloc((void**)&d_a,row*rank*sizeof(cufftComplex));
cudaMalloc((void**)&d_b,rank*col*sizeof(cufftComplex));
cudaMalloc((void**)&d_c,row*col*sizeof(cufftComplex)); //allocate device memory
cudaMemcpy(d_a,a,row*rank*sizeof(cufftComplex),cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,rank*col*sizeof(cufftComplex),cudaMemcpyHostToDevice); //copy the inputs to the device
cublasHandle_t handle;
cublasCreate(&handle);
cublasCgemm(
handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
col,
row,
rank,
alpha,
d_b,
col,
d_a,
rank,
beta,
d_c,
col);
cudaMemcpy(c,d_c,row*col*sizeof(cufftComplex),cudaMemcpyDeviceToHost); //copy the result back
cublasDestroy(handle);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c); //free device memory
delete[] alpha;
alpha = nullptr;
delete[] beta;
beta = nullptr;
}
void mulvec_pro(float *a,float *b,float *c,int m,int n,int k){
//dimensions: a is m*k, b is k*n, c is m*n (m, n, k play the roles of row, col, rank above)
float alpha = 1;
float beta = 0;
float *d_a;
float *d_b;
float *d_c;
cudaMalloc((void**)&d_a,m*k*sizeof(float));
cudaMalloc((void**)&d_b,k*n*sizeof(float));
cudaMalloc((void**)&d_c,m*n*sizeof(float)); //allocate device memory
cudaMemcpy(d_a,a,m*k*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,k*n*sizeof(float),cudaMemcpyHostToDevice); //copy the inputs to the device
cublasHandle_t handle;
cublasCreate(&handle);
cublasSgemm(
handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
n,
m,
k,
&alpha,
d_b,
n,
d_a,
k,
&beta,
d_c,
n);
cudaMemcpy(c,d_c,m*n*sizeof(float),cudaMemcpyDeviceToHost); //copy the result back
cublasDestroy(handle);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c); //free device memory
}
void printTensor(int m,int n,int k,float *t){
for(int i = 0;i<k;i++){
for(int j = 0;j<m;j++){
for(int l = 0;l<n;l++){
cout<<t[i*m*n+j*n+l]<<" ";
}
cout<<endl;
cout<<endl;
}
cout<<"______________________________________________________________________"<<endl;
}
}
void printfTensor(int m,int n,int k,cufftComplex *t){
for(int i = 0;i<k;i++){
for(int j = 0;j<m;j++){
for(int l = 0;l<n;l++){
cout<<t[i*m*n+j*n+l].x<<"+"<<t[i*m*n+j*n+l].y<<"i"<<" ";
}
cout<<endl;
cout<<endl;
}
cout<<"_____________________________________________________________________"<<endl;
}
}
void Tftranspose(cufftComplex *tf,cufftComplex *temp,int row,int col,int tube){
//conjugate transpose of a complex tensor; temp receives the result
for(int i = 0;i<tube;i++)
for(int j = 0;j<row;j++)
for(int k = 0;k<col;k++){
temp[i*col*row+k*row+j].x = tf[i*col*row+j*col+k].x;
temp[i*col*row+k*row+j].y = 0 - tf[i*col*row+j*col+k].y;
}
}
|
79b83967bf18d8fc71d785616580ef7471e1be54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<cuda_runtime.h>
#include<cuda.h>
#include<device_launch_parameters.h>
#include "common.h"
#define DTYPE float
#define M 1024
#define N 2048
#define K 1024
using namespace std;
__global__ void GEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){
// Find out the actual row and column that this thread inside the thread block
// maps to.
// int row = blockIdx.y;
// int col = blockDim.x;
// Instead find the iteration of the original loop nest that maps to this
// thread block here.
// It is more elegant to map the iterations instead of row or col. At the end
// it doesn't matter because the iterations actually determine which row or
// col it is.
int i_iter = blockIdx.y;
int j_iter = blockIdx.x;
// K dimension is sequential so this is not mapped to the gpu compute
// hierarchy.
c[i_iter * n + j_iter] = 0.0f;
for(int kk = 0; kk < k; ++kk){
if(i_iter < m && j_iter < n){
c[i_iter * n + j_iter] += a[i_iter * k + kk] * b[kk * n + j_iter];
}
}
}
void hostGEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j ){
DTYPE temp = 0;
for(int kk = 0; kk < k ; ++kk){
temp += a[i * k + kk] * b[kk * n + j];
}
c[i * n + j] = temp;
}
}
}
bool compareGEMM(DTYPE * h_c, DTYPE * h_c_gpu_res, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j ){
if(abs(h_c[i * n + j] - h_c_gpu_res[i * n + j]) > 1e-4)
return false;
}
}
return true;
}
void initMatrix(DTYPE * matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
matrix[i * n + j] = static_cast <DTYPE> (rand()) / static_cast <DTYPE> (RAND_MAX);
}
}
}
void printMatrix(DTYPE * matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
cout<<matrix[i * n + j]<<" ";
}
cout<<endl;
}
cout<<endl;
}
int main(){
DTYPE *d_a, *d_b, *d_c, *h_a, *h_b, *h_c, *h_c_gpu_res;
int m ,n, k;
m = M;
n = N;
k = K;
h_a = (DTYPE*) malloc(m * k * sizeof(DTYPE));
h_b = (DTYPE*) malloc(k * n * sizeof(DTYPE));
h_c = (DTYPE*) malloc(m * n * sizeof(DTYPE));
h_c_gpu_res = (DTYPE*) malloc(m * n * sizeof(DTYPE));
hipMalloc(&d_a, m * k * sizeof(DTYPE));
hipMalloc(&d_b, k * n * sizeof(DTYPE));
hipMalloc(&d_c, m * n * sizeof(DTYPE));
initMatrix(h_a, m , k);
initMatrix(h_b, k , n);
initMatrix(h_c_gpu_res, m , n);
hipMemcpy(d_a, h_a, m * k * sizeof(DTYPE), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, k * n * sizeof(DTYPE), hipMemcpyHostToDevice);
dim3 block(1, 1, 1);
dim3 grid(n, m, 1);
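// launch configuration: one thread per block, so every (row, col) output element
// is computed by its own single-thread block, matching the mapping in the kernel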
hipLaunchKernelGGL(( GEMM), dim3(grid), dim3(block), 0, 0, d_a, d_b, d_c, m , n, k);
hipDeviceSynchronize();
hipMemcpy(h_c_gpu_res, d_c, m * n * sizeof(DTYPE), hipMemcpyDeviceToHost);
hostGEMM(h_a, h_b, h_c, m, n, k);
cout<<compareGEMM(h_c, h_c_gpu_res, m, n)<<endl;
//printMatrix(h_c, m, n);
//cout<<"output gpu\n";
//printMatrix(h_c_gpu_res, m, n);
free(h_a);
free(h_b);
free(h_c);
free(h_c_gpu_res);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 79b83967bf18d8fc71d785616580ef7471e1be54.cu | #include<iostream>
#include<cuda_runtime.h>
#include<cuda.h>
#include<device_launch_parameters.h>
#include "common.h"
#define DTYPE float
#define M 1024
#define N 2048
#define K 1024
using namespace std;
__global__ void GEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){
// Find out the actual row and column that this thread inside the thread block
// maps to.
// int row = blockIdx.y;
// int col = blockDim.x;
// Instead find the iteration of the original loop nest that maps to this
// thread block here.
// It is more elegant to map the iterations instead of row or col. At the end
// it doesn't matter because the iterations actually determine which row or
// col it is.
int i_iter = blockIdx.y;
int j_iter = blockIdx.x;
// K dimension is sequential so this is not mapped to the gpu compute
// hierarchy.
c[i_iter * n + j_iter] = 0.0f;
for(int kk = 0; kk < k; ++kk){
if(i_iter < m && j_iter < n){
c[i_iter * n + j_iter] += a[i_iter * k + kk] * b[kk * n + j_iter];
}
}
}
void hostGEMM(DTYPE * a, DTYPE * b, DTYPE * c, int m, int n, int k){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j ){
DTYPE temp = 0;
for(int kk = 0; kk < k ; ++kk){
temp += a[i * k + kk] * b[kk * n + j];
}
c[i * n + j] = temp;
}
}
}
bool compareGEMM(DTYPE * h_c, DTYPE * h_c_gpu_res, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j ){
if(abs(h_c[i * n + j] - h_c_gpu_res[i * n + j]) > 1e-4)
return false;
}
}
return true;
}
void initMatrix(DTYPE * matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
matrix[i * n + j] = static_cast <DTYPE> (rand()) / static_cast <DTYPE> (RAND_MAX);
}
}
}
void printMatrix(DTYPE * matrix, int m, int n){
for(int i = 0; i < m; ++i){
for(int j = 0; j < n; ++j){
cout<<matrix[i * n + j]<<" ";
}
cout<<endl;
}
cout<<endl;
}
int main(){
DTYPE *d_a, *d_b, *d_c, *h_a, *h_b, *h_c, *h_c_gpu_res;
int m ,n, k;
m = M;
n = N;
k = K;
h_a = (DTYPE*) malloc(m * k * sizeof(DTYPE));
h_b = (DTYPE*) malloc(k * n * sizeof(DTYPE));
h_c = (DTYPE*) malloc(m * n * sizeof(DTYPE));
h_c_gpu_res = (DTYPE*) malloc(m * n * sizeof(DTYPE));
cudaMalloc(&d_a, m * k * sizeof(DTYPE));
cudaMalloc(&d_b, k * n * sizeof(DTYPE));
cudaMalloc(&d_c, m * n * sizeof(DTYPE));
initMatrix(h_a, m , k);
initMatrix(h_b, k , n);
initMatrix(h_c_gpu_res, m , n);
cudaMemcpy(d_a, h_a, m * k * sizeof(DTYPE), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, k * n * sizeof(DTYPE), cudaMemcpyHostToDevice);
dim3 block(1, 1, 1);
dim3 grid(n, m, 1);
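// launch configuration: one thread per block, so every (row, col) output element
// is computed by its own single-thread block, matching the mapping in the kernel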
GEMM<<<grid, block>>>(d_a, d_b, d_c, m , n, k);
cudaDeviceSynchronize();
cudaMemcpy(h_c_gpu_res, d_c, m * n * sizeof(DTYPE), cudaMemcpyDeviceToHost);
hostGEMM(h_a, h_b, h_c, m, n, k);
cout<<compareGEMM(h_c, h_c_gpu_res, m, n)<<endl;
//printMatrix(h_c, m, n);
//cout<<"output gpu\n";
//printMatrix(h_c_gpu_res, m, n);
free(h_a);
free(h_b);
free(h_c);
free(h_c_gpu_res);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
98e0a1543849a44b0fd5818094a3373a1d089c0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2009,2010, Volodymyr Mnih
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that
the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <rocblas.h>
#include <math.h>
#include "rnd_multipliers_32bit.h"
#include "cudamat_kernels.cuh"
extern "C" {
#include "cudamat.cuh"
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = hipblasGetError();
return status != HIPBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
hipError_t err = hipGetLastError();
if (hipSuccess != err)
printf("%s\n", hipGetErrorString( err));
return hipSuccess != err;
}
const char* get_last_cuda_error() {
hipError_t err = hipGetLastError();
return hipGetErrorString( err);
}
int cublas_init() {
hipblasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
int cublas_shutdown() {
hipblasShutdown();
hipDeviceReset();
return 0;
}
int cuda_record_event(hipEvent_t* t) {
hipError_t err = hipEventRecord(*t, 0);
if (hipSuccess != err) {
printf("%s\n", hipGetErrorString( err));
}
return hipSuccess != err;
}
int cuda_synchronize_event(hipEvent_t* t) {
hipError_t err = hipEventSynchronize(*t);
if (hipSuccess != err) {
printf("%s\n", hipGetErrorString( err));
}
return hipSuccess != err;
}
int cuda_create_event(hipEvent_t* t) {
hipError_t err = hipEventCreateWithFlags(t, hipEventBlockingSync);
if (hipSuccess != err) {
printf("%s\n", hipGetErrorString( err));
}
return hipSuccess != err;
}
int cuda_set_device(int deviceId) {
hipSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
bool cuda_is_fermi(int deviceId) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, deviceId);
return prop.major >= 2;
}
int cuda_set_P2P(int gpu1, int gpu2) {
bool is_fermi = cuda_is_fermi(gpu1) && cuda_is_fermi(gpu2);
int access2from1, access1from2;
hipDeviceCanAccessPeer(&access2from1, gpu1, gpu2);
hipDeviceCanAccessPeer(&access1from2, gpu2, gpu1);
bool same_complex = false;
if(access2from1==1 && access1from2==1) same_complex = true;
if(is_fermi && same_complex) {
hipSetDevice(gpu1);
hipDeviceEnablePeerAccess(gpu2, 0); //second argument is flags
hipSetDevice(gpu2);
hipDeviceEnablePeerAccess(gpu1, 0); //second argument is flags
return 0;
} else {
return CUDA_ERROR;
}
}
int init_random(rnd_struct* rnd_state, int seed, const char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
for (int i = 0; i < NUM_RND_STREAMS; i++) {
host_mults[i] = _rand_words[i];
}
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
free(host_mults);
//hipMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//hipMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//hipMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, seed);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
// Allocates and gives up ownership of pointer. Caller must free.
int get_rnd_state(rnd_struct* rnd_state, unsigned long long* host_words_out, int *size_out) {
*size_out = NUM_RND_STREAMS;
host_words_out = (unsigned long long*)malloc(NUM_RND_STREAMS * sizeof(unsigned long long));
if (host_words_out == NULL) {
return ERROR_GENERIC; // Out of memory.
}
hipblasGetVector(NUM_RND_STREAMS, sizeof(unsigned long long), rnd_state->dev_words, 1, host_words_out, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
int init_random_from_state(rnd_struct* rnd_state, unsigned long long* host_words, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
FILE * pFile;
if (cudamatpath == NULL) {
pFile = fopen ("/u/nitish/deepnet/cudamat/rnd_multipliers_32bit.txt","r");
} else {
pFile = fopen (cudamatpath,"r");
}
if (pFile == NULL) {
printf("Error: Missing rnd_multipliers_32bit.txt file\n");
free(host_mults);
return 1;
}
for (int i = 0; i < NUM_RND_STREAMS; i++) {
int r = fscanf (pFile, "%u", &host_mults[i]);
if (r != 1) {
fclose (pFile);
free(host_mults);
return ERROR_GENERIC;
}
}
fclose (pFile);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned long long), host_words, 1, rnd_state->dev_words, 1);
free(host_mults);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
void cuda_sync_threads() {
hipDeviceSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = hipblasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int allocate_device_memory_bbox(cudamat_bbox* mat) {
int size = mat->size;
int numboxes = mat->numboxes;
cublasStatus stat;
stat = hipblasAlloc(size, sizeof(int), (void**)&mat->data_device.seg);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(numboxes, sizeof(int), (void**)&mat->data_device.labels);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(4 * numboxes, sizeof(int), (void**)&mat->data_device.boxes);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
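// Note on the sparse routines below: the buffers appear to follow a CSR-style
// layout, where data holds the nnz non-zero values, indices the column index of
// each value, and indptr (rows + 1 entries) the offset of each row's first
// non-zero within data. This reading is inferred from the allocation sizes and
// from how kSparseDot walks the structure, not stated explicitly in the source.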
int allocate_device_memory_sparse(cudamat_sparse* mat) {
int nnz = mat->nnz, rows = mat->size[0];
cublasStatus stat;
stat = hipblasAlloc(nnz, sizeof(mat->data_device.data[0]), (void**)&mat->data_device.data);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(nnz, sizeof(mat->data_device.indices[0]), (void**)&mat->data_device.indices);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(rows + 1, sizeof(mat->data_device.indptr[0]), (void**)&mat->data_device.indptr);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int copy_to_host_slice(cudamat* mat, int start, int end) {
if (start >= end || end > mat->size[1])
return ERROR_GENERIC;
int len = mat->size[0] * (end - start);
int offset = mat->size[0] * start;
if (mat->on_device) {
hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device + offset, 1, mat->data_host + offset, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
int copy_bbox_to_host(cudamat_bbox* mat) {
if (mat->on_device) {
hipblasGetVector(mat->size, sizeof(int), mat->data_device.seg, 1, mat->data_host.seg, 1);
hipblasGetVector(mat->numboxes, sizeof(int), mat->data_device.labels, 1, mat->data_host.labels, 1);
hipblasGetVector(4 * mat->numboxes, sizeof(int), mat->data_device.boxes, 1, mat->data_host.boxes, 1);
if (check_cublas_error()) return CUBLAS_ERROR;
} else {
return ERROR_NOT_ON_DEVICE;
}
return 0;
}
int copy_to_device_slice(cudamat* mat, int start, int end) {
if (end <= start || end > mat->size[1])
return ERROR_GENERIC;
int len = mat->size[0] * (end - start);
int err_code = 0;
int offset = mat->size[0] * start;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host + offset, 1, mat->data_device + offset, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_bbox_to_device(cudamat_bbox* mat) {
int size = mat->size;
int numboxes = mat->numboxes;
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_bbox(mat);
if (err_code)
return err_code;
}
hipblasSetVector(size, sizeof(int), mat->data_host.seg, 1, mat->data_device.seg, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(numboxes, sizeof(int), mat->data_host.labels, 1, mat->data_device.labels, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(4 * numboxes, sizeof(int), mat->data_host.boxes, 1, mat->data_device.boxes, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_sparse_to_device(cudamat_sparse* mat) {
int len = mat->nnz, rows = mat->size[0];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_sparse(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host.data[0]), mat->data_host.data, 1, mat->data_device.data, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(len, sizeof(mat->data_host.indices[0]), mat->data_host.indices, 1, mat->data_device.indices, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(rows + 1, sizeof(mat->data_host.indptr[0]), mat->data_host.indptr, 1, mat->data_device.indptr, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
// mat 1 : source
// mat 2 : dest
int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipMemcpy(mat2->data_device, mat1->data_device, len * sizeof(float), hipMemcpyDefault);
//hipblasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kGetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kSetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kTranspose), dim3(grid), dim3(threads) , 0, 0, target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = hipblasFree(mat->data_device);
mat->on_device = 0;
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
int free_device_memory_bbox(cudamat_bbox* mat) {
if (mat->on_device) {
cublasStatus stat;
stat = hipblasFree(mat->data_device.seg);
stat = hipblasFree(mat->data_device.labels);
stat = hipblasFree(mat->data_device.boxes);
mat->on_device = 0;
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
int set_shape(cudamat* mat, unsigned int m, unsigned int n) {
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
int reshape(cudamat* mat, int m, int n) {
if (m < 0 && n < 0)
return ERROR_GENERIC;
if (m < 0)
m = (mat->size[0] * mat->size[1]) / n;
if (n < 0)
n = (mat->size[0] * mat->size[1]) / m;
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
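// Usage note (illustrative): passing -1 for one dimension infers it from the total
// element count, e.g. reshape(&A, -1, 1) would flatten a matrix A into a column
// vector, provided the element count divides evenly; A is a hypothetical name.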
int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = source->data_host + first_col * num_rows;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
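// Note: the slice produced above is a non-owning view (owns_data == 0) that aliases
// the parent's device memory, so free_device_memory() on the view is a no-op and the
// parent matrix must remain allocated for as long as the view is in use.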
int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector.
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz) {
mat->data_host.data = data;
mat->data_host.indices = indices;
mat->data_host.indptr = indptr;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
mat->nnz = nnz;
}
void set_on_device(cudamat* mat) {
mat->on_device = 1;
}
int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSampleBernoulli), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSampleBernoulliTanh), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSamplePoisson), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSampleGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len, mult);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kPerturbEnergy), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kPerturbProb), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val, float scale) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomDropout), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, dropprob, val, scale);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int gaussian_dropout(rnd_struct* rnd_state, cudamat* mat, float scale) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomGaussianDropout), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, scale);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddColMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultDiagonalScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddDiagonalScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultDiagonal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddDiagonal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddRowMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanEq), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanEqScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanEq), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kUpperBound), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLowerBound), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanEqScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kUpperBoundScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound_mod_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kUpperBoundModScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int lower_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLowerBoundScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kMaxColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int choose_max_and_accumulate(cudamat* mat, cudamat* acc) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !acc->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (acc->size[0] != mat->size[0] || acc->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kChooseMaxAndAccumulate), dim3(gridDim),dim3(32), 0, 0, mat->data_device, acc->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int choose_max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kChooseMaxColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kArgMaxColumnwise), dim3(gridDim),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = (w + w1 - 1) / w1;
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSqSumColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h, mult, p);
} else if (axis == 1) {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int h1 = floor(sqrt(h));
int h2 = (h + h1 - 1) / h1;
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSqSumRowwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h, mult, p);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int normlimit_by_axis(cudamat* mat, cudamat* target, int axis,
float norm, int constraint) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
if (axis == 0) {
int w1 = floor(sqrt(w));
int w2 = DIVUP(w, w1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kNormLimitColumnwise), dim3(gridDim),dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, norm, w, h, constraint);
} else {
int h1 = floor(sqrt(h));
int h2 = DIVUP(h, h1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kNormLimitRowwise), dim3(gridDim),dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, norm, w, h, constraint);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSign), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_cos(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyCos), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sin(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplySin), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplySigmoid), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyTanh), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyAbs), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyLog1PlusExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_log(cudamat* mat, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLog), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_ceil(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCeil), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_floor(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kFloor), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSqrt), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPow), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPowMatrix), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int compute_cross_entropy(cudamat* dist1, cudamat* dist2, cudamat* target, float tiny) {
unsigned int len = dist1->size[0] * dist1->size[1];
if (!dist1->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (dist1->size[0] != target->size[0] || dist1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (dist1->size[0] != dist2->size[0] || dist1->size[1] != dist2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCrossEntropy), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, dist1->data_device, dist2->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCrossEntropyBernoulli), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCorrectPreds), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len, cutoff);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kReciprocal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
hipblasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
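/* Illustrative usage sketch (not part of the cudamat API): the typical host-side
sequence for computing C = A * B with dot(). Buffers a (m x k), b (k x n) and
c (m x n) are caller-owned, column-major float arrays; the helper name
example_dot_usage is hypothetical and the function is not referenced elsewhere. */
static int example_dot_usage(float* a, float* b, float* c, int m, int k, int n) {
    cudamat A, B, C;
    int err;
    init_from_array(&A, a, m, k);                  /* wrap the host buffers */
    init_from_array(&B, b, k, n);
    init_from_array(&C, c, m, n);
    if ((err = copy_to_device(&A))) return err;    /* allocate + upload */
    if ((err = copy_to_device(&B))) return err;
    if ((err = copy_to_device(&C))) return err;
    err = dot(&A, &B, &C, 0.f, 1.f);               /* C = 1.0 * A*B + 0.0 * C */
    if (!err) err = copy_to_host(&C);              /* result lands back in c */
    free_device_memory(&A);
    free_device_memory(&B);
    free_device_memory(&C);
    return err;
}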
int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
int m = mat1->size[0],
k = mat1->size[1],
k2 = mat2->size[0],
n = mat2->size[1];
if (k != k2) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
unsigned int grid_x = m / COPY_BLOCK_SIZE;
if (m % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = n / COPY_BLOCK_SIZE;
if (n % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kSparseDot), dim3(grid), dim3(threads), 0, 0, m, n, k, mat1->data_device.data,
mat1->data_device.indptr,
mat1->data_device.indices,
mat2->data_device, target->data_device, beta, alpha);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
if (!mat1->on_device || !mat2->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat2->size[0] * mat2->size[1] != len) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = hipblasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipblasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
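// Illustrative use: add_mult(W, dW, -epsilon) performs the in-place update
// W := W - epsilon * dW (e.g. a plain gradient step); W, dW and epsilon are
// hypothetical names, not symbols defined in this file.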
int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddMultSign), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, len, mult);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSubtract), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivide), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSinDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCosDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLogisticDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// mat1 - output of network
// mat2 - target
// out_grad - output gradient
int apply_logistic_grad(cudamat* mat1, cudamat* mat2, cudamat* out_grad) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !out_grad->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != out_grad->size[0] || mat1->size[1] != out_grad->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLogisticGrad), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, out_grad->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// mat1 - output of network
// mat2 - target
// out  - one value per row of mat1: normalized count of correct predictions (see kLogisticCorrectNormalized).
int get_logistic_correct_normalized(cudamat* mat1, cudamat* mat2, cudamat* out) {
if (!mat1->on_device || !mat2->on_device || !out->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != out->size[0] || 1 != out->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
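    // One thread per example: DIVUP rounds the block count up so all mat1->size[0] rows are covered.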
int num_blocks = DIVUP(mat1->size[0], NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kLogisticCorrectNormalized), dim3(num_blocks), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, out->data_device, mat1->size[0], mat1->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kTanhDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kRectifiedLinearDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kRectifiedLinearSmoothDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kAssignScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivideScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
float euclid_norm(cudamat* mat, int* err_code) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device)
        return ERROR_NOT_ON_DEVICE;
    float res = hipblasSnrm2(len, mat->data_device, 1);
    if (check_cublas_error()) {
        *err_code = CUBLAS_ERROR;
        return -1.;
    } else {
        *err_code = 0;
        return res;
    }
}
int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSelectRows), dim3(gridDim), dim3(blockDim), 0, 0, source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2){
const int cols = indices1->size[1]*indices1->size[0],
h = source->size[0],
w = source->size[1];
hipLaunchKernelGGL(( kSwapColumns), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, target->data_device, indices1->data_device, indices2->data_device, cols, w, h);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int shuffleColumns(cudamat* source, cudamat* rand_perm_indices) {
const int h = source->size[0],
w = source->size[1];
if (rand_perm_indices->size[0] != 1 || rand_perm_indices->size[1] != w) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
hipLaunchKernelGGL(( kShuffleColumns), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, source->data_device, rand_perm_indices->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSetSelectedRows), dim3(gridDim), dim3(blockDim), 0, 0, target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
hipLaunchKernelGGL(( kGenerateTranslationsBigVarOff), dim3(kernelBlockGrid), dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, off_x->data_device, off_y->data_device, source_w, target_w, num_channels);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int extract_patches(cudamat* images, cudamat* patches, cudamat* width_offset, cudamat* height_offset, cudamat* flip, int img_width, int img_height, int patch_width, int patch_height) {
int num_images = images->size[1];
int num_colors = images->size[0] / (img_width * img_height);
if (patches->size[1] != num_colors * patch_width * patch_height || patches->size[0] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (width_offset->size[0] * width_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (height_offset->size[0] * height_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (flip->size[0] * flip->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int grid_x = patch_height / COPY_BLOCK_SIZE;
if (patch_height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = patch_width / COPY_BLOCK_SIZE;
if (patch_width % COPY_BLOCK_SIZE)
grid_y++;
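    // Launch geometry: grid x tiles patch_height, grid y tiles patch_width, grid z indexes images;
    // each block is COPY_BLOCK_SIZE x COPY_BLOCK_SIZE threads with one z-layer per color channel.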
dim3 grid(grid_x, grid_y, num_images);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, num_colors);
hipLaunchKernelGGL(( kExtractPatches2), dim3(grid), dim3(threads), 0, 0,
images->data_device, patches->data_device, width_offset->data_device,
height_offset->data_device, flip->data_device, num_images, img_width, img_height,
patch_width, patch_height, num_colors);
//*/
/*
kExtractPatches<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
images->data_device, patches->data_device, indices->data_device, width_offset->data_device,
height_offset->data_device, num_images, img_width, img_height,
patch_width, patch_height, num_colors);
*/
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int blockify(cudamat* source, cudamat* target, int blocksize) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
hipLaunchKernelGGL(( kBlockify), dim3(kernelBlockGrid), dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, source->size[0], blocksize);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int softmax(cudamat* mat, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
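    // Factor the w columns into a near-square w1 x w2 grid so very wide matrices stay within the
    // per-dimension grid limits; kSoftMax runs 32-thread blocks with 32 floats of shared memory each.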
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMax), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int softmax_overwrite(cudamat* mat) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMaxOverwrite), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int softmax_row_major(cudamat* mat) {
return softmax_row_major_multi(mat, mat->size[1]);
}
int softmax_row_major_multi(cudamat* mat, int numslices) {
unsigned int len = mat->size[0] * mat->size[1];
unsigned int h = len / numslices;
if (len % numslices != 0)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
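    // Same near-square grid factorization as softmax(), but over the h row-major slices of length numslices.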
int shared_mem_size = 32 * sizeof(float) ;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSoftMaxRowMajor), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, numslices, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxGrad), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxGradCLS), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
mat->data_device, labels->data_device.labels, indices->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad_row_major(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxGradRowMajor), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_grad_bbox(
cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset,
cudamat* height_offset, cudamat* target, int width, int height, int depth,
float scale_width, float scale_height, int loss_function) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (loss_function == 0) {
//int grid_y = DIVUP(height, COPY_BLOCK_SIZE);
//int grid_x = DIVUP(width, COPY_BLOCK_SIZE) * h;
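        // Logistic loss: one block per (width, height, depth) output location, one thread per row of mat.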
dim3 grid(width, height, depth);
dim3 threads(h, 1, 1);
hipLaunchKernelGGL(( kBoundingBoxLogisticGrad), dim3(grid), dim3(threads), 0, 0,
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device);
} else {
hipLaunchKernelGGL(( kBoundingBoxSoftMaxGrad), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMaxCorrect), dim3(gridDim), dim3(32), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_row_major(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSoftMaxCorrectRowMajor), dim3(gridDim), dim3(32), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !indices->on_device || !labels->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] * target->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] * indices->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSoftMaxCorrectCLS), dim3(gridDim), dim3(32), 0, 0, mat->data_device, labels->data_device.labels, indices->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_row_major_bbox(cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height) {
unsigned int h = mat->size[0] * width * height;
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] * target->size[1] != h) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSoftMaxCorrectBoundingBox), dim3(gridDim), dim3(32), 0, 0,
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, mat->size[0], width,
height, depth, scale_width, scale_height, target->data_device);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_logistic_correct_row_major_bbox(cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height, float cutoff) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int grid_y = DIVUP(height, COPY_BLOCK_SIZE);
int grid_x = DIVUP(width, COPY_BLOCK_SIZE) * h;
dim3 grid(grid_x, grid_y, depth);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kLogisticCorrectBoundingBox), dim3(grid), dim3(threads), 0, 0,
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device, cutoff);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg) {
unsigned int h = mat->size[0],
w = mat->size[1],
w2 = target->size[1];
if (!mat->on_device || !indices->on_device|| !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (NUM_VECTOR_OP_THREADS_PER_BLOCK < w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAccumulateColumns), dim3(h), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, indices->data_device, target->data_device, w, w2, h, mult, avg);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxCrossEntropy), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_cross_entropy_row_major(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != h || labels->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxCrossEntropyRowMajor), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int expand(cudamat* source, cudamat* indices, cudamat* target){
unsigned int h = source->size[0],
w = source->size[1],
w2 = target->size[1];
if (!source->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExpand), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, indices->data_device, target->data_device, h, w, w2);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult){
unsigned int h = source->size[0],
w = source->size[1],
w2 = mat->size[1];
if (!source->on_device || !mat->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExpandAndAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, mat->data_device, indices->data_device, target->data_device, w, h, mult, w2);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int adagrad(cudamat* w, cudamat* grad, cudamat* sum_grad_sq, float decay, float epsilon) {
int len = w->size[0] * w->size[1];
int trans = w->is_trans;
if (!w->on_device || !grad->on_device || !sum_grad_sq->on_device)
return ERROR_NOT_ON_DEVICE;
if (trans != grad->is_trans || trans != sum_grad_sq->is_trans)
return ERROR_TRANSPOSEDNESS;
if (len != grad->size[0] * grad->size[1] || len != sum_grad_sq->size[0] * sum_grad_sq->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
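    // Elementwise AdaGrad step over all len parameters; the exact accumulation/decay rule is in kAdagrad.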
hipLaunchKernelGGL(( kAdagrad), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, w->data_device, grad->data_device, sum_grad_sq->data_device, len, decay, epsilon);
    if (checkCUDAError())
        return CUDA_ERROR;
return 0;
}
}
| 98e0a1543849a44b0fd5818094a3373a1d089c0d.cu | /*
Copyright (c) 2009,2010, Volodymyr Mnih
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that
the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include <math.h>
#include "rnd_multipliers_32bit.h"
#include "cudamat_kernels.cuh"
extern "C" {
#include "cudamat.cuh"
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = cublasGetError();
return status != CUBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
printf("%s\n", cudaGetErrorString( err));
return cudaSuccess != err;
}
const char* get_last_cuda_error() {
cudaError_t err = cudaGetLastError();
return cudaGetErrorString( err);
}
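/* Illustrative sketch only (some_elementwise_op and kSomeKernel are not real symbols in this file):
   nearly every wrapper below follows the same pattern -- validate that operands are on the device
   and have compatible shapes, launch a kernel, then surface launch failures via checkCUDAError().

       int some_elementwise_op(cudamat* mat, cudamat* target) {
           if (!mat->on_device || !target->on_device)
               return ERROR_NOT_ON_DEVICE;
           if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
               return ERROR_INCOMPATIBLE_DIMENSIONS;
           kSomeKernel<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
               mat->data_device, target->data_device, mat->size[0] * mat->size[1]);
           return checkCUDAError() ? CUDA_ERROR : 0;
       }
*/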
int cublas_init() {
cublasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
int cublas_shutdown() {
cublasShutdown();
cudaThreadExit();
return 0;
}
int cuda_record_event(cudaEvent_t* t) {
cudaError_t err = cudaEventRecord(*t, 0);
if (cudaSuccess != err) {
printf("%s\n", cudaGetErrorString( err));
}
return cudaSuccess != err;
}
int cuda_synchronize_event(cudaEvent_t* t) {
cudaError_t err = cudaEventSynchronize(*t);
if (cudaSuccess != err) {
printf("%s\n", cudaGetErrorString( err));
}
return cudaSuccess != err;
}
int cuda_create_event(cudaEvent_t* t) {
cudaError_t err = cudaEventCreateWithFlags(t, cudaEventBlockingSync);
if (cudaSuccess != err) {
printf("%s\n", cudaGetErrorString( err));
}
return cudaSuccess != err;
}
int cuda_set_device(int deviceId) {
cudaSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
bool cuda_is_fermi(int deviceId) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, deviceId);
return prop.major >= 2;
}
int cuda_set_P2P(int gpu1, int gpu2) {
bool is_fermi = cuda_is_fermi(gpu1) && cuda_is_fermi(gpu2);
int access2from1, access1from2;
cudaDeviceCanAccessPeer(&access2from1, gpu1, gpu2);
cudaDeviceCanAccessPeer(&access1from2, gpu2, gpu1);
bool same_complex = false;
if(access2from1==1 && access1from2==1) same_complex = true;
if(is_fermi && same_complex) {
cudaSetDevice(gpu1);
cudaDeviceEnablePeerAccess(gpu2, 0); //second argument is flags
cudaSetDevice(gpu2);
cudaDeviceEnablePeerAccess(gpu1, 0); //second argument is flags
return 0;
} else {
return CUDA_ERROR;
}
}
int init_random(rnd_struct* rnd_state, int seed, const char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
for (int i = 0; i < NUM_RND_STREAMS; i++) {
host_mults[i] = _rand_words[i];
}
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
free(host_mults);
//cudaMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//cudaMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//cudaMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
// Allocates and gives up ownership of pointer. Caller must free.
int get_rnd_state(rnd_struct* rnd_state, unsigned long long* host_words_out, int *size_out) {
*size_out = NUM_RND_STREAMS;
    host_words_out = (unsigned long long*)malloc(NUM_RND_STREAMS * sizeof(unsigned long long));
if (host_words_out == NULL) {
return ERROR_GENERIC; // Out of memory.
}
cublasGetVector(NUM_RND_STREAMS, sizeof(unsigned long long), rnd_state->dev_words, 1, host_words_out, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
int init_random_from_state(rnd_struct* rnd_state, unsigned long long* host_words, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
FILE * pFile;
if (cudamatpath == NULL) {
pFile = fopen ("/u/nitish/deepnet/cudamat/rnd_multipliers_32bit.txt","r");
} else {
pFile = fopen (cudamatpath,"r");
}
    if (pFile == NULL) {
        printf("Error: Missing rnd_multipliers_32bit.txt file\n");
        free(host_mults);
        return 1;
    }
    for (int i = 0; i < NUM_RND_STREAMS; i++) {
        int r = fscanf (pFile, "%u", &host_mults[i]);
        if (r != 1) {
            fclose(pFile);
            free(host_mults);
            return ERROR_GENERIC;
        }
    }
fclose (pFile);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
    cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned long long), host_words, 1, rnd_state->dev_words, 1);
free(host_mults);
cudaDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
void cuda_sync_threads() {
cudaDeviceSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int allocate_device_memory_bbox(cudamat_bbox* mat) {
int size = mat->size;
int numboxes = mat->numboxes;
cublasStatus stat;
stat = cublasAlloc(size, sizeof(int), (void**)&mat->data_device.seg);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(numboxes, sizeof(int), (void**)&mat->data_device.labels);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(4 * numboxes, sizeof(int), (void**)&mat->data_device.boxes);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int allocate_device_memory_sparse(cudamat_sparse* mat) {
int nnz = mat->nnz, rows = mat->size[0];
cublasStatus stat;
stat = cublasAlloc(nnz, sizeof(mat->data_device.data[0]), (void**)&mat->data_device.data);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(nnz, sizeof(mat->data_device.indices[0]), (void**)&mat->data_device.indices);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(rows + 1, sizeof(mat->data_device.indptr[0]), (void**)&mat->data_device.indptr);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int copy_to_host_slice(cudamat* mat, int start, int end) {
if (start >= end || end > mat->size[1])
return ERROR_GENERIC;
int len = mat->size[0] * (end - start);
int offset = mat->size[0] * start;
if (mat->on_device) {
cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device + offset, 1, mat->data_host + offset, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
int copy_bbox_to_host(cudamat_bbox* mat) {
if (mat->on_device) {
cublasGetVector(mat->size, sizeof(int), mat->data_device.seg, 1, mat->data_host.seg, 1);
cublasGetVector(mat->numboxes, sizeof(int), mat->data_device.labels, 1, mat->data_host.labels, 1);
cublasGetVector(4 * mat->numboxes, sizeof(int), mat->data_device.boxes, 1, mat->data_host.boxes, 1);
if (check_cublas_error()) return CUBLAS_ERROR;
} else {
return ERROR_NOT_ON_DEVICE;
}
return 0;
}
int copy_to_device_slice(cudamat* mat, int start, int end) {
if (end <= start || end > mat->size[1])
return ERROR_GENERIC;
int len = mat->size[0] * (end - start);
int err_code = 0;
int offset = mat->size[0] * start;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host + offset, 1, mat->data_device + offset, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_bbox_to_device(cudamat_bbox* mat) {
int size = mat->size;
int numboxes = mat->numboxes;
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_bbox(mat);
if (err_code)
return err_code;
}
cublasSetVector(size, sizeof(int), mat->data_host.seg, 1, mat->data_device.seg, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(numboxes, sizeof(int), mat->data_host.labels, 1, mat->data_device.labels, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(4 * numboxes, sizeof(int), mat->data_host.boxes, 1, mat->data_device.boxes, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_sparse_to_device(cudamat_sparse* mat) {
int len = mat->nnz, rows = mat->size[0];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_sparse(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host.data[0]), mat->data_host.data, 1, mat->data_device.data, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(len, sizeof(mat->data_host.indices[0]), mat->data_host.indices, 1, mat->data_device.indices, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(rows + 1, sizeof(mat->data_host.indptr[0]), mat->data_host.indptr, 1, mat->data_device.indptr, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
// mat 1 : source
// mat 2 : dest
int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
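    // cudaMemcpyDefault lets the runtime infer the transfer direction from the pointer values
    // (requires unified virtual addressing), so the same call covers host and device-to-device
    // copies, including across peers when P2P access has been enabled.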
cudaMemcpy(mat2->data_device, mat1->data_device, len * sizeof(float), cudaMemcpyDefault);
//cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
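    // One COPY_BLOCK_SIZE x COPY_BLOCK_SIZE tile per block; round the grid up so partial tiles
    // at the right/bottom edges are still covered.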
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = cublasFree(mat->data_device);
mat->on_device = 0;
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
int free_device_memory_bbox(cudamat_bbox* mat) {
if (mat->on_device) {
cublasStatus stat;
stat = cublasFree(mat->data_device.seg);
stat = cublasFree(mat->data_device.labels);
stat = cublasFree(mat->data_device.boxes);
mat->on_device = 0;
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
int set_shape(cudamat* mat, unsigned int m, unsigned int n) {
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
int reshape(cudamat* mat, int m, int n) {
if (m < 0 && n < 0)
return ERROR_GENERIC;
if (m < 0)
m = (mat->size[0] * mat->size[1]) / n;
if (n < 0)
n = (mat->size[0] * mat->size[1]) / m;
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = source->data_host + first_col * num_rows;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector.
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz) {
mat->data_host.data = data;
mat->data_host.indices = indices;
mat->data_host.indptr = indptr;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
mat->nnz = nnz;
}
void set_on_device(cudamat* mat) {
mat->on_device = 1;
}
int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSampleBernoulli<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSampleBernoulliTanh<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSamplePoisson<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSampleGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len, mult);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kPerturbEnergy<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kPerturbProb<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val, float scale) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomDropout<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, dropprob, val, scale);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int gaussian_dropout(rnd_struct* rnd_state, cudamat* mat, float scale) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomGaussianDropout<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, scale);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddRowMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kUpperBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLowerBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kUpperBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound_mod_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kUpperBoundModScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int lower_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLowerBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
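/* Column-wise reductions below (max/argmax/sqsum/normlimit) launch one 32-thread
   block per column. To keep each grid dimension under the 65535 limit, the w
   columns are factored into a near-square 2D grid: w1 = floor(sqrt(w)),
   w2 = ceil(w / w1), so w1 * w2 >= w. The kernels are expected to recover the
   column index from blockIdx.x + gridDim.x * blockIdx.y and to ignore any
   blocks past the last column. */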
int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int choose_max_and_accumulate(cudamat* mat, cudamat* acc) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !acc->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (acc->size[0] != mat->size[0] || acc->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kChooseMaxAndAccumulate<<<gridDim,32>>>(mat->data_device, acc->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int choose_max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kChooseMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kArgMaxColumnwise<<<gridDim,32>>>(mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = (w + w1 - 1) / w1;
dim3 gridDim(w1, w2, 1);
kSqSumColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h, mult, p);
} else if (axis == 1) {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int h1 = floor(sqrt(h));
int h2 = (h + h1 - 1) / h1;
dim3 gridDim(h1, h2, 1);
kSqSumRowwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h, mult, p);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int normlimit_by_axis(cudamat* mat, cudamat* target, int axis,
float norm, int constraint) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
if (axis == 0) {
int w1 = floor(sqrt(w));
int w2 = DIVUP(w, w1);
dim3 gridDim(w1, w2, 1);
kNormLimitColumnwise<<<gridDim,32, shared_mem_size>>>(mat->data_device, target->data_device, norm, w, h, constraint);
} else {
int h1 = floor(sqrt(h));
int h2 = DIVUP(h, h1);
dim3 gridDim(h1, h2, 1);
kNormLimitRowwise<<<gridDim,32, shared_mem_size>>>(mat->data_device, target->data_device, norm, w, h, constraint);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
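/* Unary elementwise apply_* wrappers: the same validation (device residency and
   matching shape) followed by a 1D kernel launch over len elements. Unlike the
   binary wrappers above, transposedness is not checked here; each kernel simply
   maps element i of mat to element i of target. */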
int apply_cos(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyCos<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sin(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySin<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyTanh<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyAbs<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_log(cudamat* mat, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLog<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_ceil(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCeil<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_floor(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kFloor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSqrt<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPow<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPowMatrix<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int compute_cross_entropy(cudamat* dist1, cudamat* dist2, cudamat* target, float tiny) {
unsigned int len = dist1->size[0] * dist1->size[1];
if (!dist1->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (dist1->size[0] != target->size[0] || dist1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (dist1->size[0] != dist2->size[0] || dist1->size[1] != dist2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(dist1->data_device, dist2->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCrossEntropyBernoulli<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCorrectPreds<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, cutoff);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kReciprocal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
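/* dot() wraps cublasSgemm from the legacy cuBLAS API. cuBLAS is column-major,
   which matches cudamat's storage, so the leading dimension passed for each
   operand is its number of rows (size[0]); get_transpose_char is assumed to
   return 'n' or 't' depending on is_trans. The computed result is
   target = alpha * op(mat1) * op(mat2) + beta * target. */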
int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
int m = mat1->size[0],
k = mat1->size[1],
k2 = mat2->size[0],
n = mat2->size[1];
if (k != k2) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
unsigned int grid_x = m / COPY_BLOCK_SIZE;
if (m % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = n / COPY_BLOCK_SIZE;
if (n % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kSparseDot<<<grid, threads>>>(m, n, k, mat1->data_device.data,
mat1->data_device.indptr,
mat1->data_device.indices,
mat2->data_device, target->data_device, beta, alpha);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
if (!mat1->on_device || !mat2->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat2->size[0] * mat2->size[1] != len) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
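/* Usage sketch for add_mult (illustrative only; allocation/copy helpers from the
   cudamat host API are assumed and not defined in this file):

     cudamat w, grad;
     // ... allocate both on the device with identical shapes ...
     add_mult(&w, &grad, -0.01f);   // w <- w - 0.01 * grad, via cublasSaxpy

   A non-zero return value is one of the ERROR_* / CUBLAS_ERROR codes used
   throughout this file. */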
int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddMultSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, len, mult);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSubtract<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivide<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
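/* The *_deriv wrappers below combine an activation (or its output) in mat1 with
   an incoming gradient in mat2 and write the elementwise product-rule result to
   target. The exact expressions live in the corresponding kernels (kSinDeriv,
   kLogisticDeriv, kTanhDeriv, ...), so only shape/device validation happens here. */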
int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSinDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCosDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLogisticDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// mat1 - output of network
// mat2 - target
// out_grad - output gradient
int apply_logistic_grad(cudamat* mat1, cudamat* mat2, cudamat* out_grad) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !out_grad->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != out_grad->size[0] || mat1->size[1] != out_grad->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLogisticGrad<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, out_grad->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// mat1 - output of network
// mat2 - target
// out - per-row correctness score, one value per row of mat1 (as computed by kLogisticCorrectNormalized)
int get_logistic_correct_normalized(cudamat* mat1, cudamat* mat2, cudamat* out) {
if (!mat1->on_device || !mat2->on_device || !out->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != out->size[0] || 1 != out->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_blocks = DIVUP(mat1->size[0], NUM_VECTOR_OP_THREADS_PER_BLOCK);
kLogisticCorrectNormalized<<<num_blocks, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, out->data_device, mat1->size[0], mat1->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kTanhDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kRectifiedLinearDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kRectifiedLinearSmoothDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kAssignScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivideScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
float res;
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
res = cublasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2){
const int cols = indices1->size[1]*indices1->size[0],
h = source->size[0],
w = source->size[1];
kSwapColumns<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, target->data_device, indices1->data_device, indices2->data_device, cols, w, h);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
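/* shuffleColumns permutes the columns of `source` in place: the same device
   pointer is passed as both input and output of kShuffleColumns, and
   rand_perm_indices (a 1 x w row vector) is assumed to hold the permutation.
   Note that, unlike most wrappers in this file, no on_device check is performed
   before the launch. */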
int shuffleColumns(cudamat* source, cudamat* rand_perm_indices) {
const int h = source->size[0],
w = source->size[1];
if (rand_perm_indices->size[0] != 1 || rand_perm_indices->size[1] != w) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
kShuffleColumns<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, source->data_device, rand_perm_indices->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
kGenerateTranslationsBigVarOff<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, off_x->data_device, off_y->data_device, source_w, target_w, num_channels);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
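/* extract_patches launch layout (a reading of the call below, not a spec):
   grid    = (ceil(patch_height / COPY_BLOCK_SIZE), ceil(patch_width / COPY_BLOCK_SIZE), num_images)
   threads = (COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, num_colors)
   so each z-slice of the grid handles one image and threadIdx.z selects the
   color channel. This requires COPY_BLOCK_SIZE^2 * num_colors <= 1024 threads
   per block and num_images within the device's grid z-dimension limit. */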
int extract_patches(cudamat* images, cudamat* patches, cudamat* width_offset, cudamat* height_offset, cudamat* flip, int img_width, int img_height, int patch_width, int patch_height) {
int num_images = images->size[1];
int num_colors = images->size[0] / (img_width * img_height);
if (patches->size[1] != num_colors * patch_width * patch_height || patches->size[0] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (width_offset->size[0] * width_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (height_offset->size[0] * height_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (flip->size[0] * flip->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int grid_x = patch_height / COPY_BLOCK_SIZE;
if (patch_height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = patch_width / COPY_BLOCK_SIZE;
if (patch_width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, num_images);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, num_colors);
kExtractPatches2<<<grid, threads>>>(
images->data_device, patches->data_device, width_offset->data_device,
height_offset->data_device, flip->data_device, num_images, img_width, img_height,
patch_width, patch_height, num_colors);
//*/
/*
kExtractPatches<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
images->data_device, patches->data_device, indices->data_device, width_offset->data_device,
height_offset->data_device, num_images, img_width, img_height,
patch_width, patch_height, num_colors);
*/
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int blockify(cudamat* source, cudamat* target, int blocksize) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
kBlockify<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, source->size[0], blocksize);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
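/* softmax / softmax_overwrite operate column-wise: one 32-thread block per
   column, with 32 floats of shared memory presumably used for the per-column
   max and sum reductions inside kSoftMax. The w columns are again factored into
   a near-square 2D grid to respect the per-dimension grid size limit. */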
int softmax(cudamat* mat, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMax<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int softmax_overwrite(cudamat* mat) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMaxOverwrite<<<gridDim, 32, shared_mem_size>>>(mat->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int softmax_row_major(cudamat* mat) {
return softmax_row_major_multi(mat, mat->size[1]);
}
int softmax_row_major_multi(cudamat* mat, int numslices) {
unsigned int len = mat->size[0] * mat->size[1];
unsigned int h = len / numslices;
if (len % numslices != 0)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
int shared_mem_size = 32 * sizeof(float) ;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
kSoftMaxRowMajor<<<gridDim, 32, shared_mem_size>>>(mat->data_device, numslices, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxGrad<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxGradCLS<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
mat->data_device, labels->data_device.labels, indices->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad_row_major(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxGradRowMajor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_grad_bbox(
cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset,
cudamat* height_offset, cudamat* target, int width, int height, int depth,
float scale_width, float scale_height, int loss_function) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (loss_function == 0) {
//int grid_y = DIVUP(height, COPY_BLOCK_SIZE);
//int grid_x = DIVUP(width, COPY_BLOCK_SIZE) * h;
dim3 grid(width, height, depth);
dim3 threads(h, 1, 1);
kBoundingBoxLogisticGrad<<<grid, threads>>>(
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device);
} else {
kBoundingBoxSoftMaxGrad<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMaxCorrect<<<gridDim, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_row_major(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
kSoftMaxCorrectRowMajor<<<gridDim, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !indices->on_device || !labels->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] * target->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] * indices->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
kSoftMaxCorrectCLS<<<gridDim, 32>>>(mat->data_device, labels->data_device.labels, indices->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_row_major_bbox(cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height) {
unsigned int h = mat->size[0] * width * height;
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] * target->size[1] != h) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
kSoftMaxCorrectBoundingBox<<<gridDim, 32>>>(
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, mat->size[0], width,
height, depth, scale_width, scale_height, target->data_device);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_logistic_correct_row_major_bbox(cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height, float cutoff) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int grid_y = DIVUP(height, COPY_BLOCK_SIZE);
int grid_x = DIVUP(width, COPY_BLOCK_SIZE) * h;
dim3 grid(grid_x, grid_y, depth);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kLogisticCorrectBoundingBox<<<grid, threads>>>(
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device, cutoff);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg) {
unsigned int h = mat->size[0],
w = mat->size[1],
w2 = target->size[1];
if (!mat->on_device || !indices->on_device|| !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (NUM_VECTOR_OP_THREADS_PER_BLOCK < w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAccumulateColumns<<<h, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, indices->data_device, target->data_device, w, w2, h, mult, avg);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_cross_entropy_row_major(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != h || labels->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxCrossEntropyRowMajor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int expand(cudamat* source, cudamat* indices, cudamat* target){
unsigned int h = source->size[0],
w = source->size[1],
w2 = target->size[1];
if (!source->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExpand<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, indices->data_device, target->data_device, h, w, w2);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult){
unsigned int h = source->size[0],
w = source->size[1],
w2 = mat->size[1];
if (!source->on_device || !mat->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExpandAndAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, mat->data_device, indices->data_device, target->data_device, w, h, mult, w2);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
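/* The adagrad() wrapper below applies an AdaGrad-style update. It only validates
   shapes and launches kAdagrad with (w, grad, sum_grad_sq, len, decay, epsilon);
   the conventional form it is expected to implement is roughly
       sum_grad_sq <- decay * sum_grad_sq + grad^2
       w           <- w - grad / (sqrt(sum_grad_sq) + epsilon)
   but the exact expression (including where any learning rate is applied) is
   defined in the kernel, not here. */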
int adagrad(cudamat* w, cudamat* grad, cudamat* sum_grad_sq, float decay, float epsilon) {
int len = w->size[0] * w->size[1];
int trans = w->is_trans;
if (!w->on_device || !grad->on_device || !sum_grad_sq->on_device)
return ERROR_NOT_ON_DEVICE;
if (trans != grad->is_trans || trans != sum_grad_sq->is_trans)
return ERROR_TRANSPOSEDNESS;
if (len != grad->size[0] * grad->size[1] || len != sum_grad_sq->size[0] * sum_grad_sq->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAdagrad<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(w->data_device, grad->data_device, sum_grad_sq->data_device, len, decay, epsilon);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
}
|
fad942e3a46d0b2ba35feda6013c9ec0516817cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<curd_lib_host.h>
#ifdef __cplusplus
extern "C" {
#endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// COMMON
//======================================================================================================================================================150
#include "../common.h" // (in main program directory) needed to recognized input variables
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "../util/cuda/cuda.h" // (in path specified to compiler) needed by for device functions
#include "../util/timer/timer.h" // (in path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL
//======================================================================================================================================================150
#include "./kernel_gpu_cuda.cu" // (in current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables
//======================================================================================================================================================150
// HEADER
//======================================================================================================================================================150
#include "./kernel_gpu_cuda_wrapper.h" // (in current directory)
//========================================================================================================================================================================================================200
// KERNEL_GPU_CUDA_WRAPPER FUNCTION
//========================================================================================================================================================================================================200
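// Host-side driver for the b+tree findK search: allocate device buffers, copy the
// tree nodes (knodes), records and per-query state in, launch one block per query,
// copy the answers back, free device memory, and report per-stage timing.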
void
kernel_gpu_cuda_wrapper(record *records,
long records_mem,
knode *knodes,
long knodes_elem,
long knodes_mem,
int order,
long maxheight,
int count,
long *currKnode,
long *offset,
int *keys,
record *ans)
{
//======================================================================================================================================================150
// CPU VARIABLES
//======================================================================================================================================================150
// timer
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
time0 = get_time();
//======================================================================================================================================================150
// GPU SETUP
//======================================================================================================================================================150
//====================================================================================================100
// INITIAL DRIVER OVERHEAD
//====================================================================================================100
hipDeviceSynchronize();
//====================================================================================================100
// EXECUTION PARAMETERS
//====================================================================================================100
int numBlocks;
numBlocks = count; // max # of blocks can be 65,535
int threadsPerBlock;
threadsPerBlock = order < 1024 ? order : 1024;
printf("# of blocks = %d, # of threads/block = %d (ensure that device can handle)\n", numBlocks, threadsPerBlock);
time1 = get_time();
//======================================================================================================================================================150
// GPU MEMORY (MALLOC)
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN
//====================================================================================================100
//==================================================50
// recordsD
//==================================================50
record *recordsD;
hipMalloc((void**)&recordsD, records_mem);
checkCUDAError("hipMalloc recordsD");
//==================================================50
// knodesD
//==================================================50
knode *knodesD;
hipMalloc((void**)&knodesD, knodes_mem);
checkCUDAError("hipMalloc recordsD");
//==================================================50
// currKnodeD
//==================================================50
long *currKnodeD;
hipMalloc((void**)&currKnodeD, count*sizeof(long));
checkCUDAError("hipMalloc currKnodeD");
//==================================================50
// offsetD
//==================================================50
long *offsetD;
hipMalloc((void**)&offsetD, count*sizeof(long));
checkCUDAError("hipMalloc offsetD");
//==================================================50
// keysD
//==================================================50
int *keysD;
hipMalloc((void**)&keysD, count*sizeof(int));
checkCUDAError("hipMalloc keysD");
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansD
//==================================================50
record *ansD;
hipMalloc((void**)&ansD, count*sizeof(record));
checkCUDAError("hipMalloc ansD");
time2 = get_time();
//======================================================================================================================================================150
// GPU MEMORY COPY
//======================================================================================================================================================150
//====================================================================================================100
// GPU MEMORY (MALLOC) COPY IN
//====================================================================================================100
//==================================================50
// recordsD
//==================================================50
hipMemcpy(recordsD, records, records_mem, hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy memD");
//==================================================50
// knodesD
//==================================================50
hipMemcpy(knodesD, knodes, knodes_mem, hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy memD");
//==================================================50
// currKnodeD
//==================================================50
hipMemcpy(currKnodeD, currKnode, count*sizeof(long), hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy currKnodeD");
//==================================================50
// offsetD
//==================================================50
hipMemcpy(offsetD, offset, count*sizeof(long), hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy offsetD");
//==================================================50
// keysD
//==================================================50
hipMemcpy(keysD, keys, count*sizeof(int), hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy keysD");
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansD
//==================================================50
hipMemcpy(ansD, ans, count*sizeof(record), hipMemcpyHostToDevice);
checkCUDAError("hipMalloc hipMemcpy ansD");
time3 = get_time();
//======================================================================================================================================================150
// findK kernel
//======================================================================================================================================================150
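// Launch configuration: one thread block per query (numBlocks = count) and roughly
// one thread per key slot in a node (threadsPerBlock = order, capped at 1024).
// allocateReadWriteSets/freeReadWriteSets come from the curd_lib_host headers
// included above and appear to bracket the launch for read/write-set tracking
// added to this hipified version.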
allocateReadWriteSets(numBlocks, threadsPerBlock);
hipLaunchKernelGGL(( findK), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, maxheight,
knodesD,
knodes_elem,
recordsD,
currKnodeD,
offsetD,
keysD,
ansD);
freeReadWriteSets(numBlocks, threadsPerBlock);
hipDeviceSynchronize();
checkCUDAError("findK");
time4 = get_time();
//======================================================================================================================================================150
// GPU MEMORY COPY (CONTD.)
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansD
//==================================================50
hipMemcpy(ans, ansD, count*sizeof(record), hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy ansD");
time5 = get_time();
//======================================================================================================================================================150
// GPU MEMORY DEALLOCATION
//======================================================================================================================================================150
hipFree(recordsD);
hipFree(knodesD);
hipFree(currKnodeD);
hipFree(offsetD);
hipFree(keysD);
hipFree(ansD);
time6 = get_time();
//======================================================================================================================================================150
// DISPLAY TIMING
//======================================================================================================================================================150
printf("Time spent in different stages of GPU_CUDA KERNEL:\n");
printf("%15.12f s, %15.12f % : GPU: SET DEVICE / DRIVER INIT\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: ALO\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: COPY IN\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: COPY OUT\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: FRE\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time6-time0) / 1000000);
//========================================================================================================================================================================================================200
// End
//========================================================================================================================================================================================================200
}
//========================================================================================================================================================================================================200
// END
//========================================================================================================================================================================================================200
#ifdef __cplusplus
}
#endif
| fad942e3a46d0b2ba35feda6013c9ec0516817cd.cu | #include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#include<curd_lib_host.h>
#ifdef __cplusplus
extern "C" {
#endif
//========================================================================================================================================================================================================200
// DEFINE/INCLUDE
//========================================================================================================================================================================================================200
//======================================================================================================================================================150
// COMMON
//======================================================================================================================================================150
#include "../common.h" // (in main program directory) needed to recognized input variables
//======================================================================================================================================================150
// UTILITIES
//======================================================================================================================================================150
#include "../util/cuda/cuda.h" // (in path specified to compiler) needed by for device functions
#include "../util/timer/timer.h" // (in path specified to compiler) needed by timer
//======================================================================================================================================================150
// KERNEL
//======================================================================================================================================================150
#include "./kernel_gpu_cuda.cu" // (in current directory) GPU kernel, cannot include with header file because of complications with passing of constant memory variables
//======================================================================================================================================================150
// HEADER
//======================================================================================================================================================150
#include "./kernel_gpu_cuda_wrapper.h" // (in current directory)
//========================================================================================================================================================================================================200
// KERNEL_GPU_CUDA_WRAPPER FUNCTION
//========================================================================================================================================================================================================200
void
kernel_gpu_cuda_wrapper(record *records,
long records_mem,
knode *knodes,
long knodes_elem,
long knodes_mem,
int order,
long maxheight,
int count,
long *currKnode,
long *offset,
int *keys,
record *ans)
{
//======================================================================================================================================================150
// CPU VARIABLES
//======================================================================================================================================================150
// timer
long long time0;
long long time1;
long long time2;
long long time3;
long long time4;
long long time5;
long long time6;
time0 = get_time();
//======================================================================================================================================================150
// GPU SETUP
//======================================================================================================================================================150
//====================================================================================================100
// INITIAL DRIVER OVERHEAD
//====================================================================================================100
cudaThreadSynchronize();
//====================================================================================================100
// EXECUTION PARAMETERS
//====================================================================================================100
int numBlocks;
numBlocks = count; // max # of blocks can be 65,535
int threadsPerBlock;
threadsPerBlock = order < 1024 ? order : 1024;
printf("# of blocks = %d, # of threads/block = %d (ensure that device can handle)\n", numBlocks, threadsPerBlock);
time1 = get_time();
//======================================================================================================================================================150
// GPU MEMORY (MALLOC)
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN
//====================================================================================================100
//==================================================50
// recordsD
//==================================================50
record *recordsD;
cudaMalloc((void**)&recordsD, records_mem);
checkCUDAError("cudaMalloc recordsD");
//==================================================50
// knodesD
//==================================================50
knode *knodesD;
cudaMalloc((void**)&knodesD, knodes_mem);
checkCUDAError("cudaMalloc recordsD");
//==================================================50
// currKnodeD
//==================================================50
long *currKnodeD;
cudaMalloc((void**)&currKnodeD, count*sizeof(long));
checkCUDAError("cudaMalloc currKnodeD");
//==================================================50
// offsetD
//==================================================50
long *offsetD;
cudaMalloc((void**)&offsetD, count*sizeof(long));
checkCUDAError("cudaMalloc offsetD");
//==================================================50
// keysD
//==================================================50
int *keysD;
cudaMalloc((void**)&keysD, count*sizeof(int));
checkCUDAError("cudaMalloc keysD");
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansD
//==================================================50
record *ansD;
cudaMalloc((void**)&ansD, count*sizeof(record));
checkCUDAError("cudaMalloc ansD");
time2 = get_time();
//======================================================================================================================================================150
// GPU MEMORY COPY
//======================================================================================================================================================150
//====================================================================================================100
// GPU MEMORY (MALLOC) COPY IN
//====================================================================================================100
//==================================================50
// recordsD
//==================================================50
cudaMemcpy(recordsD, records, records_mem, cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy memD");
//==================================================50
// knodesD
//==================================================50
cudaMemcpy(knodesD, knodes, knodes_mem, cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy memD");
//==================================================50
// currKnodeD
//==================================================50
cudaMemcpy(currKnodeD, currKnode, count*sizeof(long), cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy currKnodeD");
//==================================================50
// offsetD
//==================================================50
cudaMemcpy(offsetD, offset, count*sizeof(long), cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy offsetD");
//==================================================50
// keysD
//==================================================50
cudaMemcpy(keysD, keys, count*sizeof(int), cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy keysD");
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansD
//==================================================50
cudaMemcpy(ansD, ans, count*sizeof(record), cudaMemcpyHostToDevice);
checkCUDAError("cudaMalloc cudaMemcpy ansD");
time3 = get_time();
//======================================================================================================================================================150
// findK kernel
//======================================================================================================================================================150
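// one thread block per query (numBlocks == count): each block walks the B+ tree from the
// root, descending one level per iteration (up to maxheight) while updating its cursor in
// currKnodeD/offsetD, and finally writes the record found for its key into ansD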
allocateReadWriteSets(numBlocks, threadsPerBlock);
findK<<<numBlocks, threadsPerBlock>>>( maxheight,
knodesD,
knodes_elem,
recordsD,
currKnodeD,
offsetD,
keysD,
ansD);
freeReadWriteSets(numBlocks, threadsPerBlock);
cudaThreadSynchronize();
checkCUDAError("findK");
time4 = get_time();
//======================================================================================================================================================150
// GPU MEMORY COPY (CONTD.)
//======================================================================================================================================================150
//====================================================================================================100
// DEVICE IN/OUT
//====================================================================================================100
//==================================================50
// ansD
//==================================================50
cudaMemcpy(ans, ansD, count*sizeof(record), cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy ansD");
time5 = get_time();
//======================================================================================================================================================150
// GPU MEMORY DEALLOCATION
//======================================================================================================================================================150
cudaFree(recordsD);
cudaFree(knodesD);
cudaFree(currKnodeD);
cudaFree(offsetD);
cudaFree(keysD);
cudaFree(ansD);
time6 = get_time();
//======================================================================================================================================================150
// DISPLAY TIMING
//======================================================================================================================================================150
printf("Time spent in different stages of GPU_CUDA KERNEL:\n");
printf("%15.12f s, %15.12f % : GPU: SET DEVICE / DRIVER INIT\n", (float) (time1-time0) / 1000000, (float) (time1-time0) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: ALO\n", (float) (time2-time1) / 1000000, (float) (time2-time1) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: COPY IN\n", (float) (time3-time2) / 1000000, (float) (time3-time2) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU: KERNEL\n", (float) (time4-time3) / 1000000, (float) (time4-time3) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: COPY OUT\n", (float) (time5-time4) / 1000000, (float) (time5-time4) / (float) (time6-time0) * 100);
printf("%15.12f s, %15.12f % : GPU MEM: FRE\n", (float) (time6-time5) / 1000000, (float) (time6-time5) / (float) (time6-time0) * 100);
printf("Total time:\n");
printf("%.12f s\n", (float) (time6-time0) / 1000000);
//========================================================================================================================================================================================================200
// End
//========================================================================================================================================================================================================200
}
//========================================================================================================================================================================================================200
// END
//========================================================================================================================================================================================================200
#ifdef __cplusplus
}
#endif
|
b542480d1bd287a9d5c46bb169aaa82a3cca9c87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Histogram Equalization
#include <wb.h>
#include <cstdint> // uint8_t / uint32_t used below (may already be pulled in via wb.h)
#define HISTOGRAM_LENGTH 256
#define BLOCK_SIZE 256
//@@ insert code here
float clamp(float x, float start, float end)
{
if(x>end) return end;
if(x<start) return start;
return x;
}
float correct(float val, float* CDF)
{
float y = (CDF[(uint8_t)(val*255)] - CDF[0]) / (1.0f - CDF[0]);
return clamp(y, 0.0f, 1.0f);
}
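// one thread per gray-scale pixel: each in-range thread atomically increments the global
// 256-bin histogram entry selected by its pixel intensity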
__global__ void kernal_histogram(uint8_t *image, uint32_t *hist, int len)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < len)
{
unsigned int* address = &hist[image[idx]];
atomicAdd(address, 1);
}
}
int main(int argc, char ** argv) {
wbArg_t args;
int imageWidth;
int imageHeight;
int imageChannels;
wbImage_t inputImage;
wbImage_t outputImage;
float * hostInputImageData;
float * hostOutputImageData;
const char * inputImageFile;
//@@ Insert more code here
uint8_t *hostGrayScaleImageData;
uint8_t *deviceGrayScaleImageData;
uint32_t *hostHistogram;
uint32_t *deviceHistogram;
float hostCDF[HISTOGRAM_LENGTH] = {0};
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 0);
wbTime_start(Generic, "Importing data and creating memory on host");
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
wbTime_stop(Generic, "Importing data and creating memory on host");
//@@ insert code here
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
hostGrayScaleImageData = (uint8_t*)malloc(imageWidth*imageHeight*sizeof(uint8_t));
wbTime_start(Generic, "Convert to gray scale on CPU");
for(int i=0;i<imageWidth*imageHeight;++i)
{
float r = hostInputImageData[i*3+0];
float g = hostInputImageData[i*3+1];
float b = hostInputImageData[i*3+2];
uint8_t y = 255.0*(0.21*r + 0.71*g + 0.07*b);
hostGrayScaleImageData[i] = y;
}
wbTime_stop(Generic, "Convert to gray scale on CPU");
wbTime_start(GPU, "Copy gray image to GPU");
hipMalloc((void**)&deviceGrayScaleImageData, imageWidth*imageHeight*sizeof(uint8_t));
hipMemcpy(
deviceGrayScaleImageData,
hostGrayScaleImageData,
imageWidth*imageHeight*sizeof(uint8_t),
hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copy gray image to GPU");
wbTime_start(GPU, "Compute histogram GPU");
hipMalloc((void**)&deviceHistogram, HISTOGRAM_LENGTH * sizeof(uint32_t));
hipMemset(deviceHistogram, 0, HISTOGRAM_LENGTH * sizeof(uint32_t));
dim3 block(BLOCK_SIZE);
dim3 grid((imageWidth*imageHeight + BLOCK_SIZE - 1)/BLOCK_SIZE);
hipLaunchKernelGGL(( kernal_histogram), dim3(grid),dim3(block), 0, 0,
deviceGrayScaleImageData,
deviceHistogram,
imageWidth*imageHeight);
wbTime_stop(GPU, "Compute histogram GPU");
wbTime_start(GPU, "Copy histogram back");
hostHistogram = (uint32_t*)malloc(HISTOGRAM_LENGTH * sizeof(uint32_t));
hipMemcpy(
hostHistogram,
deviceHistogram,
HISTOGRAM_LENGTH * sizeof(uint32_t),
hipMemcpyDeviceToHost);
wbTime_stop(GPU, "Copy histogram back");
wbTime_start(Generic, "Compute CDF on CPU");
hostCDF[0] = 1.0f*hostHistogram[0]/imageWidth/imageHeight;
for(int i=1;i<HISTOGRAM_LENGTH;++i)
hostCDF[i] = hostCDF[i-1] + 1.0f*hostHistogram[i]/imageWidth/imageHeight;
wbTime_stop(Generic, "Compute CDF on CPU");
wbTime_start(Generic, "Correct Image on CPU");
for(int i=0;i<imageWidth*imageHeight*imageChannels;++i)
{
hostOutputImageData[i] = correct(hostInputImageData[i], hostCDF);
}
wbTime_stop(Generic, "Correct Image on CPU");
wbSolution(args, outputImage);
//@@ insert code here
free(hostHistogram);
return 0;
}
| b542480d1bd287a9d5c46bb169aaa82a3cca9c87.cu | // Histogram Equalization
#include <wb.h>
#include <cstdint> // uint8_t / uint32_t used below (may already be pulled in via wb.h)
#define HISTOGRAM_LENGTH 256
#define BLOCK_SIZE 256
//@@ insert code here
float clamp(float x, float start, float end)
{
if(x>end) return end;
if(x<start) return start;
return x;
}
float correct(float val, float* CDF)
{
float y = (CDF[(uint8_t)(val*255)] - CDF[0]) / (1.0f - CDF[0]);
return clamp(y, 0.0f, 1.0f);
}
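// one thread per gray-scale pixel: each in-range thread atomically increments the global
// 256-bin histogram entry selected by its pixel intensity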
__global__ void kernal_histogram(uint8_t *image, uint32_t *hist, int len)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < len)
{
unsigned int* address = &hist[image[idx]];
atomicAdd(address, 1);
}
}
int main(int argc, char ** argv) {
wbArg_t args;
int imageWidth;
int imageHeight;
int imageChannels;
wbImage_t inputImage;
wbImage_t outputImage;
float * hostInputImageData;
float * hostOutputImageData;
const char * inputImageFile;
//@@ Insert more code here
uint8_t *hostGrayScaleImageData;
uint8_t *deviceGrayScaleImageData;
uint32_t *hostHistogram;
uint32_t *deviceHistogram;
float hostCDF[HISTOGRAM_LENGTH] = {0};
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 0);
wbTime_start(Generic, "Importing data and creating memory on host");
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
wbTime_stop(Generic, "Importing data and creating memory on host");
//@@ insert code here
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
hostGrayScaleImageData = (uint8_t*)malloc(imageWidth*imageHeight*sizeof(uint8_t));
wbTime_start(Generic, "Convert to gray scale on CPU");
for(int i=0;i<imageWidth*imageHeight;++i)
{
float r = hostInputImageData[i*3+0];
float g = hostInputImageData[i*3+1];
float b = hostInputImageData[i*3+2];
uint8_t y = 255.0*(0.21*r + 0.71*g + 0.07*b);
hostGrayScaleImageData[i] = y;
}
wbTime_stop(Generic, "Convert to gray scale on CPU");
wbTime_start(GPU, "Copy gray image to GPU");
cudaMalloc((void**)&deviceGrayScaleImageData, imageWidth*imageHeight*sizeof(uint8_t));
cudaMemcpy(
deviceGrayScaleImageData,
hostGrayScaleImageData,
imageWidth*imageHeight*sizeof(uint8_t),
cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copy gray image to GPU");
wbTime_start(GPU, "Compute histogram GPU");
cudaMalloc((void**)&deviceHistogram, HISTOGRAM_LENGTH * sizeof(uint32_t));
cudaMemset(deviceHistogram, 0, HISTOGRAM_LENGTH * sizeof(uint32_t));
dim3 block(BLOCK_SIZE);
dim3 grid((imageWidth*imageHeight + BLOCK_SIZE - 1)/BLOCK_SIZE);
kernal_histogram<<<grid,block>>>(
deviceGrayScaleImageData,
deviceHistogram,
imageWidth*imageHeight);
wbTime_stop(GPU, "Compute histogram GPU");
wbTime_start(GPU, "Copy histogram back");
hostHistogram = (uint32_t*)malloc(HISTOGRAM_LENGTH * sizeof(uint32_t));
cudaMemcpy(
hostHistogram,
deviceHistogram,
HISTOGRAM_LENGTH * sizeof(uint32_t),
cudaMemcpyDeviceToHost);
wbTime_stop(GPU, "Copy histogram back");
wbTime_start(Generic, "Compute CDF on CPU");
hostCDF[0] = 1.0f*hostHistogram[0]/imageWidth/imageHeight;
for(int i=1;i<HISTOGRAM_LENGTH;++i)
hostCDF[i] = hostCDF[i-1] + 1.0f*hostHistogram[i]/imageWidth/imageHeight;
wbTime_stop(Generic, "Compute CDF on CPU");
wbTime_start(Generic, "Correct Image on CPU");
for(int i=0;i<imageWidth*imageHeight*imageChannels;++i)
{
hostOutputImageData[i] = correct(hostInputImageData[i], hostCDF);
}
wbTime_stop(Generic, "Correct Image on CPU");
wbSolution(args, outputImage);
//@@ insert code here
free(hostHistogram);
return 0;
}
|
60c7a5e4251e7e28749da2731bd1d5d0e5951638.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>
#include <time.h>
using namespace std;
#define CASENAME "rk1"
#define BLOCKSIZEX 192
#define BLOCKSIZEY 1
#define UBLOCKSIZEX 32
#define UBLOCKSIZEY 10
#define HALO 2
#define XDIM 576
#define YDIM 165
#define OBSTD 20 //.f
#define OBSTX 122 //.f
#define OBSTY 72 //.f
#define TMAX 20000
#define MAXIT 5000
#define MAXRES 0.0001
#define RE 100
#define UMAX 1.f
#define BETA 0.01f //beta = 1/c^2
#define DTAU 0.003f
#define DT 1.f
#define CONV 2 //1:UDS 2:Hybrid
#define TIMEMARCH 1 //1:Explicit Euler 2:RK2
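// 2D channel flow past a square obstacle, solved with the artificial compressibility method
// on a staggered grid: u is stored on east cell faces, v on north faces, p at cell centers;
// pseudo-time (DTAU) updates drive the velocity divergence toward zero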
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-2*HALO)*0.5f;
float result = -1.0f*(((1.0f-(x-HALO)/radius))*((1.0f-(x-HALO)/radius))-1.0f);
return (result);
// return 1.f;
}
void AllocateArray(float ****f,int x,int y)
{
float ***array = new float **[4];
for(int i = 0;i<4;i++)
{
array[i] = new float *[x];
for(int j = 0;j<x;j++)
{
array[i][j] = new float [y];
for(int k = 0;k<y;k++)
array[i][j][k] = 0.f;
}
}
*f = array;
}
void DeallocateArray(float ***f,int x)
{
for(int i = 0;i<4;i++)
{
for(int j = 0;j<x;j++)
delete [] f[i][j];
delete [] f[i];
}
delete [] f;
}
void WriteResults(ostream &output, float *u, float *v, float *p)
{
output<<"VARIABLES = \"X\",\"Y\",\"u\",\"v\",\"p\"\n";
output<<"ZONE F=POINT, I="<<XDIM-HALO*2<<", J="<<YDIM-HALO*2<<endl;
float dx = 1;
float dy = 1;
float uval,vval,pval;
for(int j = HALO; j<YDIM-HALO; j++){
for(int i = HALO; i<XDIM-HALO; i++)
{
float xc = 0.5f*dx+(i)*dx;
float yc = 0.5f*dy+(j)*dy;
uval = 0.5f*(u[i+j*XDIM]+u[i-1 +j*XDIM]);
vval = 0.5f*(v[i+j*XDIM]+v[i+(j-1)*XDIM]);
pval = p[i+j*XDIM];
if(xc>OBSTX && xc<OBSTX+OBSTD && yc>OBSTY && yc<OBSTY+OBSTD)
{
uval = 0.f; vval = 0.f; pval = 0.f;
}
output<<xc<<", "<<yc<<", "<<uval<<", "<<vval<<", "<<pval<<endl;
}
}
}
void WriteResiduals(ostream &output, float *Res)
{
for(int i = 0; i<TMAX; i++)
output<<i<<", "<<sqrt(Res[i])/((XDIM-2*HALO)*(YDIM-2*HALO))<<endl;
}
void WriteInputs(ostream &output)
{
output<<"Domain size: \t"<<XDIM<<"x"<<YDIM<<endl;
output<<"Halo size: \t"<<HALO<<endl;
output<<"Target residual: \t"<<MAXRES<<endl;
output<<"Pseudo time step size: \t"<<DTAU<<endl;
output<<"Maximum iterations: \t"<<MAXIT<<endl;
output<<"Real time step size: \t"<<DT<<endl;
output<<"Maximum time steps: \t"<<TMAX<<endl;
output<<"Re: \t"<<RE<<endl;
output<<"uMax: \t"<<UMAX<<endl;
string scheme;
if(CONV == 0) scheme = "CDS ";
if(CONV == 1) scheme = "UDS ";
if(CONV == 2) scheme = "Hybrid";
if(CONV == 3) scheme = "QUICK ";
output<<"Convective discretization: \t"<<scheme<<endl;
}
__global__ void ACM_U_Shared_Single(float* uA, float* vA, float* Res, float* uB, float* vB, float* pB, float nu, int it, int t, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
__shared__ float u[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
__shared__ float v[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
if(threadIdx.x == 0){
u[0][ty] = uB[ x-1+ y *pitch];
v[0][ty] = vB[ x-1+ y *pitch];
if(threadIdx.y == blockDim.y-1){
u[0][ty+1] = uB[ x-1+ (y+1)*pitch];
}
}
if(threadIdx.x == blockDim.x-1){
u[UBLOCKSIZEX+1][ty] = uB[ x+1+ y *pitch];
v[UBLOCKSIZEX+1][ty] = vB[ x+1+ y *pitch];
}
if(threadIdx.y == 0){
u[tx][0] = uB[ x+ (y-1)*pitch];
v[tx][0] = vB[ x+ (y-1)*pitch];
if(threadIdx.x == blockDim.x-1){
v[tx+1][0] = vB[ x+1+(y-1)*pitch];
}
}
if(threadIdx.y == blockDim.y-1){
u[tx][UBLOCKSIZEY+1] = uB[ x+ (y+1)*pitch];
v[tx][UBLOCKSIZEY+1] = vB[ x+ (y+1)*pitch];
}
u[tx][ty] = uB[ x+ (y )*pitch];
v[tx][ty] = vB[ x+ (y )*pitch];
}
__syncthreads();
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
float Ae,Aw,An,As;
float AP_Ue,AE_Ue,AW_Ue,AN_Ue,AS_Ue; //A coeff for East node on u of east face
float AP_Vn,AE_Vn,AW_Vn,AN_Vn,AS_Vn;
float Fe,Fw,Fn,Fs;
float De,Dw,Dn,Ds;
float B_Ue,B_Vn;
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
Ae = dy; Aw = dy; An = dx; As = dx;
Fe = 0.5f*(u[tx ][ty ]+u[tx+1][ty ])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx ][ty ])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx+1][ty ])*An;
Fs = 0.5f*(v[tx ][ty-1]+v[tx+1][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Ue = max(-Fe,0.f)+De;
AW_Ue = max( Fw,0.f)+Dw;
AN_Ue = max(-Fn,0.f)+Dn;
AS_Ue = max( Fs,0.f)+Ds;
AP_Ue = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Ue = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Ue = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Ue = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Ue = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Ue = AE_Ue+AW_Ue+AN_Ue+AS_Ue;
}
B_Ue = Ae*(pB[x +(y )*pitch]-pB[x+1+(y )*pitch]);
uA[ x + y *pitch] =((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ]
+AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1]
-AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy)
)*DTAU
+ u[tx ][ty ];
Fe = 0.5f*(u[tx ][ty ]+u[tx ][ty+1])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx-1][ty+1])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx ][ty+1])*An;
Fs = 0.5f*(v[tx ][ty ]+v[tx ][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Vn = max(-Fe,0.f)+De;
AW_Vn = max( Fw,0.f)+Dw;
AN_Vn = max(-Fn,0.f)+Dn;
AS_Vn = max( Fs,0.f)+Ds;
AP_Vn = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Vn = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Vn = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Vn = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Vn = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Vn = AE_Vn+AW_Vn+AN_Vn+AS_Vn;
}
B_Vn = An*(pB[x +(y )*pitch]-pB[x +(y+1)*pitch]);
vA[ x + y *pitch] =((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ]
+AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1]
-AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy)
)*DTAU
+ v[tx ][ty ];
if(y < HALO)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(y > YDIM-HALO-2)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y > YDIM-HALO-3)
vA[ x + y *pitch] = 0.f;
if(x < HALO)
{
uA[ x + y *pitch] = UMAX*PoisProf(y);
vA[ x + y *pitch] = 0.f;
}
if(x > XDIM-HALO-2)
{
uA[ x + y *pitch] = u[tx-1][ty ];
vA[ x + y *pitch] = v[tx-1][ty ];
}
if(x > XDIM-HALO-3)
uA[ x + y *pitch] = u[tx-1][ty ];
if(x == OBSTX-1 && y>=OBSTY && y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(y == OBSTY-1 && x>=OBSTX && x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(x == OBSTX && y>=OBSTY && y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx-1][ty ];
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx+1][ty ];
if(y == OBSTY && x>=OBSTX && x<OBSTX+OBSTD-1){
if(t < 500) uA[ x + y *pitch] = u[tx][ty-1];
else
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1)
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(x == 0 && y == 0) Res[t] = 0.f;
}
__global__ void ACM_U_Shared_Single1(float* uA, float* vA, float* Res, float* uB, float* vB, float* pB, float nu, float dtau, int it, int t, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
__shared__ float u[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
__shared__ float v[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
if(threadIdx.x == 0){
u[0][ty] = uB[ x-1+ y *pitch];
v[0][ty] = vB[ x-1+ y *pitch];
if(threadIdx.y == blockDim.y-1){
u[0][ty+1] = uB[ x-1+ (y+1)*pitch];
}
}
if(threadIdx.x == blockDim.x-1){
u[UBLOCKSIZEX+1][ty] = uB[ x+1+ y *pitch];
v[UBLOCKSIZEX+1][ty] = vB[ x+1+ y *pitch];
}
if(threadIdx.y == 0){
u[tx][0] = uB[ x+ (y-1)*pitch];
v[tx][0] = vB[ x+ (y-1)*pitch];
if(threadIdx.x == blockDim.x-1){
v[tx+1][0] = vB[ x+1+(y-1)*pitch];
}
}
if(threadIdx.y == blockDim.y-1){
u[tx][UBLOCKSIZEY+1] = uB[ x+ (y+1)*pitch];
v[tx][UBLOCKSIZEY+1] = vB[ x+ (y+1)*pitch];
}
u[tx][ty] = uB[ x+ (y )*pitch];
v[tx][ty] = vB[ x+ (y )*pitch];
}
__syncthreads();
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
float Ae,Aw,An,As;
float AP_Ue,AE_Ue,AW_Ue,AN_Ue,AS_Ue; //A coeff for East node on u of east face
float AP_Vn,AE_Vn,AW_Vn,AN_Vn,AS_Vn;
float Fe,Fw,Fn,Fs;
float De,Dw,Dn,Ds;
float B_Ue,B_Vn;
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
Ae = dy; Aw = dy; An = dx; As = dx;
Fe = 0.5f*(u[tx ][ty ]+u[tx+1][ty ])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx ][ty ])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx+1][ty ])*An;
Fs = 0.5f*(v[tx ][ty-1]+v[tx+1][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Ue = max(-Fe,0.f)+De;
AW_Ue = max( Fw,0.f)+Dw;
AN_Ue = max(-Fn,0.f)+Dn;
AS_Ue = max( Fs,0.f)+Ds;
AP_Ue = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Ue = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Ue = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Ue = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Ue = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Ue = AE_Ue+AW_Ue+AN_Ue+AS_Ue;
}
B_Ue = Ae*(pB[x +(y )*pitch]-pB[x+1+(y )*pitch]);
uA[ x + y *pitch] =((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ]
+AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1]
-AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy)
)*dtau
+ u[tx ][ty ];
Fe = 0.5f*(u[tx ][ty ]+u[tx ][ty+1])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx-1][ty+1])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx ][ty+1])*An;
Fs = 0.5f*(v[tx ][ty ]+v[tx ][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Vn = max(-Fe,0.f)+De;
AW_Vn = max( Fw,0.f)+Dw;
AN_Vn = max(-Fn,0.f)+Dn;
AS_Vn = max( Fs,0.f)+Ds;
AP_Vn = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Vn = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Vn = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Vn = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Vn = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Vn = AE_Vn+AW_Vn+AN_Vn+AS_Vn;
}
B_Vn = An*(pB[x +(y )*pitch]-pB[x +(y+1)*pitch]);
vA[ x + y *pitch] =((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ]
+AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1]
-AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy)
)*dtau
+ v[tx ][ty ];
if(y < HALO)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(y > YDIM-HALO-2)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y > YDIM-HALO-3)
vA[ x + y *pitch] = 0.f;
if(x < HALO)
{
uA[ x + y *pitch] = UMAX*PoisProf(y);
vA[ x + y *pitch] = 0.f;
}
if(x > XDIM-HALO-2)
{
uA[ x + y *pitch] = u[tx-1][ty ];
vA[ x + y *pitch] = v[tx-1][ty ];
}
if(x > XDIM-HALO-3)
uA[ x + y *pitch] = u[tx-1][ty ];
if(x == OBSTX-1 && y>=OBSTY && y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(y == OBSTY-1 && x>=OBSTX && x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(x == OBSTX && y>=OBSTY && y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx-1][ty ];
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx+1][ty ];
if(y == OBSTY && x>=OBSTX && x<OBSTX+OBSTD-1){
if(t < 500) uA[ x + y *pitch] = u[tx][ty-1];
else
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1)
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(x == 0 && y == 0) Res[t] = 0.f;
}
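// same shared-memory momentum update as above, except the pseudo-time increment is added to
// the separately supplied base state uC/vC, so the kernel can be reused as a Runge-Kutta substep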
__global__ void ACM_U_Shared_Single2(float* uA, float* vA, float* Res, float* uB, float* vB, float* pB, float* uC, float* vC, float nu, float dtau, int it, int t, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
__shared__ float u[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
__shared__ float v[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
if(threadIdx.x == 0){
u[0][ty] = uB[ x-1+ y *pitch];
v[0][ty] = vB[ x-1+ y *pitch];
if(threadIdx.y == blockDim.y-1){
u[0][ty+1] = uB[ x-1+ (y+1)*pitch];
}
}
if(threadIdx.x == blockDim.x-1){
u[UBLOCKSIZEX+1][ty] = uB[ x+1+ y *pitch];
v[UBLOCKSIZEX+1][ty] = vB[ x+1+ y *pitch];
}
if(threadIdx.y == 0){
u[tx][0] = uB[ x+ (y-1)*pitch];
v[tx][0] = vB[ x+ (y-1)*pitch];
if(threadIdx.x == blockDim.x-1){
v[tx+1][0] = vB[ x+1+(y-1)*pitch];
}
}
if(threadIdx.y == blockDim.y-1){
u[tx][UBLOCKSIZEY+1] = uB[ x+ (y+1)*pitch];
v[tx][UBLOCKSIZEY+1] = vB[ x+ (y+1)*pitch];
}
u[tx][ty] = uB[ x+ (y )*pitch];
v[tx][ty] = vB[ x+ (y )*pitch];
}
__syncthreads();
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
float Ae,Aw,An,As;
float AP_Ue,AE_Ue,AW_Ue,AN_Ue,AS_Ue; //A coeff for East node on u of east face
float AP_Vn,AE_Vn,AW_Vn,AN_Vn,AS_Vn;
float Fe,Fw,Fn,Fs;
float De,Dw,Dn,Ds;
float B_Ue,B_Vn;
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
Ae = dy; Aw = dy; An = dx; As = dx;
Fe = 0.5f*(u[tx ][ty ]+u[tx+1][ty ])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx ][ty ])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx+1][ty ])*An;
Fs = 0.5f*(v[tx ][ty-1]+v[tx+1][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Ue = max(-Fe,0.f)+De;
AW_Ue = max( Fw,0.f)+Dw;
AN_Ue = max(-Fn,0.f)+Dn;
AS_Ue = max( Fs,0.f)+Ds;
AP_Ue = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Ue = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Ue = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Ue = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Ue = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Ue = AE_Ue+AW_Ue+AN_Ue+AS_Ue;
}
B_Ue = Ae*(pB[x +(y )*pitch]-pB[x+1+(y )*pitch]);
//B_Ue-=( 3.f*u[tx ][ty ]-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/DT;
uA[ x + y *pitch] =((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ]
+AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1]
-AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy)
//-( 3.f*u[tx ][ty ]-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/DT
//-( u[tx ][ty ]-uC[x +(y )*pitch])*0.5f/DT
)*dtau
+ uC[x+y*pitch];
Fe = 0.5f*(u[tx ][ty ]+u[tx ][ty+1])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx-1][ty+1])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx ][ty+1])*An;
Fs = 0.5f*(v[tx ][ty ]+v[tx ][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Vn = max(-Fe,0.f)+De;
AW_Vn = max( Fw,0.f)+Dw;
AN_Vn = max(-Fn,0.f)+Dn;
AS_Vn = max( Fs,0.f)+Ds;
AP_Vn = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Vn = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Vn = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Vn = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Vn = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Vn = AE_Vn+AW_Vn+AN_Vn+AS_Vn;
}
B_Vn = An*(pB[x +(y )*pitch]-pB[x +(y+1)*pitch]);
//B_Vn-=( 3.f*v[tx ][ty ]-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/DT;
vA[ x + y *pitch] =((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ]
+AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1]
-AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy)
//-( 3.f*v[tx ][ty ]-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/DT
//-( v[tx ][ty ]-vC[x +(y )*pitch])*0.5f/DT
)*dtau
+ vC[x+y*pitch];
if(y < HALO)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(y > YDIM-HALO-2)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y > YDIM-HALO-3)
vA[ x + y *pitch] = 0.f;
if(x < HALO)
{
uA[ x + y *pitch] = UMAX*PoisProf(y);
vA[ x + y *pitch] = 0.f;
}
if(x > XDIM-HALO-2)
{
uA[ x + y *pitch] = u[tx-1][ty ];
vA[ x + y *pitch] = v[tx-1][ty ];
}
if(x > XDIM-HALO-3)
uA[ x + y *pitch] = u[tx-1][ty ];
if(x == OBSTX-1 && y>=OBSTY && y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(y == OBSTY-1 && x>=OBSTX && x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(x == OBSTX && y>=OBSTY && y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx-1][ty ];
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx+1][ty ];
if(y == OBSTY && x>=OBSTX && x<OBSTX+OBSTD-1){
if(t < 500) uA[ x + y *pitch] = u[tx][ty-1];
else
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1)
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(x == 0 && y == 0) Res[t] = 0.f;
}
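// artificial compressibility pressure update: p_new = p_old - (dtau/BETA)*div(u); blocks also
// count interior non-obstacle cells whose divergence still exceeds MAXRES and add that count to Res[t]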
__global__ void ACM_P(float* pA, float* Res, float* uA, float* vA, float* pB, float dtau, int it, int t, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
__shared__ float sumRes[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
__syncthreads();
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
if(x > HALO-1 && x < XDIM-HALO && y > HALO-1 && y < YDIM-HALO){
float res = (dy*(uA[ x + y *pitch]-uA[ x-1+ y *pitch])
+dx*(vA[ x + y *pitch]-vA[ x +(y-1)*pitch]))/(dx*dy);
pA[ x + y *pitch] = -res*dtau/BETA+pB[ x + y *pitch];
if(x > HALO && x < XDIM-HALO-2 && y > HALO && y < YDIM-HALO-2
&& abs(res/UMAX)>MAXRES && !(x>= OBSTX && y>=OBSTY && x<OBSTX+OBSTD && y<OBSTY+OBSTD)){
check[0] = 1;
sumRes[threadIdx.x]=1.f;
}
else
sumRes[threadIdx.x]=0.f;
}
else{
sumRes[threadIdx.x]=0.f;
}
__syncthreads();
if(check[0] == 1){
//reduction for residual
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumRes[threadIdx.x] += sumRes[threadIdx.x+halfPoint];
}
__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&Res[t],sumRes[0]);
}
}
}
__global__ void ACM_VelTransfer(float* uD, float* vD, float* uA, float* vA, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
uD[ x + y *pitch] = uA[ x + y *pitch];
vD[ x + y *pitch] = vA[ x + y *pitch];
}
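// NOTE: ACM_Forces below is an earlier, partly commented-out version of the force calculation;
// main() launches ACM_Forces1/ACM_Forces2 instead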
__global__ void ACM_Forces(float *FX, float *FY, float* uD, float* vD, float* pA, float nu, int t, size_t pitch, float *test)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
__shared__ float sumFX[BLOCKSIZEX],sumFY[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
sumFX[threadIdx.x] = 0.f;
sumFY[threadIdx.x] = 0.f;
__syncthreads();
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
int a= 0;
//forces on bottom wall
if(x > OBSTX-1 && x<OBSTX+OBSTD && abs(y-OBSTY)<0.00001f){
a = 1;
check[0] = 1;
sumFY[threadIdx.x] += 0.f;//dx*pA[ x +(y-1)*pitch];
if(x<OBSTX+OBSTD-1)
sumFX[threadIdx.x] += dx*nu*2.f*uD[ x +(y )*pitch]/dy;
}
//forces on top wall
if(x > OBSTX-1 && x<OBSTX+OBSTD && abs(y-(OBSTY+OBSTD-1))<0.00001f){
a = 1;
check[0] = 1;
sumFY[threadIdx.x] -= 0.f;//dx*pA[ x +(y+1)*pitch];
if(x<OBSTX+OBSTD-1)
sumFX[threadIdx.x] += dx*nu*2.f*uD[ x +(y )*pitch]/dy;
}
//forces on left wall
if(y > OBSTY-1 && y<OBSTY+OBSTD && abs(x-OBSTX)<0.00001f){
//if(y > OBSTY-1 && y<OBSTY+OBSTD && x>OBSTX-1 && x<OBSTX+2){
//if(x>OBSTX-1.5 && x<OBSTX+1.5){
a = 1;
//if(x == OBSTX){
check[0] = 1;
sumFX[threadIdx.x] += dy*pA[ x-1+(y )*pitch];
//if(y<OBSTY+OBSTD-1)
sumFY[threadIdx.x] += 1.f;//dy*nu*2.f*vD[ x +(y )*pitch]/dx;
}
//forces on right wall
if(y > OBSTY-1 && y<OBSTY+OBSTD && abs(x-(OBSTX+OBSTD-1))<0.00001f){
a = 1;
check[0] = 1;
sumFX[threadIdx.x] -= dy*pA[ x+1+(y )*pitch];
if(y<OBSTY+OBSTD-1)
sumFY[threadIdx.x] += 0.f;//dy*nu*2.f*vD[ x +(y )*pitch]/dx;
}
__syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumFX[threadIdx.x] += sumFX[threadIdx.x+halfPoint];
// sumFY[threadIdx.x] += sumFY[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
float sum = 0;
if(threadIdx.x == 0){
for(int i = 0; i<blockDim.x; i++)
sum += FY[threadIdx.x];
//test[x+y*pitch] = sum;
sumFY[0] = sum;
}
__syncthreads();
atomicAdd(&FY[t],sum);
if(threadIdx.x == 0){
//atomicAdd(&FX[t],sumFX[0]);
//atomicAdd(&FY[t],sumFY[0]);
atomicAdd(&FX[t],sum);
atomicAdd(&FY[t],sum);
}
}
}
__global__ void ACM_Forces1(float *FX_intm, float *FY_intm, float* uD, float* vD, float* pA, float nu, int t, size_t pitch, float *test)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int xcoord = x+OBSTX;
int ycoord = y+OBSTY;
__shared__ float sumFX[OBSTD],sumFY[OBSTD];
sumFX[threadIdx.x] = 0.f;
sumFY[threadIdx.x] = 0.f;
__syncthreads();
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
//forces on bottom wall
if(y == 0){
sumFY[threadIdx.x] += dx*pA[ xcoord +(ycoord-1)*pitch];
sumFX[threadIdx.x] -= dx*nu*2.f*uD[ xcoord +(ycoord )*pitch]/dy;
}
//forces on top wall
if(y == OBSTD-1){
sumFY[threadIdx.x] -= dx*pA[ xcoord +(ycoord+1)*pitch];
sumFX[threadIdx.x] -= dx*nu*2.f*uD[ xcoord +(ycoord )*pitch]/dy;
}
//forces on left wall
if(x == 0){
sumFX[threadIdx.x] += dy*pA[ xcoord-1+(ycoord )*pitch];
sumFY[threadIdx.x] -= dy*nu*2.f*vD[ xcoord +(ycoord )*pitch]/dy;
}
//forces on right wall
if(x == OBSTD-1){
sumFX[threadIdx.x] -= dy*pA[ xcoord+1+(ycoord )*pitch];
sumFY[threadIdx.x] -= dy*nu*2.f*vD[ xcoord +(ycoord )*pitch]/dx;
}
__syncthreads();
//reduction for force
// int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumFX[threadIdx.x] += sumFX[threadIdx.x+halfPoint];
// sumFY[threadIdx.x] += sumFY[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
float sum = 0;
if(threadIdx.x == 0){
for(int i = 0; i<blockDim.x; i++)
sum += sumFX[i];
sumFX[0] = sum; // only thread 0 holds the reduced value, so only thread 0 may write it
}
sum = 0;
if(threadIdx.x == 0){
for(int i = 0; i<blockDim.x; i++)
sum += sumFY[i];
sumFY[0] = sum;
}
if(threadIdx.x == 0){
FX_intm[y] = sumFX[0];
FY_intm[y] = sumFY[0];
}
}
__global__ void ACM_Forces2(float *FX, float *FY, float *FX_intm, float *FY_intm, int t, size_t pitch, float *test)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int xcoord = x+OBSTX;
int ycoord = y+OBSTY;
__shared__ float sumFX[OBSTD],sumFY[OBSTD];
sumFX[threadIdx.y] = 0.f;
sumFY[threadIdx.y] = 0.f;
__syncthreads();
sumFX[threadIdx.y] = FX_intm[threadIdx.y];
sumFY[threadIdx.y] = FY_intm[threadIdx.y];
//test[xcoord+ycoord*pitch] = 1.f;
__syncthreads();
float sum = 0;
if(threadIdx.y == 0){
for(int i = 0; i<blockDim.y; i++)
sum += sumFX[i];
sumFX[0] = sum; // only thread 0 holds the reduced value, so only thread 0 may write it
}
sum = 0;
if(threadIdx.y == 0){
for(int i = 0; i<blockDim.y; i++)
sum += sumFY[i];
sumFY[0] = sum;
}
if(threadIdx.y == 0){
FX[t] = sumFX[0];
FY[t] = sumFY[0];
}
}
int main()
{
ofstream output_log,output_results,output_residual,output_vel,output_force;
float nu = UMAX/RE;
float Ma = UMAX*sqrt(BETA);
cout<<"Ma = "<<Ma<<endl;
string FileName = CASENAME;
output_log.open ((FileName+".log").c_str());
output_results.open ((FileName+".dat").c_str());
output_residual.open ((FileName+".res").c_str());
output_vel.open ((FileName+".vel").c_str());
output_force.open ((FileName+".frc").c_str());
//write input parameters to console and log file
WriteInputs(cout);
WriteInputs(output_log);
//allocate and initialize arrays
float *u[4],*v[4],*p[4],*Res,*FX,*FY;
for(int i = 0; i<4; i++){
u[i] = (float *)malloc(XDIM*YDIM*sizeof(float));
v[i] = (float *)malloc(XDIM*YDIM*sizeof(float));
p[i] = (float *)malloc(XDIM*YDIM*sizeof(float));
}
Res = (float *)malloc(TMAX*sizeof(float));
FX = (float *)malloc(TMAX*sizeof(float));
FY = (float *)malloc(TMAX*sizeof(float));
//initialize host memory
for(int i = 0; i<4; i++){
for(int j = 0; j<XDIM*YDIM; j++){
u[i][j] = UMAX;
v[i][j] = 0.f;
p[i][j] = 0.f;
}
}
for(int j = 0; j<TMAX; j++){
Res[j] = 0.f;
FX[j] = 0.f;
FY[j] = 0.f;
}
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch *= sizeof(float);//pitch*sizeof(float);
size_t pitch_e = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
dim3 u_threads(UBLOCKSIZEX, UBLOCKSIZEY,1);
dim3 u_grid (((XDIM+UBLOCKSIZEX-1)/UBLOCKSIZEX),((YDIM+UBLOCKSIZEY-1)/UBLOCKSIZEY),1);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY,1);
dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 f1_threads (OBSTD,1,1);
dim3 f1_grid (1,OBSTD,1);
dim3 f2_threads (1,OBSTD,1);
dim3 f2_grid (1,1,1);
hipStream_t compute;
hipStream_t transfer;
hipStreamCreate(&compute);
hipStreamCreate(&transfer);
float *u_d[4],*v_d[4],*p_d[4], *Res_d, *FX_d, *FY_d;
float *FX_intm, *FY_intm;
float *test;
for(int i = 0; i<4; i++){
hipMalloc((void **) &u_d[i], pitch_e*YDIM*sizeof(float));
hipMalloc((void **) &v_d[i], pitch_e*YDIM*sizeof(float));
hipMalloc((void **) &p_d[i], pitch_e*YDIM*sizeof(float));
}
hipMalloc((void **) &test, pitch_e*YDIM*sizeof(float));
hipMalloc((void **) &FX_intm, int(OBSTD)*sizeof(float));
hipMalloc((void **) &FY_intm, int(OBSTD)*sizeof(float));
hipMalloc((void **) &Res_d,TMAX*sizeof(float));
hipMalloc((void **) &FX_d, TMAX*sizeof(float));
hipMalloc((void **) &FY_d, TMAX*sizeof(float));
for(int i = 0; i<4; i++){
hipMemcpy2D(u_d[i],pitch,u[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,hipMemcpyHostToDevice);
hipMemcpy2D(v_d[i],pitch,v[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,hipMemcpyHostToDevice);
hipMemcpy2D(p_d[i],pitch,p[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,hipMemcpyHostToDevice);
}
hipMemcpy(Res_d,Res,TMAX*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(FX_d,FX,TMAX*sizeof(float),hipMemcpyHostToDevice);
hipMemcpy(FY_d,FY,TMAX*sizeof(float),hipMemcpyHostToDevice);
hipFuncSetCacheConfig(ACM_U_Shared_Single,hipFuncCachePreferShared);
int A,B,C,D;
int its = 0; // never updated while the inner pseudo-iteration loop below is commented out
A = 0; B = 1; C = 2; D = 3;
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t++){
//for(int it = 0; it<MAXIT; it++){
swap(A,B);
int it = 10;
//if(it > 0)
if(TIMEMARCH == 1){
//ACM_U_Shared_Single<<<u_grid,u_threads,0,compute>>>(u_d[B],v_d[B],Res_d,u_d[A],v_d[A],p_d[A],nu,it,t,pitch_e);
hipLaunchKernelGGL(( ACM_U_Shared_Single2), dim3(u_grid),dim3(u_threads),0,compute, u_d[B],v_d[B],Res_d,u_d[A],v_d[A],p_d[A],u_d[A],v_d[A],nu,DTAU,it,t,pitch_e);
hipLaunchKernelGGL(( ACM_P), dim3(grid),dim3(threads),0,compute, p_d[B],Res_d,u_d[B],v_d[B],p_d[A],DTAU,it,t,pitch_e);
}
else if(TIMEMARCH == 2){
hipLaunchKernelGGL(( ACM_U_Shared_Single2), dim3(u_grid),dim3(u_threads),0,compute, u_d[C],v_d[C],Res_d,u_d[A],v_d[A],p_d[A],u_d[A],v_d[A],nu,DTAU*0.5f,it,t,pitch_e);
hipLaunchKernelGGL(( ACM_P), dim3(grid),dim3(threads),0,compute, p_d[C],Res_d,u_d[C],v_d[C],p_d[A],DTAU*0.5f,it,t,pitch_e);
hipLaunchKernelGGL(( ACM_U_Shared_Single2), dim3(u_grid),dim3(u_threads),0,compute, u_d[B],v_d[B],Res_d,u_d[C],v_d[C],p_d[C],u_d[A],v_d[A],nu,DTAU,it,t,pitch_e);
hipLaunchKernelGGL(( ACM_P), dim3(grid),dim3(threads),0,compute, p_d[B],Res_d,u_d[B],v_d[B],p_d[A],DTAU,it,t,pitch_e);
}
hipDeviceSynchronize();
hipMemcpyAsync(&Res[t],&Res_d[t],sizeof(float),hipMemcpyDeviceToHost,compute);
// if(it > 0){
// //if(sqrt(Res[t])/float((XDIM-2*HALO)*(YDIM-2*HALO)) < MAXRES || it == MAXIT-1){
// if(Res[t]<1 || it == MAXIT-1){
// its = it; it = MAXIT;
// }
// }
//}//end iteration
hipLaunchKernelGGL(( ACM_Forces1), dim3(f1_grid),dim3(f1_threads),0,compute, FX_intm,FY_intm,u_d[A],v_d[A],p_d[A],nu,t,pitch_e,test);
hipLaunchKernelGGL(( ACM_Forces2), dim3(f2_grid),dim3(f2_threads),0,compute, FX_d,FY_d,FX_intm,FY_intm,t,pitch_e,test);
hipMemcpyAsync(&FX[t],&FX_d[t],sizeof(float),hipMemcpyDeviceToHost,compute);
hipMemcpyAsync(&FY[t],&FY_d[t],sizeof(float),hipMemcpyDeviceToHost,compute);
if(t%1000==0 && t>0) cout<<"finished time step "<<t<<endl;
hipDeviceSynchronize();
//output_residual<<t<<", "<<its<<", "<<sqrt(Res[t])/((XDIM-2*HALO)*(YDIM-2*HALO))<<endl;
output_residual<<t<<", "<<its<<", "<<Res[t]<<endl;
output_force<<t<<", "<<FX[t]<<", "<<FY[t]<<endl;
//swap(C,D);
//swap(C,A);
//cout<<A<<", "<<B<<" "<<C<<", "<<D<<endl;
}//end time loop
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MNUPS)\n";
for(int i = 0; i<4; i++){
for(int j = 0; j<XDIM*YDIM; j++){
u[i][j] = 1000.f;
v[i][j] = 1000.f;
p[i][j] = 1000.f;
}
}
//Copy results from device to host
for(int i = 0; i<4; i++){
hipMemcpy2D(u[i],XDIM*sizeof(float),u_d[i],pitch,XDIM*sizeof(float),YDIM,hipMemcpyDeviceToHost);
hipMemcpy2D(v[i],XDIM*sizeof(float),v_d[i],pitch,XDIM*sizeof(float),YDIM,hipMemcpyDeviceToHost);
hipMemcpy2D(p[i],XDIM*sizeof(float),p_d[i],pitch,XDIM*sizeof(float),YDIM,hipMemcpyDeviceToHost);
}
//hipMemcpy2D(p[0],XDIM*sizeof(float),test,pitch,XDIM*sizeof(float),YDIM,hipMemcpyDeviceToHost);
hipMemcpy(Res,Res_d,TMAX*sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(FX,FX_d,TMAX*sizeof(float),hipMemcpyDeviceToHost);
hipMemcpy(FY,FY_d,TMAX*sizeof(float),hipMemcpyDeviceToHost);
WriteResults(output_results,u[0],v[0],p[0]);
// WriteResults(output_results,u[3],v[3],p[3]);
//WriteForces(output_force,FX,FY);
output_log.close();
output_results.close();
output_residual.close();
output_vel.close();
return 0;
}
| 60c7a5e4251e7e28749da2731bd1d5d0e5951638.cu | #include <cuda.h>
#include <iostream>
#include <ostream>
#include <fstream>
#include <sys/time.h>
#include <time.h>
using namespace std;
#define CASENAME "rk1"
#define BLOCKSIZEX 192
#define BLOCKSIZEY 1
#define UBLOCKSIZEX 32
#define UBLOCKSIZEY 10
#define HALO 2
#define XDIM 576
#define YDIM 165
#define OBSTD 20 //.f
#define OBSTX 122 //.f
#define OBSTY 72 //.f
#define TMAX 20000
#define MAXIT 5000
#define MAXRES 0.0001
#define RE 100
#define UMAX 1.f
#define BETA 0.01f //beta = 1/c^2
#define DTAU 0.003f
#define DT 1.f
#define CONV 2 //1:UDS 2:Hybrid
#define TIMEMARCH 1 //1:Explicit Euler 2:RK2
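// 2D channel flow past a square obstacle, solved with the artificial compressibility method
// on a staggered grid: u is stored on east cell faces, v on north faces, p at cell centers;
// pseudo-time (DTAU) updates drive the velocity divergence toward zero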
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-2*HALO)*0.5f;
float result = -1.0f*(((1.0f-(x-HALO)/radius))*((1.0f-(x-HALO)/radius))-1.0f);
return (result);
// return 1.f;
}
void AllocateArray(float ****f,int x,int y)
{
float ***array = new float **[4];
for(int i = 0;i<4;i++)
{
array[i] = new float *[x];
for(int j = 0;j<x;j++)
{
array[i][j] = new float [y];
for(int k = 0;k<y;k++)
array[i][j][k] = 0.f;
}
}
*f = array;
}
void DeallocateArray(float ***f,int x)
{
for(int i = 0;i<4;i++)
{
for(int j = 0;j<x;j++)
delete [] f[i][j];
delete [] f[i];
}
delete [] f;
}
void WriteResults(ostream &output, float *u, float *v, float *p)
{
output<<"VARIABLES = \"X\",\"Y\",\"u\",\"v\",\"p\"\n";
output<<"ZONE F=POINT, I="<<XDIM-HALO*2<<", J="<<YDIM-HALO*2<<endl;
float dx = 1;
float dy = 1;
float uval,vval,pval;
for(int j = HALO; j<YDIM-HALO; j++){
for(int i = HALO; i<XDIM-HALO; i++)
{
float xc = 0.5f*dx+(i)*dx;
float yc = 0.5f*dy+(j)*dy;
uval = 0.5f*(u[i+j*XDIM]+u[i-1 +j*XDIM]);
vval = 0.5f*(v[i+j*XDIM]+v[i+(j-1)*XDIM]);
pval = p[i+j*XDIM];
if(xc>OBSTX && xc<OBSTX+OBSTD && yc>OBSTY && yc<OBSTY+OBSTD)
{
uval = 0.f; vval = 0.f; pval = 0.f;
}
output<<xc<<", "<<yc<<", "<<uval<<", "<<vval<<", "<<pval<<endl;
}
}
}
void WriteResiduals(ostream &output, float *Res)
{
for(int i = 0; i<TMAX; i++)
output<<i<<", "<<sqrt(Res[i])/((XDIM-2*HALO)*(YDIM-2*HALO))<<endl;
}
void WriteInputs(ostream &output)
{
output<<"Domain size: \t"<<XDIM<<"x"<<YDIM<<endl;
output<<"Halo size: \t"<<HALO<<endl;
output<<"Target residual: \t"<<MAXRES<<endl;
output<<"Pseudo time step size: \t"<<DTAU<<endl;
output<<"Maximum iterations: \t"<<MAXIT<<endl;
output<<"Real time step size: \t"<<DT<<endl;
output<<"Maximum time steps: \t"<<TMAX<<endl;
output<<"Re: \t"<<RE<<endl;
output<<"uMax: \t"<<UMAX<<endl;
string scheme;
if(CONV == 0) scheme = "CDS ";
if(CONV == 1) scheme = "UDS ";
if(CONV == 2) scheme = "Hybrid";
if(CONV == 3) scheme = "QUICK ";
output<<"Convective discretization: \t"<<scheme<<endl;
}
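/*
 * ACM_U_Shared_Single: one explicit pseudo-time update of the staggered u (east-face) and
 * v (north-face) velocities. Each block stages a (UBLOCKSIZEX+2)x(UBLOCKSIZEY+2) tile of u
 * and v in shared memory, assembles the finite-volume coefficients (UDS or hybrid, per CONV),
 * adds the pressure-difference source term, advances by DTAU, and then applies the
 * inlet/outlet, channel-wall and obstacle boundary conditions. The _Single1 and _Single2
 * variants below take dtau as an argument, and _Single2 additionally advances from the
 * previous time level (uC,vC); main() launches only _Single2.
 */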
__global__ void ACM_U_Shared_Single(float* uA, float* vA, float* Res, float* uB, float* vB, float* pB, float nu, int it, int t, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
__shared__ float u[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
__shared__ float v[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
if(threadIdx.x == 0){
u[0][ty] = uB[ x-1+ y *pitch];
v[0][ty] = vB[ x-1+ y *pitch];
if(threadIdx.y == blockDim.y-1){
u[0][ty+1] = uB[ x-1+ (y+1)*pitch];
}
}
if(threadIdx.x == blockDim.x-1){
u[UBLOCKSIZEX+1][ty] = uB[ x+1+ y *pitch];
v[UBLOCKSIZEX+1][ty] = vB[ x+1+ y *pitch];
}
if(threadIdx.y == 0){
u[tx][0] = uB[ x+ (y-1)*pitch];
v[tx][0] = vB[ x+ (y-1)*pitch];
if(threadIdx.x == blockDim.x-1){
v[tx+1][0] = vB[ x+1+(y-1)*pitch];
}
}
if(threadIdx.y == blockDim.y-1){
u[tx][UBLOCKSIZEY+1] = uB[ x+ (y+1)*pitch];
v[tx][UBLOCKSIZEY+1] = vB[ x+ (y+1)*pitch];
}
u[tx][ty] = uB[ x+ (y )*pitch];
v[tx][ty] = vB[ x+ (y )*pitch];
}
	__syncthreads();
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
float Ae,Aw,An,As;
float AP_Ue,AE_Ue,AW_Ue,AN_Ue,AS_Ue; //A coeff for East node on u of east face
float AP_Vn,AE_Vn,AW_Vn,AN_Vn,AS_Vn;
float Fe,Fw,Fn,Fs;
float De,Dw,Dn,Ds;
float B_Ue,B_Vn;
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
Ae = dy; Aw = dy; An = dx; As = dx;
Fe = 0.5f*(u[tx ][ty ]+u[tx+1][ty ])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx ][ty ])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx+1][ty ])*An;
Fs = 0.5f*(v[tx ][ty-1]+v[tx+1][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Ue = max(-Fe,0.f)+De;
AW_Ue = max( Fw,0.f)+Dw;
AN_Ue = max(-Fn,0.f)+Dn;
AS_Ue = max( Fs,0.f)+Ds;
AP_Ue = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Ue = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Ue = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Ue = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Ue = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Ue = AE_Ue+AW_Ue+AN_Ue+AS_Ue;
}
B_Ue = Ae*(pB[x +(y )*pitch]-pB[x+1+(y )*pitch]);
uA[ x + y *pitch] =((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ]
+AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1]
-AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy)
)*DTAU
+ u[tx ][ty ];
Fe = 0.5f*(u[tx ][ty ]+u[tx ][ty+1])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx-1][ty+1])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx ][ty+1])*An;
Fs = 0.5f*(v[tx ][ty ]+v[tx ][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Vn = max(-Fe,0.f)+De;
AW_Vn = max( Fw,0.f)+Dw;
AN_Vn = max(-Fn,0.f)+Dn;
AS_Vn = max( Fs,0.f)+Ds;
AP_Vn = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Vn = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Vn = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Vn = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Vn = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Vn = AE_Vn+AW_Vn+AN_Vn+AS_Vn;
}
B_Vn = An*(pB[x +(y )*pitch]-pB[x +(y+1)*pitch]);
vA[ x + y *pitch] =((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ]
+AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1]
-AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy)
)*DTAU
+ v[tx ][ty ];
if(y < HALO)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(y > YDIM-HALO-2)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y > YDIM-HALO-3)
vA[ x + y *pitch] = 0.f;
if(x < HALO)
{
uA[ x + y *pitch] = UMAX*PoisProf(y);
vA[ x + y *pitch] = 0.f;
}
if(x > XDIM-HALO-2)
{
uA[ x + y *pitch] = u[tx-1][ty ];
vA[ x + y *pitch] = v[tx-1][ty ];
}
if(x > XDIM-HALO-3)
uA[ x + y *pitch] = u[tx-1][ty ];
if(x == OBSTX-1 && y>=OBSTY & y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(y == OBSTY-1 && x>=OBSTX & x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(x == OBSTX && y>=OBSTY & y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx-1][ty ];
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx+1][ty ];
if(y == OBSTY && x>=OBSTX & x<OBSTX+OBSTD-1){
if(t < 500) uA[ x + y *pitch] = u[tx][ty-1];
else
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1)
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(x == 0 && y == 0) Res[t] = 0.f;
}
__global__ void ACM_U_Shared_Single1(float* uA, float* vA, float* Res, float* uB, float* vB, float* pB, float nu, float dtau, int it, int t, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
__shared__ float u[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
__shared__ float v[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
if(threadIdx.x == 0){
u[0][ty] = uB[ x-1+ y *pitch];
v[0][ty] = vB[ x-1+ y *pitch];
if(threadIdx.y == blockDim.y-1){
u[0][ty+1] = uB[ x-1+ (y+1)*pitch];
}
}
if(threadIdx.x == blockDim.x-1){
u[UBLOCKSIZEX+1][ty] = uB[ x+1+ y *pitch];
v[UBLOCKSIZEX+1][ty] = vB[ x+1+ y *pitch];
}
if(threadIdx.y == 0){
u[tx][0] = uB[ x+ (y-1)*pitch];
v[tx][0] = vB[ x+ (y-1)*pitch];
if(threadIdx.x == blockDim.x-1){
v[tx+1][0] = vB[ x+1+(y-1)*pitch];
}
}
if(threadIdx.y == blockDim.y-1){
u[tx][UBLOCKSIZEY+1] = uB[ x+ (y+1)*pitch];
v[tx][UBLOCKSIZEY+1] = vB[ x+ (y+1)*pitch];
}
u[tx][ty] = uB[ x+ (y )*pitch];
v[tx][ty] = vB[ x+ (y )*pitch];
}
	__syncthreads();
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
float Ae,Aw,An,As;
float AP_Ue,AE_Ue,AW_Ue,AN_Ue,AS_Ue; //A coeff for East node on u of east face
float AP_Vn,AE_Vn,AW_Vn,AN_Vn,AS_Vn;
float Fe,Fw,Fn,Fs;
float De,Dw,Dn,Ds;
float B_Ue,B_Vn;
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
Ae = dy; Aw = dy; An = dx; As = dx;
Fe = 0.5f*(u[tx ][ty ]+u[tx+1][ty ])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx ][ty ])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx+1][ty ])*An;
Fs = 0.5f*(v[tx ][ty-1]+v[tx+1][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Ue = max(-Fe,0.f)+De;
AW_Ue = max( Fw,0.f)+Dw;
AN_Ue = max(-Fn,0.f)+Dn;
AS_Ue = max( Fs,0.f)+Ds;
AP_Ue = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Ue = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Ue = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Ue = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Ue = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Ue = AE_Ue+AW_Ue+AN_Ue+AS_Ue;
}
B_Ue = Ae*(pB[x +(y )*pitch]-pB[x+1+(y )*pitch]);
uA[ x + y *pitch] =((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ]
+AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1]
-AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy)
)*dtau
+ u[tx ][ty ];
Fe = 0.5f*(u[tx ][ty ]+u[tx ][ty+1])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx-1][ty+1])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx ][ty+1])*An;
Fs = 0.5f*(v[tx ][ty ]+v[tx ][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Vn = max(-Fe,0.f)+De;
AW_Vn = max( Fw,0.f)+Dw;
AN_Vn = max(-Fn,0.f)+Dn;
AS_Vn = max( Fs,0.f)+Ds;
AP_Vn = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Vn = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Vn = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Vn = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Vn = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Vn = AE_Vn+AW_Vn+AN_Vn+AS_Vn;
}
B_Vn = An*(pB[x +(y )*pitch]-pB[x +(y+1)*pitch]);
vA[ x + y *pitch] =((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ]
+AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1]
-AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy)
)*dtau
+ v[tx ][ty ];
if(y < HALO)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(y > YDIM-HALO-2)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y > YDIM-HALO-3)
vA[ x + y *pitch] = 0.f;
if(x < HALO)
{
uA[ x + y *pitch] = UMAX*PoisProf(y);
vA[ x + y *pitch] = 0.f;
}
if(x > XDIM-HALO-2)
{
uA[ x + y *pitch] = u[tx-1][ty ];
vA[ x + y *pitch] = v[tx-1][ty ];
}
if(x > XDIM-HALO-3)
uA[ x + y *pitch] = u[tx-1][ty ];
if(x == OBSTX-1 && y>=OBSTY & y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(y == OBSTY-1 && x>=OBSTX & x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(x == OBSTX && y>=OBSTY & y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx-1][ty ];
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx+1][ty ];
if(y == OBSTY && x>=OBSTX & x<OBSTX+OBSTD-1){
if(t < 500) uA[ x + y *pitch] = u[tx][ty-1];
else
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1)
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(x == 0 && y == 0) Res[t] = 0.f;
}
__global__ void ACM_U_Shared_Single2(float* uA, float* vA, float* Res, float* uB, float* vB, float* pB, float* uC, float* vC, float nu, float dtau, int it, int t, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int tx = threadIdx.x+1;
int ty = threadIdx.y+1;
__shared__ float u[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
__shared__ float v[UBLOCKSIZEX+2][UBLOCKSIZEY+2];
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
if(threadIdx.x == 0){
u[0][ty] = uB[ x-1+ y *pitch];
v[0][ty] = vB[ x-1+ y *pitch];
if(threadIdx.y == blockDim.y-1){
u[0][ty+1] = uB[ x-1+ (y+1)*pitch];
}
}
if(threadIdx.x == blockDim.x-1){
u[UBLOCKSIZEX+1][ty] = uB[ x+1+ y *pitch];
v[UBLOCKSIZEX+1][ty] = vB[ x+1+ y *pitch];
}
if(threadIdx.y == 0){
u[tx][0] = uB[ x+ (y-1)*pitch];
v[tx][0] = vB[ x+ (y-1)*pitch];
if(threadIdx.x == blockDim.x-1){
v[tx+1][0] = vB[ x+1+(y-1)*pitch];
}
}
if(threadIdx.y == blockDim.y-1){
u[tx][UBLOCKSIZEY+1] = uB[ x+ (y+1)*pitch];
v[tx][UBLOCKSIZEY+1] = vB[ x+ (y+1)*pitch];
}
u[tx][ty] = uB[ x+ (y )*pitch];
v[tx][ty] = vB[ x+ (y )*pitch];
}
	__syncthreads();
//if(x > HALO-2 && x < XDIM-HALO && y > HALO-2 && y < YDIM-HALO){
if(x > 0 && x < XDIM-1 && y > 0 && y < YDIM-1){
float Ae,Aw,An,As;
float AP_Ue,AE_Ue,AW_Ue,AN_Ue,AS_Ue; //A coeff for East node on u of east face
float AP_Vn,AE_Vn,AW_Vn,AN_Vn,AS_Vn;
float Fe,Fw,Fn,Fs;
float De,Dw,Dn,Ds;
float B_Ue,B_Vn;
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
Ae = dy; Aw = dy; An = dx; As = dx;
Fe = 0.5f*(u[tx ][ty ]+u[tx+1][ty ])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx ][ty ])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx+1][ty ])*An;
Fs = 0.5f*(v[tx ][ty-1]+v[tx+1][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Ue = max(-Fe,0.f)+De;
AW_Ue = max( Fw,0.f)+Dw;
AN_Ue = max(-Fn,0.f)+Dn;
AS_Ue = max( Fs,0.f)+Ds;
AP_Ue = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Ue = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Ue = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Ue = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Ue = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Ue = AE_Ue+AW_Ue+AN_Ue+AS_Ue;
}
B_Ue = Ae*(pB[x +(y )*pitch]-pB[x+1+(y )*pitch]);
//B_Ue-=( 3.f*u[tx ][ty ]-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/DT;
uA[ x + y *pitch] =((AE_Ue*u[tx+1][ty ]+AW_Ue*u[tx-1][ty ]
+AN_Ue*u[tx ][ty+1]+AS_Ue*u[tx ][ty-1]
-AP_Ue*u[tx ][ty ]+ B_Ue)/(dx*dy)
//-( 3.f*u[tx ][ty ]-4.f*uC[x +(y )*pitch]+uD[x +(y )*pitch])*0.5f/DT
//-( u[tx ][ty ]-uC[x +(y )*pitch])*0.5f/DT
)*dtau
+ uC[x+y*pitch];
Fe = 0.5f*(u[tx ][ty ]+u[tx ][ty+1])*Ae;
Fw = 0.5f*(u[tx-1][ty ]+u[tx-1][ty+1])*Aw;
Fn = 0.5f*(v[tx ][ty ]+v[tx ][ty+1])*An;
Fs = 0.5f*(v[tx ][ty ]+v[tx ][ty-1])*As;
De = nu*Ae/dx; Dw = nu*Aw/dx; Dn = nu*An/dy; Ds = nu*As/dy;
if(CONV == 1){
AE_Vn = max(-Fe,0.f)+De;
AW_Vn = max( Fw,0.f)+Dw;
AN_Vn = max(-Fn,0.f)+Dn;
AS_Vn = max( Fs,0.f)+Ds;
AP_Vn = max( Fe,0.f)+max(-Fw,0.f)+max( Fn,0.f)+max(-Fs,0.f)+Dw+De+Dn+Ds;
}
else if(CONV == 2){
AE_Vn = max(-Fe*0.5f+De,max(-Fe,0.f));
AW_Vn = max( Fw*0.5f+Dw,max( Fw,0.f));
AN_Vn = max(-Fn*0.5f+Dn,max(-Fn,0.f));
AS_Vn = max( Fs*0.5f+Ds,max( Fs,0.f));
AP_Vn = AE_Vn+AW_Vn+AN_Vn+AS_Vn;
}
B_Vn = An*(pB[x +(y )*pitch]-pB[x +(y+1)*pitch]);
//B_Vn-=( 3.f*v[tx ][ty ]-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/DT;
vA[ x + y *pitch] =((AE_Vn*v[tx+1][ty ]+AW_Vn*v[tx-1][ty ]
+AN_Vn*v[tx ][ty+1]+AS_Vn*v[tx ][ty-1]
-AP_Vn*v[tx ][ty ]+ B_Vn)/(dx*dy)
//-( 3.f*v[tx ][ty ]-4.f*vC[x +(y )*pitch]+vD[x +(y )*pitch])*0.5f/DT
//-( v[tx ][ty ]-vC[x +(y )*pitch])*0.5f/DT
)*dtau
+ vC[x+y*pitch];
if(y < HALO)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(y > YDIM-HALO-2)
{
vA[ x + y *pitch] = 0.f;
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y > YDIM-HALO-3)
vA[ x + y *pitch] = 0.f;
if(x < HALO)
{
uA[ x + y *pitch] = UMAX*PoisProf(y);
vA[ x + y *pitch] = 0.f;
}
if(x > XDIM-HALO-2)
{
uA[ x + y *pitch] = u[tx-1][ty ];
vA[ x + y *pitch] = v[tx-1][ty ];
}
if(x > XDIM-HALO-3)
uA[ x + y *pitch] = u[tx-1][ty ];
if(x == OBSTX-1 && y>=OBSTY & y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD)
uA[ x + y *pitch] = 0.f;
if(y == OBSTY-1 && x>=OBSTX & x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD)
vA[ x + y *pitch] = 0.f;
if(x == OBSTX && y>=OBSTY & y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx-1][ty ];
if(x == OBSTX+OBSTD-1 && y>=OBSTY && y<OBSTY+OBSTD-1)
vA[ x + y *pitch] = -v[tx+1][ty ];
if(y == OBSTY && x>=OBSTX & x<OBSTX+OBSTD-1){
if(t < 500) uA[ x + y *pitch] = u[tx][ty-1];
else
uA[ x + y *pitch] = -u[tx ][ty-1];
}
if(y == OBSTY+OBSTD-1 && x>=OBSTX && x<OBSTX+OBSTD-1)
uA[ x + y *pitch] = -u[tx ][ty+1];
}
if(x == 0 && y == 0) Res[t] = 0.f;
}
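/*
 * ACM_P: pressure update of the artificial compressibility iteration. The cell divergence of
 * the updated face velocities is formed and the pressure is relaxed as
 * p = p_old - (div(u)/BETA)*dtau. Interior cells (outside the obstacle) whose scaled
 * divergence still exceeds MAXRES are counted into Res[t] via a block reduction and
 * atomicAdd, which serves as the convergence monitor logged by main().
 */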
__global__ void ACM_P(float* pA, float* Res, float* uA, float* vA, float* pB, float dtau, int it, int t, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
__shared__ float sumRes[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
	__syncthreads();
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
if(x > HALO-1 && x < XDIM-HALO && y > HALO-1 && y < YDIM-HALO){
float res = (dy*(uA[ x + y *pitch]-uA[ x-1+ y *pitch])
+dx*(vA[ x + y *pitch]-vA[ x +(y-1)*pitch]))/(dx*dy);
pA[ x + y *pitch] = -res*dtau/BETA+pB[ x + y *pitch];
if(x > HALO && x < XDIM-HALO-2 && y > HALO && y < YDIM-HALO-2
&& abs(res/UMAX)>MAXRES && !(x>= OBSTX && y>=OBSTY && x<OBSTX+OBSTD && y<OBSTY+OBSTD)){
check[0] = 1;
sumRes[threadIdx.x]=1.f;
}
else
sumRes[threadIdx.x]=0.f;
}
else{
sumRes[threadIdx.x]=0.f;
}
	__syncthreads();
if(check[0] == 1){
//reduction for residual
int nTotalThreads = blockDim.x;
while(nTotalThreads > 1){
int halfPoint = (nTotalThreads >> 1);
if(threadIdx.x < halfPoint){
sumRes[threadIdx.x] += sumRes[threadIdx.x+halfPoint];
}
	__syncthreads();
nTotalThreads = halfPoint;
}
if(threadIdx.x == 0){
atomicAdd(&Res[t],sumRes[0]);
}
}
}
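//ACM_VelTransfer: device-side copy of the velocity fields (uA,vA) into (uD,vD).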
__global__ void ACM_VelTransfer(float* uD, float* vD, float* uA, float* vA, size_t pitch)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
uD[ x + y *pitch] = uA[ x + y *pitch];
vD[ x + y *pitch] = vA[ x + y *pitch];
}
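//ACM_Forces: early, partly commented-out version of the obstacle force computation. It is
//not launched from main(); the split kernels ACM_Forces1/ACM_Forces2 below are used instead.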
__global__ void ACM_Forces(float *FX, float *FY, float* uD, float* vD, float* pA, float nu, int t, size_t pitch, float *test)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
__shared__ float sumFX[BLOCKSIZEX],sumFY[BLOCKSIZEX];
__shared__ int check[1];
check[0] = 0;
sumFX[threadIdx.x] = 0.f;
sumFY[threadIdx.x] = 0.f;
	__syncthreads();
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
int a= 0;
//forces on bottom wall
if(x > OBSTX-1 && x<OBSTX+OBSTD && abs(y-OBSTY)<0.00001f){
a = 1;
check[0] = 1;
sumFY[threadIdx.x] += 0.f;//dx*pA[ x +(y-1)*pitch];
if(x<OBSTX+OBSTD-1)
sumFX[threadIdx.x] += dx*nu*2.f*uD[ x +(y )*pitch]/dy;
}
//forces on top wall
if(x > OBSTX-1 && x<OBSTX+OBSTD && abs(y-(OBSTY+OBSTD-1))<0.00001f){
a = 1;
check[0] = 1;
sumFY[threadIdx.x] -= 0.f;//dx*pA[ x +(y+1)*pitch];
if(x<OBSTX+OBSTD-1)
sumFX[threadIdx.x] += dx*nu*2.f*uD[ x +(y )*pitch]/dy;
}
//forces on left wall
if(y > OBSTY-1 && y<OBSTY+OBSTD && abs(x-OBSTX)<0.00001f){
//if(y > OBSTY-1 && y<OBSTY+OBSTD && x>OBSTX-1 && x<OBSTX+2){
//if(x>OBSTX-1.5 && x<OBSTX+1.5){
a = 1;
//if(x == OBSTX){
check[0] = 1;
sumFX[threadIdx.x] += dy*pA[ x-1+(y )*pitch];
//if(y<OBSTY+OBSTD-1)
sumFY[threadIdx.x] += 1.f;//dy*nu*2.f*vD[ x +(y )*pitch]/dx;
}
//forces on right wall
if(y > OBSTY-1 && y<OBSTY+OBSTD && abs(x-(OBSTX+OBSTD-1))<0.00001f){
a = 1;
check[0] = 1;
sumFX[threadIdx.x] -= dy*pA[ x+1+(y )*pitch];
if(y<OBSTY+OBSTD-1)
sumFY[threadIdx.x] += 0.f;//dy*nu*2.f*vD[ x +(y )*pitch]/dx;
}
	__syncthreads();
if(check[0] == 1){
//reduction for force
int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumFX[threadIdx.x] += sumFX[threadIdx.x+halfPoint];
// sumFY[threadIdx.x] += sumFY[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
		float sumX = 0.f, sumY = 0.f;
		if(threadIdx.x == 0){
			//serial reduction of the per-thread partial sums (sumFX/sumFY, not the global FX/FY arrays)
			for(int i = 0; i<blockDim.x; i++){
				sumX += sumFX[i];
				sumY += sumFY[i];
			}
			//accumulate the block totals exactly once, from thread 0 only
			atomicAdd(&FX[t],sumX);
			atomicAdd(&FY[t],sumY);
		}
}
}
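/*
 * ACM_Forces1: per-row partial drag/lift sums on the obstacle. The launch uses one
 * OBSTD-thread block per obstacle row (grid.y = OBSTD); each thread adds the pressure and
 * wall-shear contributions of its cell on the four obstacle faces, and thread 0 of the row
 * reduces the block and stores the row totals in FX_intm[y] / FY_intm[y].
 */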
__global__ void ACM_Forces1(float *FX_intm, float *FY_intm, float* uD, float* vD, float* pA, float nu, int t, size_t pitch, float *test)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int xcoord = x+OBSTX;
int ycoord = y+OBSTY;
__shared__ float sumFX[OBSTD],sumFY[OBSTD];
sumFX[threadIdx.x] = 0.f;
sumFY[threadIdx.x] = 0.f;
	__syncthreads();
float dx = 1.f/OBSTD;
float dy = 1.f/OBSTD;
//forces on bottom wall
if(y == 0){
sumFY[threadIdx.x] += dx*pA[ xcoord +(ycoord-1)*pitch];
sumFX[threadIdx.x] -= dx*nu*2.f*uD[ xcoord +(ycoord )*pitch]/dy;
}
//forces on top wall
if(y == OBSTD-1){
sumFY[threadIdx.x] -= dx*pA[ xcoord +(ycoord+1)*pitch];
sumFX[threadIdx.x] -= dx*nu*2.f*uD[ xcoord +(ycoord )*pitch]/dy;
}
//forces on left wall
if(x == 0){
sumFX[threadIdx.x] += dy*pA[ xcoord-1+(ycoord )*pitch];
		sumFY[threadIdx.x] -= dy*nu*2.f*vD[ xcoord +(ycoord )*pitch]/dx;	//normal gradient across a vertical wall scales with dx (dx == dy here)
}
//forces on right wall
if(x == OBSTD-1){
sumFX[threadIdx.x] -= dy*pA[ xcoord+1+(ycoord )*pitch];
sumFY[threadIdx.x] -= dy*nu*2.f*vD[ xcoord +(ycoord )*pitch]/dx;
}
	__syncthreads();
//reduction for force
// int nTotalThreads = blockDim.x;
// while(nTotalThreads > 1){
// int halfPoint = (nTotalThreads >> 1);
// if(threadIdx.x < halfPoint){
// sumFX[threadIdx.x] += sumFX[threadIdx.x+halfPoint];
// sumFY[threadIdx.x] += sumFY[threadIdx.x+halfPoint];
// }
// syncthreads();
// nTotalThreads = halfPoint;
// }
	float sum = 0;
	if(threadIdx.x == 0){
		for(int i = 0; i<blockDim.x; i++)
			sum += sumFX[i];
		sumFX[0] = sum;	//only thread 0 stores the reduced value (avoids a shared-memory write race)
	}
	sum = 0;
	if(threadIdx.x == 0){
		for(int i = 0; i<blockDim.x; i++)
			sum += sumFY[i];
		sumFY[0] = sum;
	}
if(threadIdx.x == 0){
FX_intm[y] = sumFX[0];
FY_intm[y] = sumFY[0];
}
}
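//ACM_Forces2: single-block reduction of the OBSTD per-row partial sums from ACM_Forces1
//into the total forces FX[t] and FY[t] for the current time step.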
__global__ void ACM_Forces2(float *FX, float *FY, float *FX_intm, float *FY_intm, int t, size_t pitch, float *test)
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int xcoord = x+OBSTX;
int ycoord = y+OBSTY;
__shared__ float sumFX[OBSTD],sumFY[OBSTD];
sumFX[threadIdx.y] = 0.f;
sumFY[threadIdx.y] = 0.f;
	__syncthreads();
sumFX[threadIdx.y] = FX_intm[threadIdx.y];
sumFY[threadIdx.y] = FY_intm[threadIdx.y];
//test[xcoord+ycoord*pitch] = 1.f;
	__syncthreads();
	float sum = 0;
	if(threadIdx.y == 0){
		for(int i = 0; i<blockDim.y; i++)
			sum += sumFX[i];
		sumFX[0] = sum;	//only thread 0 stores the reduced value (avoids a shared-memory write race)
	}
	sum = 0;
	if(threadIdx.y == 0){
		for(int i = 0; i<blockDim.y; i++)
			sum += sumFY[i];
		sumFY[0] = sum;
	}
if(threadIdx.y == 0){
FX[t] = sumFX[0];
FY[t] = sumFY[0];
}
}
int main()
{
ofstream output_log,output_results,output_residual,output_vel,output_force;
float nu = UMAX/RE;
float Ma = UMAX*sqrt(BETA);
cout<<"Ma = "<<Ma<<endl;
string FileName = CASENAME;
output_log.open ((FileName+".log").c_str());
output_results.open ((FileName+".dat").c_str());
output_residual.open ((FileName+".res").c_str());
output_vel.open ((FileName+".vel").c_str());
output_force.open ((FileName+".frc").c_str());
//write input parameters to console and log file
WriteInputs(cout);
WriteInputs(output_log);
//allocate and initialize arrays
float *u[4],*v[4],*p[4],*Res,*FX,*FY;
for(int i = 0; i<4; i++){
u[i] = (float *)malloc(XDIM*YDIM*sizeof(float));
v[i] = (float *)malloc(XDIM*YDIM*sizeof(float));
p[i] = (float *)malloc(XDIM*YDIM*sizeof(float));
}
Res = (float *)malloc(TMAX*sizeof(float));
FX = (float *)malloc(TMAX*sizeof(float));
FY = (float *)malloc(TMAX*sizeof(float));
//initialize host memory
for(int i = 0; i<4; i++){
for(int j = 0; j<XDIM*YDIM; j++){
u[i][j] = UMAX;
v[i][j] = 0.f;
p[i][j] = 0.f;
}
}
for(int j = 0; j<TMAX; j++){
Res[j] = 0.f;
FX[j] = 0.f;
FY[j] = 0.f;
}
//size_t memsize, memsize2;
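	//round the row pitch up to the next power of two so rows are aligned for coalesced
	//global-memory access; pitch is kept in bytes and pitch_e in float elements.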
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch *= sizeof(float);//pitch*sizeof(float);
size_t pitch_e = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
dim3 u_threads(UBLOCKSIZEX, UBLOCKSIZEY,1);
dim3 u_grid (((XDIM+UBLOCKSIZEX-1)/UBLOCKSIZEX),((YDIM+UBLOCKSIZEY-1)/UBLOCKSIZEY),1);
dim3 threads(BLOCKSIZEX, BLOCKSIZEY,1);
dim3 grid (((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 f1_threads (OBSTD,1,1);
dim3 f1_grid (1,OBSTD,1);
dim3 f2_threads (1,OBSTD,1);
dim3 f2_grid (1,1,1);
cudaStream_t compute;
cudaStream_t transfer;
cudaStreamCreate(&compute);
cudaStreamCreate(&transfer);
float *u_d[4],*v_d[4],*p_d[4], *Res_d, *FX_d, *FY_d;
float *FX_intm, *FY_intm;
float *test;
for(int i = 0; i<4; i++){
cudaMalloc((void **) &u_d[i], pitch_e*YDIM*sizeof(float));
cudaMalloc((void **) &v_d[i], pitch_e*YDIM*sizeof(float));
cudaMalloc((void **) &p_d[i], pitch_e*YDIM*sizeof(float));
}
cudaMalloc((void **) &test, pitch_e*YDIM*sizeof(float));
cudaMalloc((void **) &FX_intm, int(OBSTD)*sizeof(float));
cudaMalloc((void **) &FY_intm, int(OBSTD)*sizeof(float));
cudaMalloc((void **) &Res_d,TMAX*sizeof(float));
cudaMalloc((void **) &FX_d, TMAX*sizeof(float));
cudaMalloc((void **) &FY_d, TMAX*sizeof(float));
for(int i = 0; i<4; i++){
cudaMemcpy2D(u_d[i],pitch,u[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,cudaMemcpyHostToDevice);
cudaMemcpy2D(v_d[i],pitch,v[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,cudaMemcpyHostToDevice);
cudaMemcpy2D(p_d[i],pitch,p[i],XDIM*sizeof(float),XDIM*sizeof(float),YDIM,cudaMemcpyHostToDevice);
}
cudaMemcpy(Res_d,Res,TMAX*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(FX_d,FX,TMAX*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(FY_d,FY,TMAX*sizeof(float),cudaMemcpyHostToDevice);
cudaFuncSetCacheConfig(ACM_U_Shared_Single,cudaFuncCachePreferShared);
int A,B,C,D;
	int its = 0;	//iteration count; stays 0 because the inner convergence loop below is commented out
A = 0; B = 1; C = 2; D = 3;
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//time loop
for(int t = 0; t<TMAX; t++){
//for(int it = 0; it<MAXIT; it++){
swap(A,B);
int it = 10;
//if(it > 0)
if(TIMEMARCH == 1){
//ACM_U_Shared_Single<<<u_grid,u_threads,0,compute>>>(u_d[B],v_d[B],Res_d,u_d[A],v_d[A],p_d[A],nu,it,t,pitch_e);
ACM_U_Shared_Single2<<<u_grid,u_threads,0,compute>>>(u_d[B],v_d[B],Res_d,u_d[A],v_d[A],p_d[A],u_d[A],v_d[A],nu,DTAU,it,t,pitch_e);
ACM_P<<<grid,threads,0,compute>>>(p_d[B],Res_d,u_d[B],v_d[B],p_d[A],DTAU,it,t,pitch_e);
}
else if(TIMEMARCH == 2){
ACM_U_Shared_Single2<<<u_grid,u_threads,0,compute>>>(u_d[C],v_d[C],Res_d,u_d[A],v_d[A],p_d[A],u_d[A],v_d[A],nu,DTAU*0.5f,it,t,pitch_e);
ACM_P<<<grid,threads,0,compute>>>(p_d[C],Res_d,u_d[C],v_d[C],p_d[A],DTAU*0.5f,it,t,pitch_e);
ACM_U_Shared_Single2<<<u_grid,u_threads,0,compute>>>(u_d[B],v_d[B],Res_d,u_d[C],v_d[C],p_d[C],u_d[A],v_d[A],nu,DTAU,it,t,pitch_e);
ACM_P<<<grid,threads,0,compute>>>(p_d[B],Res_d,u_d[B],v_d[B],p_d[A],DTAU,it,t,pitch_e);
}
cudaDeviceSynchronize();
cudaMemcpyAsync(&Res[t],&Res_d[t],sizeof(float),cudaMemcpyDeviceToHost,compute);
// if(it > 0){
// //if(sqrt(Res[t])/float((XDIM-2*HALO)*(YDIM-2*HALO)) < MAXRES || it == MAXIT-1){
// if(Res[t]<1 || it == MAXIT-1){
// its = it; it = MAXIT;
// }
// }
//}//end iteration
ACM_Forces1<<<f1_grid,f1_threads,0,compute>>>(FX_intm,FY_intm,u_d[A],v_d[A],p_d[A],nu,t,pitch_e,test);
ACM_Forces2<<<f2_grid,f2_threads,0,compute>>>(FX_d,FY_d,FX_intm,FY_intm,t,pitch_e,test);
cudaMemcpyAsync(&FX[t],&FX_d[t],sizeof(float),cudaMemcpyDeviceToHost,compute);
cudaMemcpyAsync(&FY[t],&FY_d[t],sizeof(float),cudaMemcpyDeviceToHost,compute);
if(t%1000==0 && t>0) cout<<"finished time step "<<t<<endl;
cudaDeviceSynchronize();
//output_residual<<t<<", "<<its<<", "<<sqrt(Res[t])/((XDIM-2*HALO)*(YDIM-2*HALO))<<endl;
output_residual<<t<<", "<<its<<", "<<Res[t]<<endl;
output_force<<t<<", "<<FX[t]<<", "<<FY[t]<<endl;
//swap(C,D);
//swap(C,A);
//cout<<A<<", "<<B<<" "<<C<<", "<<D<<endl;
}//end time loop
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MNUPS)\n";
for(int i = 0; i<4; i++){
for(int j = 0; j<XDIM*YDIM; j++){
u[i][j] = 1000.f;
v[i][j] = 1000.f;
p[i][j] = 1000.f;
}
}
//Copy results from device to host
for(int i = 0; i<4; i++){
cudaMemcpy2D(u[i],XDIM*sizeof(float),u_d[i],pitch,XDIM*sizeof(float),YDIM,cudaMemcpyDeviceToHost);
cudaMemcpy2D(v[i],XDIM*sizeof(float),v_d[i],pitch,XDIM*sizeof(float),YDIM,cudaMemcpyDeviceToHost);
cudaMemcpy2D(p[i],XDIM*sizeof(float),p_d[i],pitch,XDIM*sizeof(float),YDIM,cudaMemcpyDeviceToHost);
}
//cudaMemcpy2D(p[0],XDIM*sizeof(float),test,pitch,XDIM*sizeof(float),YDIM,cudaMemcpyDeviceToHost);
cudaMemcpy(Res,Res_d,TMAX*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(FX,FX_d,TMAX*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(FY,FY_d,TMAX*sizeof(float),cudaMemcpyDeviceToHost);
WriteResults(output_results,u[0],v[0],p[0]);
// WriteResults(output_results,u[3],v[3],p[3]);
//WriteForces(output_force,FX,FY);
output_log.close();
output_results.close();
output_residual.close();
output_vel.close();
return 0;
}
|
ce38d58a64c2661d6a766a3987f9629bf5b3c64b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include <assert.h>	//assert() is used by checkCuda() when DEBUG is defined
#include "scanImpl.cu"
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
#ifdef HAS_GMM
#include "gmm.h"
#else
#define GMM_BUFFER_COW 0
#endif
#define CHECK_POINTER(p) do { \
if(p == NULL){ \
perror("Failed to allocate host memory"); \
exit(-1); \
}} while(0)
/*
* stringCmp: Compare two strings on GPU using one single GPU thread.
* @buf1: the first input buffer
* @buf2: the second input buffer
* @size: the length of data to be compared
*
* Return
* 1 if buf1 is larger
* 0 if they are equal
* -1 if buf2 is larger
*/
extern char *col_buf;
__device__ static inline int stringCmp(char* buf1, char *buf2, int size){
int i;
int res = 0;
for(i=0;i<size;i++){
if(buf1[i] > buf2[i]){
res = 1;
break;
}else if (buf1[i] < buf2[i]){
res = -1;
break;
}
if(buf1[i] == 0 && buf2[i] == 0)
break;
}
return res;
}
/*
* testCon: evaluate one selection predicate using one GPU thread
* @buf1: input data to be tested
 * @buf2: the test criterion, usually a number or a string.
* @size: the size of the input data buf1
* @type: the type of the input data buf1
* @rel: >,<, >=, <= or ==.
*
* Return:
 * 1 if the input data meets the criteria
 * 0 otherwise
*/
__device__ static inline int testCon(char *buf1, char* buf2, int size, int type, int rel){
int res = 1;
if (type == INT){
if(rel == EQ){
res = ( *((int*)buf1) == *(((int*)buf2)) );
}else if (rel == GTH){
res = ( *((int*)buf1) > *(((int*)buf2)) );
}else if (rel == LTH){
res = ( *((int*)buf1) < *(((int*)buf2)) );
}else if (rel == GEQ){
res = ( *((int*)buf1) >= *(((int*)buf2)) );
}else if (rel == LEQ){
res = ( *((int*)buf1) <= *(((int*)buf2)) );
}
}else if (type == FLOAT){
if(rel == EQ){
res = ( *((float*)buf1) == *(((float*)buf2)) );
}else if (rel == GTH){
res = ( *((float*)buf1) > *(((float*)buf2)) );
}else if (rel == LTH){
res = ( *((float*)buf1) < *(((float*)buf2)) );
}else if (rel == GEQ){
res = ( *((float*)buf1) >= *(((float*)buf2)) );
}else if (rel == LEQ){
res = ( *((float*)buf1) <= *(((float*)buf2)) );
}
}else{
int tmp = stringCmp(buf1,buf2,size);
if(rel == EQ){
res = (tmp == 0);
}else if (rel == GTH){
res = (tmp > 0);
}else if (rel == LTH){
res = (tmp < 0);
}else if (rel == GEQ){
res = (tmp >= 0);
}else if (rel == LEQ){
res = (tmp <= 0);
}
}
return res;
}
/*
* transform_dict_filter_and: merge the filter for dictionary-compressed predicate into the final filter.
* @dictFilter: the filter for the dictionary compressed data
* @dictFact: the compressed fact table column
* @tupleNum: the number of tuples in the column
* @filter: the filter for the uncompressed data
*/
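//Example: with byteNum = 2, each 32-bit word of the compressed column packs two 16-bit
//dictionary keys; iteration j masks out bits [j*16, (j+1)*16), shifts them down to recover
//key j, and ANDs the matching dictFilter entry into the per-tuple filter.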
__global__ static void transform_dict_filter_and(int * dictFilter, char *dictFact, long tupleNum, int dNum, int * filter, int byteNum){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int * fact = (int*)(dictFact + sizeof(struct dictHeader));
int numInt = (tupleNum * byteNum +sizeof(int) - 1) / sizeof(int) ;
for(long i=offset; i<numInt; i += stride){
int tmp = fact[i];
unsigned long bit = 1;
for(int j=0; j< sizeof(int)/byteNum; j++){
int t = (bit << ((j+1)*byteNum*8)) -1 - ((1<<(j*byteNum*8))-1);
int fkey = (tmp & t)>> (j*byteNum*8) ;
filter[i* sizeof(int)/byteNum + j] &= dictFilter[fkey];
}
}
}
__global__ static void transform_dict_filter_init(int * dictFilter, char *dictFact, long tupleNum, int dNum, int * filter,int byteNum){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int * fact = (int*)(dictFact + sizeof(struct dictHeader));
int numInt = (tupleNum * byteNum +sizeof(int) - 1) / sizeof(int) ;
for(long i=offset; i<numInt; i += stride){
int tmp = fact[i];
unsigned long bit = 1;
for(int j=0; j< sizeof(int)/byteNum; j++){
int t = (bit << ((j+1)*byteNum*8)) -1 - ((1<<(j*byteNum*8))-1);
int fkey = (tmp & t)>> (j*byteNum*8) ;
filter[i* sizeof(int)/byteNum + j] = dictFilter[fkey];
}
}
}
__global__ static void transform_dict_filter_or(int * dictFilter, char *fact, long tupleNum, int dNum, int * filter,int byteNum){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int numInt = (tupleNum * byteNum +sizeof(int) - 1) / sizeof(int) ;
for(long i=offset; i<numInt; i += stride){
int tmp = ((int *)fact)[i];
unsigned long bit = 1;
for(int j=0; j< sizeof(int)/byteNum; j++){
int t = (bit << ((j+1)*byteNum*8)) -1 - ((1<<(j*byteNum*8))-1);
int fkey = (tmp & t)>> (j*byteNum*8) ;
filter[i* sizeof(int)/byteNum + j] |= dictFilter[fkey];
}
}
}
/*
* genScanFilter_dict_init: generate the filter for dictionary-compressed predicate
*/
__global__ static void genScanFilter_dict_init(struct dictHeader *dheader, int colSize, int colType, int dNum, struct whereExp *where, int *dfilter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(int i=tid;i<dNum;i+=stride){
int fkey = dheader->hash[i];
con = testCon((char *)&fkey,where->content,colSize,colType,where->relation);
dfilter[i] = con;
}
}
__global__ static void genScanFilter_dict_or(struct dictHeader *dheader, int colSize, int colType, int dNum, struct whereExp *where, int *dfilter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(int i=tid;i<dNum;i+=stride){
int fkey = dheader->hash[i];
con = testCon((char *)&fkey,where->content,colSize,colType,where->relation);
dfilter[i] |= con;
}
}
__global__ static void genScanFilter_dict_and(struct dictHeader *dheader, int colSize, int colType, int dNum, struct whereExp *where, int *dfilter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(int i=tid;i<dNum;i+=stride){
int fkey = dheader->hash[i];
con = testCon((char *)&fkey,where->content,colSize,colType,where->relation);
dfilter[i] &= con;
}
}
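//genScanFilter_rle: evaluate the predicate once per RLE run (value, count, start position)
//and apply the outcome to every tuple covered by the run, ANDed or ORed into the filter
//according to andOr.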
__global__ static void genScanFilter_rle(char *col, int colSize, int colType, long tupleNum, struct whereExp *where, int andOr, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
struct rleHeader *rheader = (struct rleHeader *) col;
int dNum = rheader->dictNum;
for(int i = tid; i<dNum; i += stride){
int fkey = ((int *)(col+sizeof(struct rleHeader)))[i];
int fcount = ((int *)(col+sizeof(struct rleHeader)))[i + dNum];
int fpos = ((int *)(col+sizeof(struct rleHeader)))[i + 2*dNum];
con = testCon((char *)&fkey,where->content,colSize,colType,where->relation);
for(int k=0;k<fcount;k++){
if(andOr == AND)
filter[fpos+k] &= con;
else
filter[fpos+k] |= con;
}
}
}
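/*
 * The genScanFilter_{init,and,or}_* kernels below differ only in how the per-tuple predicate
 * result is combined into filter[i] (plain assignment, &= or |=) and in the column type and
 * comparison operator that is inlined; each variant lets one thread stride over the tuples.
 */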
__global__ static void genScanFilter_and_eq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) == 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_gth(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) > 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_lth(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) < 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_geq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) >= 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_leq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) <= 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_soa(char *col, int colSize, int colType, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int rel = where->relation;
int con;
for(long i = tid; i<tupleNum;i+=stride){
int cmp = 0;
for(int j=0;j<colSize;j++){
int pos = j*tupleNum + i;
if(col[pos] > where->content[j]){
cmp = 1;
break;
}else if (col[pos] < where->content[j]){
cmp = -1;
break;
}
}
if (rel == EQ){
con = (cmp == 0);
}else if(rel == LTH){
con = (cmp <0);
}else if(rel == GTH){
con = (cmp >0);
}else if (rel == LEQ){
con = (cmp <=0);
}else if (rel == GEQ){
con = (cmp >=0);
}
filter[i] &= con;
}
}
__global__ static void genScanFilter_init_int_eq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] == where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_eq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] == where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_int_gth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] > where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_gth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] > where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_int_lth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] < where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_lth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] < where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_int_geq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] >= where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_geq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] >= where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_int_leq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] <= where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_leq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] <= where;
filter[i] = con;
}
}
__global__ static void genScanFilter_and_int_eq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] == where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_eq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] == where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_int_geq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] >= where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_geq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] >= where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_int_leq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] <= where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_leq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] <= where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_int_gth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] > where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_gth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] > where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_int_lth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] < where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_lth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] < where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_init_eq(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) == 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_init_gth(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) > 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_init_lth(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) < 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_init_geq(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) >= 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_init_leq(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) <= 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_or_eq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize) == 0);
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_gth(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize)> 0);
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_lth(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize) < 0);
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_geq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize) >= 0);
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_leq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize) <= 0);
filter[i] |= con;
}
}
/*
* This is only for testing the performance of soa in certain cases.
*/
__global__ static void genScanFilter_or_soa(char *col, int colSize, int colType, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int rel = where->relation;
int con;
for(long i = tid; i<tupleNum;i+=stride){
int cmp = 0;
for(int j=0;j<colSize;j++){
int pos = j*tupleNum + i;
if(col[pos] > where->content[j]){
cmp = 1;
break;
}else if (col[pos] < where->content[j]){
cmp = -1;
break;
}
}
if (rel == EQ){
con = (cmp == 0);
}else if(rel == LTH){
con = (cmp <0);
}else if(rel == GTH){
con = (cmp >0);
}else if (rel == LEQ){
con = (cmp <=0);
}else if (rel == GEQ){
con = (cmp >=0);
}
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_int_eq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] == where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_eq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] == where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_int_gth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] > where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_gth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] > where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_int_lth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] < where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_lth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] < where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_int_geq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] >= where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_geq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] >= where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_int_leq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] <= where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_leq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] <= where;
filter[i] |= con;
}
}
/*
* countScanNum: count the number of results that each thread generates.
*/
__global__ static void countScanNum(int *filter, long tupleNum, int * count){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int localCount = 0;
for(long i = tid; i<tupleNum; i += stride){
localCount += filter[i];
}
count[tid] = localCount;
}
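/*
 * The result-gathering kernels below assume psum[tid] holds the exclusive prefix sum of the
 * per-thread match counts from countScanNum, i.e. the index of the first output slot owned
 * by thread tid, so each thread writes its matching tuples into a disjoint slice of the result.
 */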
/*
* scan_dict_other: generate the result for dictionary-compressed column.
*/
__global__ static void scan_dict_other(char *col, struct dictHeader * dheader, int byteNum,int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int pos = psum[tid] * colSize;
for(long i = tid; i<tupleNum; i+= stride){
if(filter[i] == 1){
int key = 0;
memcpy(&key, col + sizeof(struct dictHeader) + i* dheader->bitNum/8, dheader->bitNum/8);
memcpy(result+pos,&dheader->hash[key],colSize);
pos += colSize;
}
}
}
__global__ static void scan_dict_int(char *col, struct dictHeader * dheader,int byteNum,int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int localCount = psum[tid];
for(long i = tid; i<tupleNum; i+= stride){
if(filter[i] == 1){
int key = 0;
memcpy(&key, col + sizeof(struct dictHeader) + i*byteNum, byteNum);
((int *)result)[localCount] = dheader->hash[key];
localCount ++;
}
}
}
/*
* scan_other: generate scan result for uncompressed column.
*/
__global__ static void scan_other(char *col, int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int pos = psum[tid] * colSize;
for(long i = tid; i<tupleNum;i+=stride){
if(filter[i] == 1){
memcpy(result+pos,col+i*colSize,colSize);
pos += colSize;
}
}
}
__global__ static void scan_other_soa(char *col, int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tNum = psum[tid];
for(long i = tid; i<tupleNum;i+=stride){
if(filter[i] == 1){
for(int j=0;j<colSize;j++){
long inPos = j*tupleNum + i;
long outPos = j*resultNum + tNum;
result[outPos] = col[inPos];
			}
			tNum ++;	//move to the next output slot owned by this thread
		}
}
}
__global__ static void scan_int(char *col, int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int localCount = psum[tid] ;
for(long i = tid; i<tupleNum;i+=stride){
if(filter[i] == 1){
((int*)result)[localCount] = ((int*)col)[i];
localCount ++;
}
}
}
__global__ static void unpack_rle(char * fact, char * rle, long tupleNum, int dNum){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i<dNum; i+=stride){
int fvalue = ((int *)(fact+sizeof(struct rleHeader)))[i];
int fcount = ((int *)(fact+sizeof(struct rleHeader)))[i + dNum];
int fpos = ((int *)(fact+sizeof(struct rleHeader)))[i + 2*dNum];
for(int k=0;k<fcount;k++){
((int*)rle)[fpos+ k] = fvalue;
}
}
}
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
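/*
 * profilescan: micro-benchmark for a single integer less-than scan filter. Every iteration
 * times the host-to-device copy of the column, the genScanFilter_init_int_lth kernel and the
 * copy of the result back to the host; the average effective throughput is then printed as
 * bytes * 1e-6 / ms, i.e. GB/s.
 */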
void profilescan(int *h_a,
int *h_b,
int *d,
int *filter,
int n,
int where,
char *desc,
unsigned int loopTotal)
{
dim3 block(256);
dim3 grid((n + block.x - 1) / block.x);
float time,stime;
// events for timing
hipEvent_t startEvent, stopEvent;
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
stime=0;
int bytes=n * sizeof(int);
for(int i = 1; i <= loopTotal; i++){
checkCuda( hipEventRecord(startEvent, 0) );
checkCuda( hipMemcpy(d, h_a, bytes, hipMemcpyHostToDevice) );
// printf("%d\n", clock());
hipLaunchKernelGGL(( genScanFilter_init_int_lth), dim3(grid),dim3(block), 0, 0, (char *)d, n, where, filter);
checkCuda(hipDeviceSynchronize());
		checkCuda( hipMemcpy(h_b, filter, bytes, hipMemcpyDeviceToHost) );	//copy the scan result (filter) back to the host
checkCuda( hipEventRecord(stopEvent, 0) );
checkCuda( hipEventSynchronize(stopEvent) );
checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) );
stime += time;
//printf("%f\n",stime);
}
printf("%f\n" ,bytes * 1e-6/(stime / loopTotal));
checkCuda( hipEventDestroy(startEvent) );
checkCuda( hipEventDestroy(stopEvent) );
}
int main(int argc, char ** argv)
{
int inputN;
sscanf(argv[1],"%d",&inputN);
unsigned int nElements = inputN;
const unsigned int bytes = nElements * sizeof(int);
// host arrays
int *h_aPageable, *h_bPageable;
int *h_aPinned, *h_bPinned;
// device array
int *d_a,*filter;
// allocate and initialize
h_aPageable = (int*)malloc(bytes );
h_bPageable = (int*)malloc(bytes ); // host pageable
checkCuda( hipHostMalloc((void**)&h_aPinned, bytes ) ); // host pinned
checkCuda( hipHostMalloc((void**)&h_bPinned, bytes ) );
checkCuda( hipMalloc((void**)&d_a, bytes ) ); // device
checkCuda( hipMalloc((void**)&filter, bytes ) ); // device return
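	//pinned (page-locked) host buffers let hipMemcpy run at full bus bandwidth, which is what
	//the benchmark below exercises; the pageable variant is left commented out for comparison.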
for (int i = 0; i < nElements; ++i) h_aPageable[i] = i;
memcpy(h_aPinned, h_aPageable, bytes );
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
	// output device info and transfer size
	hipDeviceProp_t prop;
	checkCuda( hipGetDeviceProperties(&prop, 0) );
	printf("Device: %s, transfer size: %.1f MB\n", prop.name, bytes / (1024.0 * 1024.0));
	// perform the scan benchmark (less-than filter); constC is assumed to come from a shared header such as ../include/common.h
// profilescan(h_aPageable, h_bPageable, d_a, filter, nElements, constC,"Pageable",20);
profilescan(h_aPinned, h_bPinned, d_a, filter,nElements, constC,"Pinned",20);
// cleanup
	hipFree(d_a);
	hipFree(filter);
	hipHostFree(h_aPinned);
	hipHostFree(h_bPinned);
	free(h_aPageable);
	free(h_bPageable);
return 0;
}
| ce38d58a64c2661d6a766a3987f9629bf5b3c64b.cu | /*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#include "scanImpl.cu"
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
#ifdef HAS_GMM
#include "gmm.h"
#else
#define GMM_BUFFER_COW 0
#endif
#define CHECK_POINTER(p) do { \
if(p == NULL){ \
perror("Failed to allocate host memory"); \
exit(-1); \
}} while(0)
/*
* stringCmp: Compare two strings on GPU using one single GPU thread.
* @buf1: the first input buffer
* @buf2: the second input buffer
* @size: the length of data to be compared
*
* Return
* 1 if buf1 is larger
* 0 if they are equal
* -1 if buf2 is larger
*/
extern char *col_buf;
__device__ static inline int stringCmp(char* buf1, char *buf2, int size){
int i;
int res = 0;
for(i=0;i<size;i++){
if(buf1[i] > buf2[i]){
res = 1;
break;
}else if (buf1[i] < buf2[i]){
res = -1;
break;
}
if(buf1[i] == 0 && buf2[i] == 0)
break;
}
return res;
}
/*
* testCon: evaluate one selection predicate using one GPU thread
* @buf1: input data to be tested
 * @buf2: the test criterion, usually a number or a string.
* @size: the size of the input data buf1
* @type: the type of the input data buf1
* @rel: >,<, >=, <= or ==.
*
* Return:
 * 1 if the input data meets the criteria
 * 0 otherwise
*/
__device__ static inline int testCon(char *buf1, char* buf2, int size, int type, int rel){
int res = 1;
if (type == INT){
if(rel == EQ){
res = ( *((int*)buf1) == *(((int*)buf2)) );
}else if (rel == GTH){
res = ( *((int*)buf1) > *(((int*)buf2)) );
}else if (rel == LTH){
res = ( *((int*)buf1) < *(((int*)buf2)) );
}else if (rel == GEQ){
res = ( *((int*)buf1) >= *(((int*)buf2)) );
}else if (rel == LEQ){
res = ( *((int*)buf1) <= *(((int*)buf2)) );
}
}else if (type == FLOAT){
if(rel == EQ){
res = ( *((float*)buf1) == *(((float*)buf2)) );
}else if (rel == GTH){
res = ( *((float*)buf1) > *(((float*)buf2)) );
}else if (rel == LTH){
res = ( *((float*)buf1) < *(((float*)buf2)) );
}else if (rel == GEQ){
res = ( *((float*)buf1) >= *(((float*)buf2)) );
}else if (rel == LEQ){
res = ( *((float*)buf1) <= *(((float*)buf2)) );
}
}else{
int tmp = stringCmp(buf1,buf2,size);
if(rel == EQ){
res = (tmp == 0);
}else if (rel == GTH){
res = (tmp > 0);
}else if (rel == LTH){
res = (tmp < 0);
}else if (rel == GEQ){
res = (tmp >= 0);
}else if (rel == LEQ){
res = (tmp <= 0);
}
}
return res;
}
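/*
 * Illustrative example (hypothetical values): for an INT column,
 * testCon((char *)&x, (char *)&c, sizeof(int), INT, GTH) evaluates x > c and
 * returns 1 when the predicate holds, 0 otherwise.
 */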
/*
* transform_dict_filter_and: merge the filter for dictionary-compressed predicate into the final filter.
* @dictFilter: the filter for the dictionary compressed data
* @dictFact: the compressed fact table column
* @tupleNum: the number of tuples in the column
* @filter: the filter for the uncompressed data
*/
__global__ static void transform_dict_filter_and(int * dictFilter, char *dictFact, long tupleNum, int dNum, int * filter, int byteNum){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int * fact = (int*)(dictFact + sizeof(struct dictHeader));
int numInt = (tupleNum * byteNum +sizeof(int) - 1) / sizeof(int) ;
for(long i=offset; i<numInt; i += stride){
int tmp = fact[i];
unsigned long bit = 1;
for(int j=0; j< sizeof(int)/byteNum; j++){
int t = (bit << ((j+1)*byteNum*8)) -1 - ((1<<(j*byteNum*8))-1);
int fkey = (tmp & t)>> (j*byteNum*8) ;
filter[i* sizeof(int)/byteNum + j] &= dictFilter[fkey];
}
}
}
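/*
 * Layout note: the dictionary-compressed column packs sizeof(int)/byteNum keys
 * into every 4-byte word, so for byteNum == 2 word i holds the keys of tuples
 * 2*i and 2*i+1. This is why the kernel masks and shifts byteNum*8 bits at a
 * time before indexing dictFilter.
 */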
__global__ static void transform_dict_filter_init(int * dictFilter, char *dictFact, long tupleNum, int dNum, int * filter,int byteNum){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int * fact = (int*)(dictFact + sizeof(struct dictHeader));
int numInt = (tupleNum * byteNum +sizeof(int) - 1) / sizeof(int) ;
for(long i=offset; i<numInt; i += stride){
int tmp = fact[i];
unsigned long bit = 1;
for(int j=0; j< sizeof(int)/byteNum; j++){
int t = (bit << ((j+1)*byteNum*8)) -1 - ((1<<(j*byteNum*8))-1);
int fkey = (tmp & t)>> (j*byteNum*8) ;
filter[i* sizeof(int)/byteNum + j] = dictFilter[fkey];
}
}
}
__global__ static void transform_dict_filter_or(int * dictFilter, char *fact, long tupleNum, int dNum, int * filter,int byteNum){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int numInt = (tupleNum * byteNum +sizeof(int) - 1) / sizeof(int) ;
for(long i=offset; i<numInt; i += stride){
int tmp = ((int *)fact)[i];
unsigned long bit = 1;
for(int j=0; j< sizeof(int)/byteNum; j++){
int t = (bit << ((j+1)*byteNum*8)) -1 - ((1<<(j*byteNum*8))-1);
int fkey = (tmp & t)>> (j*byteNum*8) ;
filter[i* sizeof(int)/byteNum + j] |= dictFilter[fkey];
}
}
}
/*
* genScanFilter_dict_init: generate the filter for dictionary-compressed predicate
*/
__global__ static void genScanFilter_dict_init(struct dictHeader *dheader, int colSize, int colType, int dNum, struct whereExp *where, int *dfilter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(int i=tid;i<dNum;i+=stride){
int fkey = dheader->hash[i];
con = testCon((char *)&fkey,where->content,colSize,colType,where->relation);
dfilter[i] = con;
}
}
__global__ static void genScanFilter_dict_or(struct dictHeader *dheader, int colSize, int colType, int dNum, struct whereExp *where, int *dfilter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(int i=tid;i<dNum;i+=stride){
int fkey = dheader->hash[i];
con = testCon((char *)&fkey,where->content,colSize,colType,where->relation);
dfilter[i] |= con;
}
}
__global__ static void genScanFilter_dict_and(struct dictHeader *dheader, int colSize, int colType, int dNum, struct whereExp *where, int *dfilter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(int i=tid;i<dNum;i+=stride){
int fkey = dheader->hash[i];
con = testCon((char *)&fkey,where->content,colSize,colType,where->relation);
dfilter[i] &= con;
}
}
__global__ static void genScanFilter_rle(char *col, int colSize, int colType, long tupleNum, struct whereExp *where, int andOr, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
struct rleHeader *rheader = (struct rleHeader *) col;
int dNum = rheader->dictNum;
for(int i = tid; i<dNum; i += stride){
int fkey = ((int *)(col+sizeof(struct rleHeader)))[i];
int fcount = ((int *)(col+sizeof(struct rleHeader)))[i + dNum];
int fpos = ((int *)(col+sizeof(struct rleHeader)))[i + 2*dNum];
con = testCon((char *)&fkey,where->content,colSize,colType,where->relation);
for(int k=0;k<fcount;k++){
if(andOr == AND)
filter[fpos+k] &= con;
else
filter[fpos+k] |= con;
}
}
}
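/*
 * Layout note: after the rleHeader, the RLE column stores three int arrays of
 * length dictNum back to back -- run values, run lengths and run start
 * positions -- so entry i above describes fcount copies of fkey starting at
 * row fpos.
 */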
__global__ static void genScanFilter_and_eq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) == 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_gth(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) > 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_lth(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) < 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_geq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) >= 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_leq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content, colSize) <= 0);
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_soa(char *col, int colSize, int colType, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int rel = where->relation;
int con;
for(long i = tid; i<tupleNum;i+=stride){
int cmp = 0;
for(int j=0;j<colSize;j++){
int pos = j*tupleNum + i;
if(col[pos] > where->content[j]){
cmp = 1;
break;
}else if (col[pos] < where->content[j]){
cmp = -1;
break;
}
}
if (rel == EQ){
con = (cmp == 0);
}else if(rel == LTH){
con = (cmp <0);
}else if(rel == GTH){
con = (cmp >0);
}else if (rel == LEQ){
con = (cmp <=0);
}else if (rel == GEQ){
con = (cmp >=0);
}
filter[i] &= con;
}
}
__global__ static void genScanFilter_init_int_eq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] == where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_eq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] == where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_int_gth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] > where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_gth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] > where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_int_lth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] < where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_lth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] < where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_int_geq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] >= where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_geq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] >= where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_int_leq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] <= where;
filter[i] = con;
}
}
__global__ static void genScanFilter_init_float_leq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] <= where;
filter[i] = con;
}
}
__global__ static void genScanFilter_and_int_eq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] == where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_eq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] == where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_int_geq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] >= where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_geq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] >= where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_int_leq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] <= where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_leq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] <= where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_int_gth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] > where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_gth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] > where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_int_lth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] < where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_and_float_lth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] < where;
filter[i] &= con;
}
}
__global__ static void genScanFilter_init_eq(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) == 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_init_gth(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) > 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_init_lth(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) < 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_init_geq(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) >= 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_init_leq(char *col, int colSize,long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize*i, where->content,colSize) <= 0);
filter[i] = con;
}
}
__global__ static void genScanFilter_or_eq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize) == 0);
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_gth(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize)> 0);
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_lth(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize) < 0);
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_geq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize) >= 0);
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_leq(char *col, int colSize, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = (stringCmp(col+colSize *i, where->content, colSize) <= 0);
filter[i] |= con;
}
}
/*
* This is only for testing the performance of soa in certain cases.
*/
__global__ static void genScanFilter_or_soa(char *col, int colSize, int colType, long tupleNum, struct whereExp * where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int rel = where->relation;
int con;
for(long i = tid; i<tupleNum;i+=stride){
int cmp = 0;
for(int j=0;j<colSize;j++){
int pos = j*tupleNum + i;
if(col[pos] > where->content[j]){
cmp = 1;
break;
}else if (col[pos] < where->content[j]){
cmp = -1;
break;
}
}
if (rel == EQ){
con = (cmp == 0);
}else if(rel == LTH){
con = (cmp <0);
}else if(rel == GTH){
con = (cmp >0);
}else if (rel == LEQ){
con = (cmp <=0);
}else if (rel == GEQ){
con = (cmp >=0);
}
filter[i] |= con;
}
}
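/*
 * Layout note for the *_soa variants: the column is stored byte-planar, so the
 * j-th byte of tuple i lives at col[j * tupleNum + i]. The per-tuple comparison
 * therefore walks colSize separate planes instead of one contiguous
 * colSize-byte field.
 */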
__global__ static void genScanFilter_or_int_eq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] == where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_eq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] == where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_int_gth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] > where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_gth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] > where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_int_lth(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] < where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_lth(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] < where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_int_geq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] >= where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_geq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] >= where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_int_leq(char *col, long tupleNum, int where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((int*)col)[i] <= where;
filter[i] |= con;
}
}
__global__ static void genScanFilter_or_float_leq(char *col, long tupleNum, float where, int * filter){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int con;
for(long i = tid; i<tupleNum;i+=stride){
con = ((float*)col)[i] <= where;
filter[i] |= con;
}
}
/*
* countScanNum: count the number of results that each thread generates.
*/
__global__ static void countScanNum(int *filter, long tupleNum, int * count){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int localCount = 0;
for(long i = tid; i<tupleNum; i += stride){
localCount += filter[i];
}
count[tid] = localCount;
}
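/*
 * Pipeline note: the per-thread counts produced here are fed to the prefix-sum
 * code from scanImpl.cu (included above) to build the psum array that the
 * scan_* kernels below use as per-thread output offsets.
 */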
/*
* scan_dict_other: generate the result for dictionary-compressed column.
*/
__global__ static void scan_dict_other(char *col, struct dictHeader * dheader, int byteNum,int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int pos = psum[tid] * colSize;
for(long i = tid; i<tupleNum; i+= stride){
if(filter[i] == 1){
int key = 0;
memcpy(&key, col + sizeof(struct dictHeader) + i* dheader->bitNum/8, dheader->bitNum/8);
memcpy(result+pos,&dheader->hash[key],colSize);
pos += colSize;
}
}
}
__global__ static void scan_dict_int(char *col, struct dictHeader * dheader,int byteNum,int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int localCount = psum[tid];
for(long i = tid; i<tupleNum; i+= stride){
if(filter[i] == 1){
int key = 0;
memcpy(&key, col + sizeof(struct dictHeader) + i*byteNum, byteNum);
((int *)result)[localCount] = dheader->hash[key];
localCount ++;
}
}
}
/*
* scan_other: generate scan result for uncompressed column.
*/
__global__ static void scan_other(char *col, int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int pos = psum[tid] * colSize;
for(long i = tid; i<tupleNum;i+=stride){
if(filter[i] == 1){
memcpy(result+pos,col+i*colSize,colSize);
pos += colSize;
}
}
}
__global__ static void scan_other_soa(char *col, int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tNum = psum[tid];
for(long i = tid; i<tupleNum;i+=stride){
if(filter[i] == 1){
for(int j=0;j<colSize;j++){
long inPos = j*tupleNum + i;
long outPos = j*resultNum + tNum;
result[outPos] = col[inPos];
}
}
}
}
__global__ static void scan_int(char *col, int colSize, long tupleNum, int *psum, long resultNum, int * filter, char * result){
int stride = blockDim.x * gridDim.x;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int localCount = psum[tid] ;
for(long i = tid; i<tupleNum;i+=stride){
if(filter[i] == 1){
((int*)result)[localCount] = ((int*)col)[i];
localCount ++;
}
}
}
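/*
 * Output note: psum holds an exclusive prefix sum of the per-thread match
 * counts, so each thread writes its qualifying tuples into a private,
 * non-overlapping range of the result buffer.
 */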
__global__ void static unpack_rle(char * fact, char * rle, long tupleNum, int dNum){
int offset = blockIdx.x*blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=offset; i<dNum; i+=stride){
int fvalue = ((int *)(fact+sizeof(struct rleHeader)))[i];
int fcount = ((int *)(fact+sizeof(struct rleHeader)))[i + dNum];
int fpos = ((int *)(fact+sizeof(struct rleHeader)))[i + 2*dNum];
for(int k=0;k<fcount;k++){
((int*)rle)[fpos+ k] = fvalue;
}
}
}
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
void profilescan(int *h_a,
int *h_b,
int *d,
int *filter,
int n,
int where,
char *desc,
unsigned int loopTotal)
{
dim3 block(256);
dim3 grid((n + block.x - 1) / block.x);
float time,stime;
// events for timing
cudaEvent_t startEvent, stopEvent;
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
stime=0;
int bytes=n * sizeof(int);
for(int i = 1; i <= loopTotal; i++){
checkCuda( cudaEventRecord(startEvent, 0) );
checkCuda( cudaMemcpy(d, h_a, bytes, cudaMemcpyHostToDevice) );
// printf("%d\n", clock());
genScanFilter_init_int_lth<<<grid,block>>>((char *)d, n, where, filter);
checkCuda(cudaDeviceSynchronize()); // cudaThreadSynchronize() is deprecated
checkCuda( cudaMemcpy(h_b, d, bytes, cudaMemcpyDeviceToHost) );
checkCuda( cudaEventRecord(stopEvent, 0) );
checkCuda( cudaEventSynchronize(stopEvent) );
checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) );
stime += time;
//printf("%f\n",stime);
}
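// Report the average end-to-end throughput: bytes * 1e-6 MB divided by the
// mean elapsed milliseconds per iteration, i.e. MB/ms, which reads as GB/s.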
printf("%f\n", bytes * 1e-6 / (stime / loopTotal));
checkCuda( cudaEventDestroy(startEvent) );
checkCuda( cudaEventDestroy(stopEvent) );
}
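/*
 * Usage sketch (invocation assumed, not taken from this file):
 * <benchmark binary> <nElements>
 * Benchmarks a less-than selection scan over nElements ints staged in pinned
 * host memory; note that argv[1] is read without an argc check.
 */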
int main(int argc, char ** argv)
{
int inputN;
sscanf(argv[1],"%d",&inputN);
unsigned int nElements = inputN;
const unsigned int bytes = nElements * sizeof(int);
// host arrays
int *h_aPageable, *h_bPageable;
int *h_aPinned, *h_bPinned;
// device array
int *d_a,*filter;
// allocate and initialize
h_aPageable = (int*)malloc(bytes );
h_bPageable = (int*)malloc(bytes ); // host pageable
checkCuda( cudaMallocHost((void**)&h_aPinned, bytes ) ); // host pinned
checkCuda( cudaMallocHost((void**)&h_bPinned, bytes ) );
checkCuda( cudaMalloc((void**)&d_a, bytes ) ); // device
checkCuda( cudaMalloc((void**)&filter, bytes ) ); // device return
for (int i = 0; i < nElements; ++i) h_aPageable[i] = i;
memcpy(h_aPinned, h_aPageable, bytes );
memset(h_bPageable, 0, bytes);
memset(h_bPinned, 0, bytes);
// query device properties (device info is not actually printed in this benchmark)
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, 0) );
// perform the selection scan (less-than predicate)
// profilescan(h_aPageable, h_bPageable, d_a, filter, nElements, constC,"Pageable",20);
profilescan(h_aPinned, h_bPinned, d_a, filter,nElements, constC,"Pinned",20);
// cleanup
cudaFree(filter);
cudaFree(d_a);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(h_aPageable);
free(h_bPageable);
return 0;
}
|
740a861411d772f2e46e57b08bb4f7fb43772f06.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "../collective/device_communicator.cuh"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/cuda_context.cuh" // CUDAContext
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/io.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "constraints.cuh"
#include "driver.h"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "param.h"
#include "split_evaluator.h"
#include "updater_gpu_common.cuh"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/task.h" // for ObjInfo
#include "xgboost/tree_model.h"
namespace xgboost::tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogramStorage
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <size_t kStopGrowingSize = 1 << 28>
class DeviceHistogramStorage {
private:
using GradientSumT = GradientPairInt64;
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
// Large buffer of zeroed memory, caches histograms
dh::device_vector<typename GradientSumT::ValueT> data_;
// If we run out of storage allocate one histogram at a time
// in overflow. Not cached, overwritten when a new histogram
// is requested
dh::device_vector<typename GradientSumT::ValueT> overflow_;
std::map<int, size_t> overflow_nidx_map_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2.");
public:
// Start with about 32 MB (1 << 22 eight-byte entries)
DeviceHistogramStorage() { data_.reserve(1 << 22); }
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
overflow_nidx_map_.clear();
}
[[nodiscard]] bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend() ||
overflow_nidx_map_.find(nidx) != overflow_nidx_map_.cend();
}
[[nodiscard]] int Bins() const { return n_bins_; }
[[nodiscard]] size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; }
dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; }
void AllocateHistograms(const std::vector<int>& new_nidxs) {
for (int nidx : new_nidxs) {
CHECK(!HistogramExists(nidx));
}
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize() * new_nidxs.size();
if (used_size >= kStopGrowingSize) {
// Use overflow
// Delete previous entries
overflow_nidx_map_.clear();
overflow_.resize(HistogramSize() * new_nidxs.size());
// Zero memory
auto d_data = overflow_.data().get();
dh::LaunchN(overflow_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0; });
// Append new histograms
for (int nidx : new_nidxs) {
overflow_nidx_map_[nidx] = overflow_nidx_map_.size() * HistogramSize();
}
} else {
CHECK_GE(data_.size(), used_size);
// Expand if necessary
if (data_.size() < new_used_size) {
data_.resize(std::max(data_.size() * 2, new_used_size));
}
// Append new histograms
for (int nidx : new_nidxs) {
nidx_map_[nidx] = nidx_map_.size() * HistogramSize();
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
if (nidx_map_.find(nidx) != nidx_map_.cend()) {
// Fetch from normal cache
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
} else {
// Fetch from overflow
auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx);
return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
}
}
};
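// Usage sketch (illustrative only):
//   DeviceHistogramStorage<> storage;
//   storage.Init(device_id, n_bins);
//   storage.AllocateHistograms({left_nidx, right_nidx});
//   auto left_hist = storage.GetNodeHistogram(left_nidx);
// Histograms stay in the cached buffer until kStopGrowingSize is reached; after
// that, newly requested nodes fall back to the single-use overflow buffer.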
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
private:
GPUHistEvaluator evaluator_;
Context const* ctx_;
public:
EllpackPageImpl const* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogramStorage<> hist{};
dh::device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::device_vector<int> monotone_constraints;
// node idx for each sample
dh::device_vector<bst_node_t> positions;
TrainParam param;
std::unique_ptr<GradientQuantiser> quantiser;
dh::PinnedMemory pinned;
dh::PinnedMemory pinned2;
common::Monitor monitor;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
GPUHistMakerDevice(Context const* ctx, EllpackPageImpl const* _page,
common::Span<FeatureType const> _feature_types, bst_uint _n_rows,
TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features,
BatchParam _batch_param)
: evaluator_{_param, n_features, ctx->gpu_id},
ctx_(ctx),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
batch_param(std::move(_batch_param)) {
sampler.reset(new GradientBasedSampler(page, _n_rows, batch_param, param.subsample,
param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
// Init histogram
hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(ctx_->gpu_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(ctx_, num_columns, info.feature_weights.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param,
ctx_->gpu_id);
this->interaction_constraints.Reset();
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(hipMemcpyAsync(
d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair), hipMemcpyDeviceToDevice));
auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
quantiser.reset(new GradientQuantiser(this->gpair));
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));
hist.Reset();
}
GPUExpandEntry EvaluateRootSplit(GradientPairInt64 root_sum) {
int nidx = RegTree::kRoot;
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs inputs{nidx, 0, root_sum, feature_set, hist.GetNodeHistogram(nidx)};
EvaluateSplitSharedInputs shared_inputs{
gpu_param,
*quantiser,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
matrix.is_dense
};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
return split;
}
void EvaluateSplits(const std::vector<GPUExpandEntry>& candidates, const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
if (candidates.empty()) return;
dh::TemporaryArray<EvaluateSplitInputs> d_node_inputs(2 * candidates.size());
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2 * candidates.size());
std::vector<bst_node_t> nidx(2 * candidates.size());
auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param}, *quantiser, feature_types, matrix.feature_segments,
matrix.gidx_fvalue_map, matrix.min_fvalue,
matrix.is_dense
};
dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
// Store the feature set ptrs so they don't go out of scope before the kernel is called
std::vector<std::shared_ptr<HostDeviceVector<bst_feature_t>>> feature_sets;
for (size_t i = 0; i < candidates.size(); i++) {
auto candidate = candidates.at(i);
int left_nidx = tree[candidate.nid].LeftChild();
int right_nidx = tree[candidate.nid].RightChild();
nidx[i * 2] = left_nidx;
nidx[i * 2 + 1] = right_nidx;
auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->gpu_id);
feature_sets.emplace_back(left_sampled_features);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->gpu_id);
feature_sets.emplace_back(right_sampled_features);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
right_nidx);
h_node_inputs[i * 2] = {left_nidx, candidate.depth + 1,
candidate.split.left_sum, left_feature_set,
hist.GetNodeHistogram(left_nidx)};
h_node_inputs[i * 2 + 1] = {right_nidx, candidate.depth + 1,
candidate.split.right_sum, right_feature_set,
hist.GetNodeHistogram(right_nidx)};
}
bst_feature_t max_active_features = 0;
for (auto input : h_node_inputs) {
max_active_features =
std::max(max_active_features, static_cast<bst_feature_t>(input.feature_set.size()));
}
dh::safe_cuda(hipMemcpyAsync(
d_node_inputs.data().get(), h_node_inputs.data(),
h_node_inputs.size() * sizeof(EvaluateSplitInputs), hipMemcpyDefault));
this->evaluator_.EvaluateSplits(nidx, max_active_features,
dh::ToSpan(d_node_inputs), shared_inputs,
dh::ToSpan(entries));
dh::safe_cuda(hipMemcpyAsync(pinned_candidates_out.data(),
entries.data().get(), sizeof(GPUExpandEntry) * entries.size(),
hipMemcpyDeviceToHost));
dh::DefaultStream().Sync();
}
void BuildHist(int nidx) {
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(ctx_->CUDACtx(), page->GetDeviceAccessor(ctx_->gpu_id),
feature_groups->DeviceAccessor(ctx_->gpu_id), gpair, d_ridx, d_node_hist,
*quantiser);
}
// Attempt to do subtraction trick
// return true if succeeded
bool SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
if (!hist.HistogramExists(nidx_histogram) || !hist.HistogramExists(nidx_parent)) {
return false;
}
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
return true;
}
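// The subtraction trick relies on hist(parent) == hist(left) + hist(right) for
// every bin, since each training row of the parent lands in exactly one child;
// the sibling histogram is therefore recovered as parent - built child without
// another pass over the rows.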
// Extra data for each node that is passed
// to the update position function
struct NodeSplitData {
RegTree::Node split_node;
FeatureType split_type;
common::CatBitField node_cats;
};
void UpdatePosition(const std::vector<GPUExpandEntry>& candidates, RegTree* p_tree) {
if (candidates.empty()) return;
std::vector<int> nidx(candidates.size());
std::vector<int> left_nidx(candidates.size());
std::vector<int> right_nidx(candidates.size());
std::vector<NodeSplitData> split_data(candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto& e = candidates[i];
RegTree::Node split_node = (*p_tree)[e.nid];
auto split_type = p_tree->NodeSplitType(e.nid);
nidx.at(i) = e.nid;
left_nidx.at(i) = split_node.LeftChild();
right_nidx.at(i) = split_node.RightChild();
split_data.at(i) = NodeSplitData{split_node, split_type, e.split.split_cats};
}
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
row_partitioner->UpdatePositionBatch(
nidx, left_nidx, right_nidx, split_data,
[=] __device__(bst_uint ridx, const NodeSplitData& data) {
// given a row index and this node's split data, returns true if the row goes to the left child
bst_float cut_value = d_matrix.GetFvalue(ridx, data.split_node.SplitIndex());
// Missing value
bool go_left = true;
if (isnan(cut_value)) {
go_left = data.split_node.DefaultLeft();
} else {
if (data.split_type == FeatureType::kCategorical) {
go_left = common::Decision(data.node_cats.Bits(), cut_value);
} else {
go_left = cut_value <= data.split_node.SplitCond();
}
}
return go_left;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
// Prediction cache will not be used with external memory
if (!p_fmat->SingleColBlock()) {
if (task.UpdateTreeLeaf()) {
LOG(FATAL) << "Current objective function can not be used with external memory.";
}
p_out_position->Resize(0);
positions.clear();
return;
}
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::CategoricalSplitMatrix::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments),
p_out_position);
}
void FinalisePositionInPage(
EllpackPageImpl const* page, const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types, common::Span<uint32_t const> categories,
common::Span<RegTree::CategoricalSplitMatrix::Segment> categories_segments,
HostDeviceVector<bst_node_t>* p_out_position) {
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto d_gpair = this->gpair;
p_out_position->SetDevice(ctx_->gpu_id);
p_out_position->Resize(row_partitioner->GetRows().size());
auto new_position_op = [=] __device__(size_t row_id, int position) {
// What happens if the user prunes the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats = categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision(node_cats, element);
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
}; // NOLINT
auto d_out_position = p_out_position->DeviceSpan();
row_partitioner->FinalisePosition(d_out_position, new_position_op);
auto s_position = p_out_position->ConstDeviceSpan();
positions.resize(s_position.size());
dh::safe_cuda(hipMemcpyAsync(positions.data().get(), s_position.data(),
s_position.size_bytes(), hipMemcpyDeviceToDevice,
ctx_->CUDACtx()->Stream()));
dh::LaunchN(row_partitioner->GetRows().size(), [=] __device__(size_t idx) {
bst_node_t position = d_out_position[idx];
bool is_row_sampled = d_gpair[idx].GetHess() - .0f == 0.f;
d_out_position[idx] = is_row_sampled ? ~position : position;
});
}
bool UpdatePredictionCache(linalg::MatrixView<float> out_preds_d, RegTree const* p_tree) {
if (positions.empty()) {
return false;
}
CHECK(p_tree);
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
auto d_position = dh::ToSpan(positions);
CHECK_EQ(out_preds_d.Size(), d_position.size());
auto const& h_nodes = p_tree->GetNodes();
dh::caching_device_vector<RegTree::Node> nodes(h_nodes.size());
dh::safe_cuda(hipMemcpyAsync(nodes.data().get(), h_nodes.data(),
h_nodes.size() * sizeof(RegTree::Node), hipMemcpyHostToDevice,
ctx_->CUDACtx()->Stream()));
auto d_nodes = dh::ToSpan(nodes);
CHECK_EQ(out_preds_d.Shape(1), 1);
dh::LaunchN(d_position.size(), ctx_->CUDACtx()->Stream(),
[=] XGBOOST_DEVICE(std::size_t idx) mutable {
bst_node_t nidx = d_position[idx];
auto weight = d_nodes[nidx].LeafValue();
out_preds_d(idx, 0) += weight;
});
return true;
}
// num histograms is the number of contiguous histograms in memory to reduce over
void AllReduceHist(int nidx, collective::DeviceCommunicator* communicator, int num_histograms) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
using ReduceT = typename std::remove_pointer<decltype(d_node_hist)>::type::ValueT;
communicator->AllReduceSum(reinterpret_cast<ReduceT*>(d_node_hist),
page->Cuts().TotalBins() * 2 * num_histograms);
monitor.Stop("AllReduce");
}
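// The caller allocates the new children's histograms contiguously, so a single
// AllReduceSum over num_histograms * 2 * TotalBins() values can reduce every
// histogram built in the current batch at once.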
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(std::vector<GPUExpandEntry> const& candidates,
collective::DeviceCommunicator* communicator, const RegTree& tree) {
if (candidates.empty()) return;
// For some nodes we compute the histogram directly;
// for the others we use the subtraction trick
std::vector<int> hist_nidx;
std::vector<int> subtraction_nidx;
for (auto& e : candidates) {
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = e.split.right_sum.GetQuantisedHess() < e.split.left_sum.GetQuantisedHess();
if (fewer_right) {
hist_nidx.emplace_back(tree[e.nid].RightChild());
subtraction_nidx.emplace_back(tree[e.nid].LeftChild());
} else {
hist_nidx.emplace_back(tree[e.nid].LeftChild());
subtraction_nidx.emplace_back(tree[e.nid].RightChild());
}
}
std::vector<int> all_new = hist_nidx;
all_new.insert(all_new.end(), subtraction_nidx.begin(), subtraction_nidx.end());
// Allocate the histograms
// Guaranteed contiguous memory
hist.AllocateHistograms(all_new);
for (auto nidx : hist_nidx) {
this->BuildHist(nidx);
}
// Reduce all in one go
// This gives much better latency in a distributed setting
// when processing a large batch
this->AllReduceHist(hist_nidx.at(0), communicator, hist_nidx.size());
for (size_t i = 0; i < subtraction_nidx.size(); i++) {
auto build_hist_nidx = hist_nidx.at(i);
auto subtraction_trick_nidx = subtraction_nidx.at(i);
auto parent_nidx = candidates.at(i).nid;
if (!this->SubtractionTrick(parent_nidx, build_hist_nidx, subtraction_trick_nidx)) {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, communicator, 1);
}
}
}
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
// Sanity check - have we created a leaf with no training instances?
if (!collective::IsDistributed() && row_partitioner) {
CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
<< "No training instances in this leaf!";
}
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto parent_hess = quantiser
->ToFloatingPoint(candidate.split.left_sum +
candidate.split.right_sum)
.GetHess();
auto left_hess =
quantiser->ToFloatingPoint(candidate.split.left_sum).GetHess();
auto right_hess =
quantiser->ToFloatingPoint(candidate.split.right_sum).GetHess();
auto is_cat = candidate.split.is_cat;
if (is_cat) {
// For a categorical split, the evaluator stores NaN as the split value.
CHECK(common::CheckNAN(candidate.split.fvalue));
std::vector<common::CatBitField::value_type> split_cats;
CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
auto n_bins_feature = page->Cuts().FeatureBins(candidate.split.findex);
split_cats.resize(common::CatBitField::ComputeStorageSize(n_bins_feature), 0);
CHECK_LE(split_cats.size(), h_cats.size());
std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_hess,
left_hess, right_hess);
} else {
CHECK(!common::CheckNAN(candidate.split.fvalue));
tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_hess,
left_hess, right_hess);
}
evaluator_.ApplyTreeSplit(candidate, p_tree);
const auto& parent = tree[candidate.nid];
std::size_t max_nidx = std::max(parent.LeftChild(), parent.RightChild());
interaction_constraints.Split(candidate.nid, parent.SplitIndex(), parent.LeftChild(),
parent.RightChild());
}
GPUExpandEntry InitRoot(RegTree* p_tree, collective::DeviceCommunicator* communicator) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
auto quantiser = *this->quantiser;
auto gpair_it = dh::MakeTransformIterator<GradientPairInt64>(
dh::tbegin(gpair), [=] __device__(auto const &gpair) {
return quantiser.ToFixedPoint(gpair);
});
GradientPairInt64 root_sum_quantised =
dh::Reduce(ctx_->CUDACtx()->CTP(), gpair_it, gpair_it + gpair.size(),
GradientPairInt64{}, thrust::plus<GradientPairInt64>{});
using ReduceT = typename decltype(root_sum_quantised)::ValueT;
collective::Allreduce<collective::Operation::kSum>(
reinterpret_cast<ReduceT *>(&root_sum_quantised), 2);
hist.AllocateHistograms({kRootNIdx});
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, communicator, 1);
// Remember root stats
auto root_sum = quantiser.ToFloatingPoint(root_sum_quantised);
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto root_entry = this->EvaluateRootSplit(root_sum_quantised);
return root_entry;
}
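// Gradients are summed in fixed point (GradientPairInt64), which makes the
// reduction and the allreduce integer-exact and independent of summation
// order across devices/workers; the quantiser converts the total back to
// floating point for the weight computation.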
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
ObjInfo const* task, RegTree* p_tree,
collective::DeviceCommunicator* communicator,
HostDeviceVector<bst_node_t>* p_out_position) {
auto& tree = *p_tree;
// Process at most 32 nodes at a time
Driver<GPUExpandEntry> driver(param, 32);
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, communicator) });
monitor.Stop("InitRoot");
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
while (!expand_set.empty()) {
for (auto& candidate : expand_set) {
this->ApplySplit(candidate, p_tree);
}
// Get the candidates we are allowed to expand further
// e.g. We do not bother further processing nodes whose children are beyond max depth
std::vector<GPUExpandEntry> filtered_expand_set;
std::copy_if(expand_set.begin(), expand_set.end(), std::back_inserter(filtered_expand_set),
[&](const auto& e) { return driver.IsChildValid(e); });
auto new_candidates =
pinned.GetSpan<GPUExpandEntry>(filtered_expand_set.size() * 2, GPUExpandEntry());
monitor.Start("UpdatePosition");
// Update position is only run when the child is valid, instead of right after
// applying the split (as in the approx tree method). Hence the finalise
// position call in GPU Hist.
this->UpdatePosition(filtered_expand_set, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(filtered_expand_set, communicator, tree);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateSplits(filtered_expand_set, *p_tree, new_candidates);
monitor.Stop("EvaluateSplits");
dh::DefaultStream().Sync();
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat, *task, p_out_position);
monitor.Stop("FinalisePosition");
}
};
class GPUHistMaker : public TreeUpdater {
using GradientSumT = GradientPairPrecise;
public:
explicit GPUHistMaker(Context const* ctx, ObjInfo const* task)
: TreeUpdater(ctx), task_{task} {};
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
initialised_ = false;
monitor_.Init("updater_gpu_hist");
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
initialised_ = false;
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
}
~GPUHistMaker() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree*>& trees) override {
monitor_.Start("Update");
// build tree
try {
size_t t_idx{0};
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(param, gpair, dmat, tree, &out_position[t_idx]);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
++t_idx;
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
monitor_.Stop("Update");
}
void InitDataOnce(TrainParam const* param, DMatrix* dmat) {
CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
info_ = &dmat->Info();
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
collective::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
ctx_->gpu_id,
param->max_bin,
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(ctx_->gpu_id));
info_->feature_types.SetDevice(ctx_->gpu_id);
maker.reset(new GPUHistMakerDevice<GradientSumT>(
ctx_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, *param,
column_sampling_seed, info_->num_col_, batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(TrainParam const* param, DMatrix* dmat, RegTree const* p_tree) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(param, dmat);
monitor_.Stop("InitDataOnce");
}
p_last_tree_ = p_tree;
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = collective::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
collective::Broadcast(&s_model, 0);
RegTree reference_tree{}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree, HostDeviceVector<bst_node_t>* p_out_position) {
monitor_.Start("InitData");
this->InitData(param, p_fmat, p_tree);
monitor_.Stop("InitData");
gpair->SetDevice(ctx_->gpu_id);
auto* communicator = collective::Communicator::GetDevice(ctx_->gpu_id);
maker->UpdateTree(gpair, p_fmat, task_, p_tree, communicator, p_out_position);
}
bool UpdatePredictionCache(const DMatrix* data,
linalg::MatrixView<bst_float> p_out_preds) override {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
bool result = maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
monitor_.Stop("UpdatePredictionCache");
return result;
}
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
[[nodiscard]] char const* Name() const override { return "grow_gpu_hist"; }
[[nodiscard]] bool HasNodePosition() const override { return true; }
private:
bool initialised_{false};
GPUHistMakerTrainParam hist_maker_param_;
DMatrix* p_last_fmat_{nullptr};
RegTree const* p_last_tree_{nullptr};
ObjInfo const* task_{nullptr};
common::Monitor monitor_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([](Context const* ctx, ObjInfo const* task) {
return new GPUHistMaker(ctx, task);
});
#endif // !defined(GTEST_TEST)
} // namespace xgboost::tree
| 740a861411d772f2e46e57b08bb4f7fb43772f06.cu | /**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "../collective/device_communicator.cuh"
#include "../common/bitfield.h"
#include "../common/categorical.h"
#include "../common/cuda_context.cuh" // CUDAContext
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/io.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "constraints.cuh"
#include "driver.h"
#include "gpu_hist/evaluate_splits.cuh"
#include "gpu_hist/expand_entry.cuh"
#include "gpu_hist/feature_groups.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "param.h"
#include "split_evaluator.h"
#include "updater_gpu_common.cuh"
#include "xgboost/base.h"
#include "xgboost/context.h"
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/task.h" // for ObjInfo
#include "xgboost/tree_model.h"
namespace xgboost::tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
/**
* \struct DeviceHistogramStorage
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <size_t kStopGrowingSize = 1 << 28>
class DeviceHistogramStorage {
private:
using GradientSumT = GradientPairInt64;
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
// Large buffer of zeroed memory, caches histograms
dh::device_vector<typename GradientSumT::ValueT> data_;
// If we run out of storage allocate one histogram at a time
// in overflow. Not cached, overwritten when a new histogram
// is requested
dh::device_vector<typename GradientSumT::ValueT> overflow_;
std::map<int, size_t> overflow_nidx_map_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2, "Number of items in gradient type should be 2.");
public:
// Start with about 16mb
DeviceHistogramStorage() { data_.reserve(1 << 22); }
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(data_.size(), [=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
overflow_nidx_map_.clear();
}
[[nodiscard]] bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend() ||
overflow_nidx_map_.find(nidx) != overflow_nidx_map_.cend();
}
[[nodiscard]] int Bins() const { return n_bins_; }
[[nodiscard]] size_t HistogramSize() const { return n_bins_ * kNumItemsInGradientSum; }
dh::device_vector<typename GradientSumT::ValueT>& Data() { return data_; }
void AllocateHistograms(const std::vector<int>& new_nidxs) {
for (int nidx : new_nidxs) {
CHECK(!HistogramExists(nidx));
}
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize() * new_nidxs.size();
if (used_size >= kStopGrowingSize) {
// Use overflow
// Delete previous entries
overflow_nidx_map_.clear();
overflow_.resize(HistogramSize() * new_nidxs.size());
// Zero memory
auto d_data = overflow_.data().get();
dh::LaunchN(overflow_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0; });
// Append new histograms
for (int nidx : new_nidxs) {
overflow_nidx_map_[nidx] = overflow_nidx_map_.size() * HistogramSize();
}
} else {
CHECK_GE(data_.size(), used_size);
// Expand if necessary
if (data_.size() < new_used_size) {
data_.resize(std::max(data_.size() * 2, new_used_size));
}
// Append new histograms
for (int nidx : new_nidxs) {
nidx_map_[nidx] = nidx_map_.size() * HistogramSize();
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
if (nidx_map_.find(nidx) != nidx_map_.cend()) {
// Fetch from normal cache
auto ptr = data_.data().get() + nidx_map_.at(nidx);
return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
} else {
// Fetch from overflow
auto ptr = overflow_.data().get() + overflow_nidx_map_.at(nidx);
return {reinterpret_cast<GradientSumT*>(ptr), static_cast<std::size_t>(n_bins_)};
}
}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
private:
GPUHistEvaluator evaluator_;
Context const* ctx_;
public:
EllpackPageImpl const* page;
common::Span<FeatureType const> feature_types;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogramStorage<> hist{};
dh::device_vector<GradientPair> d_gpair; // storage for gpair;
common::Span<GradientPair> gpair;
dh::device_vector<int> monotone_constraints;
// node idx for each sample
dh::device_vector<bst_node_t> positions;
TrainParam param;
std::unique_ptr<GradientQuantiser> quantiser;
dh::PinnedMemory pinned;
dh::PinnedMemory pinned2;
common::Monitor monitor;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
std::unique_ptr<GradientBasedSampler> sampler;
std::unique_ptr<FeatureGroups> feature_groups;
GPUHistMakerDevice(Context const* ctx, EllpackPageImpl const* _page,
common::Span<FeatureType const> _feature_types, bst_uint _n_rows,
TrainParam _param, uint32_t column_sampler_seed, uint32_t n_features,
BatchParam _batch_param)
: evaluator_{_param, n_features, ctx->gpu_id},
ctx_(ctx),
page(_page),
feature_types{_feature_types},
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
batch_param(std::move(_batch_param)) {
sampler.reset(new GradientBasedSampler(page, _n_rows, batch_param, param.subsample,
param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
// Init histogram
hist.Init(ctx_->gpu_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(ctx_->gpu_id));
feature_groups.reset(new FeatureGroups(page->Cuts(), page->is_dense,
dh::MaxSharedMemoryOptin(ctx_->gpu_id),
sizeof(GradientSumT)));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
auto const& info = dmat->Info();
this->column_sampler.Init(ctx_, num_columns, info.feature_weights.HostVector(),
param.colsample_bynode, param.colsample_bylevel,
param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
this->evaluator_.Reset(page->Cuts(), feature_types, dmat->Info().num_col_, param,
ctx_->gpu_id);
this->interaction_constraints.Reset();
if (d_gpair.size() != dh_gpair->Size()) {
d_gpair.resize(dh_gpair->Size());
}
dh::safe_cuda(cudaMemcpyAsync(
d_gpair.data().get(), dh_gpair->ConstDevicePointer(),
dh_gpair->Size() * sizeof(GradientPair), cudaMemcpyDeviceToDevice));
auto sample = sampler->Sample(dh::ToSpan(d_gpair), dmat);
page = sample.page;
gpair = sample.gpair;
quantiser.reset(new GradientQuantiser(this->gpair));
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(ctx_->gpu_id, sample.sample_rows));
hist.Reset();
}
GPUExpandEntry EvaluateRootSplit(GradientPairInt64 root_sum) {
int nidx = RegTree::kRoot;
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(ctx_->gpu_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitInputs inputs{nidx, 0, root_sum, feature_set, hist.GetNodeHistogram(nidx)};
EvaluateSplitSharedInputs shared_inputs{
gpu_param,
*quantiser,
feature_types,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
matrix.is_dense
};
auto split = this->evaluator_.EvaluateSingleSplit(inputs, shared_inputs);
return split;
}
void EvaluateSplits(const std::vector<GPUExpandEntry>& candidates, const RegTree& tree,
common::Span<GPUExpandEntry> pinned_candidates_out) {
if (candidates.empty()) return;
dh::TemporaryArray<EvaluateSplitInputs> d_node_inputs(2 * candidates.size());
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2 * candidates.size());
std::vector<bst_node_t> nidx(2 * candidates.size());
auto h_node_inputs = pinned2.GetSpan<EvaluateSplitInputs>(2 * candidates.size());
auto matrix = page->GetDeviceAccessor(ctx_->gpu_id);
EvaluateSplitSharedInputs shared_inputs{
GPUTrainingParam{param}, *quantiser, feature_types, matrix.feature_segments,
matrix.gidx_fvalue_map, matrix.min_fvalue,
matrix.is_dense
};
dh::TemporaryArray<GPUExpandEntry> entries(2 * candidates.size());
// Store the feature set ptrs so they don't go out of scope before the kernel is called
std::vector<std::shared_ptr<HostDeviceVector<bst_feature_t>>> feature_sets;
for (size_t i = 0; i < candidates.size(); i++) {
auto candidate = candidates.at(i);
int left_nidx = tree[candidate.nid].LeftChild();
int right_nidx = tree[candidate.nid].RightChild();
nidx[i * 2] = left_nidx;
nidx[i * 2 + 1] = right_nidx;
auto left_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(ctx_->gpu_id);
feature_sets.emplace_back(left_sampled_features);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(), left_nidx);
auto right_sampled_features = column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(ctx_->gpu_id);
feature_sets.emplace_back(right_sampled_features);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
right_nidx);
h_node_inputs[i * 2] = {left_nidx, candidate.depth + 1,
candidate.split.left_sum, left_feature_set,
hist.GetNodeHistogram(left_nidx)};
h_node_inputs[i * 2 + 1] = {right_nidx, candidate.depth + 1,
candidate.split.right_sum, right_feature_set,
hist.GetNodeHistogram(right_nidx)};
}
bst_feature_t max_active_features = 0;
for (auto input : h_node_inputs) {
max_active_features =
std::max(max_active_features, static_cast<bst_feature_t>(input.feature_set.size()));
}
dh::safe_cuda(cudaMemcpyAsync(
d_node_inputs.data().get(), h_node_inputs.data(),
h_node_inputs.size() * sizeof(EvaluateSplitInputs), cudaMemcpyDefault));
this->evaluator_.EvaluateSplits(nidx, max_active_features,
dh::ToSpan(d_node_inputs), shared_inputs,
dh::ToSpan(entries));
dh::safe_cuda(cudaMemcpyAsync(pinned_candidates_out.data(),
entries.data().get(), sizeof(GPUExpandEntry) * entries.size(),
cudaMemcpyDeviceToHost));
dh::DefaultStream().Sync();
}
void BuildHist(int nidx) {
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(ctx_->CUDACtx(), page->GetDeviceAccessor(ctx_->gpu_id),
feature_groups->DeviceAccessor(ctx_->gpu_id), gpair, d_ridx, d_node_hist,
*quantiser);
}
// Attempt to do subtraction trick
// return true if succeeded
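// A node's histogram is the bin-wise sum of its two children's histograms:
//   hist[parent][bin] == hist[left][bin] + hist[right][bin]
// so the missing child can be recovered as parent - built_child without
// another pass over the training data.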
bool SubtractionTrick(int nidx_parent, int nidx_histogram, int nidx_subtraction) {
if (!hist.HistogramExists(nidx_histogram) || !hist.HistogramExists(nidx_parent)) {
return false;
}
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
return true;
}
// Extra data for each node that is passed
// to the update position function
struct NodeSplitData {
RegTree::Node split_node;
FeatureType split_type;
common::CatBitField node_cats;
};
void UpdatePosition(const std::vector<GPUExpandEntry>& candidates, RegTree* p_tree) {
if (candidates.empty()) return;
std::vector<int> nidx(candidates.size());
std::vector<int> left_nidx(candidates.size());
std::vector<int> right_nidx(candidates.size());
std::vector<NodeSplitData> split_data(candidates.size());
for (size_t i = 0; i < candidates.size(); i++) {
auto& e = candidates[i];
RegTree::Node split_node = (*p_tree)[e.nid];
auto split_type = p_tree->NodeSplitType(e.nid);
nidx.at(i) = e.nid;
left_nidx.at(i) = split_node.LeftChild();
right_nidx.at(i) = split_node.RightChild();
split_data.at(i) = NodeSplitData{split_node, split_type, e.split.split_cats};
}
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
row_partitioner->UpdatePositionBatch(
nidx, left_nidx, right_nidx, split_data,
[=] __device__(bst_uint ridx, const NodeSplitData& data) {
// given a row index, decide whether the row goes to the left child of this split
bst_float cut_value = d_matrix.GetFvalue(ridx, data.split_node.SplitIndex());
// Missing value
bool go_left = true;
if (isnan(cut_value)) {
go_left = data.split_node.DefaultLeft();
} else {
if (data.split_type == FeatureType::kCategorical) {
go_left = common::Decision(data.node_cats.Bits(), cut_value);
} else {
go_left = cut_value <= data.split_node.SplitCond();
}
}
return go_left;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat, ObjInfo task,
HostDeviceVector<bst_node_t>* p_out_position) {
// Prediction cache will not be used with external memory
if (!p_fmat->SingleColBlock()) {
if (task.UpdateTreeLeaf()) {
LOG(FATAL) << "Current objective function can not be used with external memory.";
}
p_out_position->Resize(0);
positions.clear();
return;
}
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpyAsync(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
auto const& h_split_types = p_tree->GetSplitTypes();
auto const& categories = p_tree->GetSplitCategories();
auto const& categories_segments = p_tree->GetSplitCategoriesPtr();
dh::caching_device_vector<FeatureType> d_split_types;
dh::caching_device_vector<uint32_t> d_categories;
dh::caching_device_vector<RegTree::CategoricalSplitMatrix::Segment> d_categories_segments;
if (!categories.empty()) {
dh::CopyToD(h_split_types, &d_split_types);
dh::CopyToD(categories, &d_categories);
dh::CopyToD(categories_segments, &d_categories_segments);
}
FinalisePositionInPage(page, dh::ToSpan(d_nodes), dh::ToSpan(d_split_types),
dh::ToSpan(d_categories), dh::ToSpan(d_categories_segments),
p_out_position);
}
void FinalisePositionInPage(
EllpackPageImpl const* page, const common::Span<RegTree::Node> d_nodes,
common::Span<FeatureType const> d_feature_types, common::Span<uint32_t const> categories,
common::Span<RegTree::CategoricalSplitMatrix::Segment> categories_segments,
HostDeviceVector<bst_node_t>* p_out_position) {
auto d_matrix = page->GetDeviceAccessor(ctx_->gpu_id);
auto d_gpair = this->gpair;
p_out_position->SetDevice(ctx_->gpu_id);
p_out_position->Resize(row_partitioner->GetRows().size());
auto new_position_op = [=] __device__(size_t row_id, int position) {
// What happens if the user prunes the tree?
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
bool go_left = true;
if (common::IsCat(d_feature_types, position)) {
auto node_cats = categories.subspan(categories_segments[position].beg,
categories_segments[position].size);
go_left = common::Decision(node_cats, element);
} else {
go_left = element <= node.SplitCond();
}
if (go_left) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
}; // NOLINT
auto d_out_position = p_out_position->DeviceSpan();
row_partitioner->FinalisePosition(d_out_position, new_position_op);
auto s_position = p_out_position->ConstDeviceSpan();
positions.resize(s_position.size());
dh::safe_cuda(cudaMemcpyAsync(positions.data().get(), s_position.data(),
s_position.size_bytes(), cudaMemcpyDeviceToDevice,
ctx_->CUDACtx()->Stream()));
dh::LaunchN(row_partitioner->GetRows().size(), [=] __device__(size_t idx) {
bst_node_t position = d_out_position[idx];
bool is_row_sampled = d_gpair[idx].GetHess() - .0f == 0.f;
d_out_position[idx] = is_row_sampled ? ~position : position;
});
}
bool UpdatePredictionCache(linalg::MatrixView<float> out_preds_d, RegTree const* p_tree) {
if (positions.empty()) {
return false;
}
CHECK(p_tree);
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
CHECK_EQ(out_preds_d.DeviceIdx(), ctx_->gpu_id);
auto d_position = dh::ToSpan(positions);
CHECK_EQ(out_preds_d.Size(), d_position.size());
auto const& h_nodes = p_tree->GetNodes();
dh::caching_device_vector<RegTree::Node> nodes(h_nodes.size());
dh::safe_cuda(cudaMemcpyAsync(nodes.data().get(), h_nodes.data(),
h_nodes.size() * sizeof(RegTree::Node), cudaMemcpyHostToDevice,
ctx_->CUDACtx()->Stream()));
auto d_nodes = dh::ToSpan(nodes);
CHECK_EQ(out_preds_d.Shape(1), 1);
dh::LaunchN(d_position.size(), ctx_->CUDACtx()->Stream(),
[=] XGBOOST_DEVICE(std::size_t idx) mutable {
bst_node_t nidx = d_position[idx];
auto weight = d_nodes[nidx].LeafValue();
out_preds_d(idx, 0) += weight;
});
return true;
}
// num histograms is the number of contiguous histograms in memory to reduce over
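// In distributed training each worker only sees its own shard of rows, so the
// bin-wise sum across workers turns the partial histograms into global ones.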
void AllReduceHist(int nidx, collective::DeviceCommunicator* communicator, int num_histograms) {
monitor.Start("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
using ReduceT = typename std::remove_pointer<decltype(d_node_hist)>::type::ValueT;
communicator->AllReduceSum(reinterpret_cast<ReduceT*>(d_node_hist),
page->Cuts().TotalBins() * 2 * num_histograms);
monitor.Stop("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(std::vector<GPUExpandEntry> const& candidates,
collective::DeviceCommunicator* communicator, const RegTree& tree) {
if (candidates.empty()) return;
// For some nodes we will compute histograms directly,
// for the others we will derive them by subtraction
std::vector<int> hist_nidx;
std::vector<int> subtraction_nidx;
for (auto& e : candidates) {
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
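// Building the smaller child explicitly and deriving its sibling by the
// subtraction trick means each row contributes to at most one explicitly
// built histogram per level.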
bool fewer_right = e.split.right_sum.GetQuantisedHess() < e.split.left_sum.GetQuantisedHess();
if (fewer_right) {
hist_nidx.emplace_back(tree[e.nid].RightChild());
subtraction_nidx.emplace_back(tree[e.nid].LeftChild());
} else {
hist_nidx.emplace_back(tree[e.nid].LeftChild());
subtraction_nidx.emplace_back(tree[e.nid].RightChild());
}
}
std::vector<int> all_new = hist_nidx;
all_new.insert(all_new.end(), subtraction_nidx.begin(), subtraction_nidx.end());
// Allocate the histograms
// Guaranteed contiguous memory
hist.AllocateHistograms(all_new);
for (auto nidx : hist_nidx) {
this->BuildHist(nidx);
}
// Reduce all in one go
// This gives much better latency in a distributed setting
// when processing a large batch
this->AllReduceHist(hist_nidx.at(0), communicator, hist_nidx.size());
for (size_t i = 0; i < subtraction_nidx.size(); i++) {
auto build_hist_nidx = hist_nidx.at(i);
auto subtraction_trick_nidx = subtraction_nidx.at(i);
auto parent_nidx = candidates.at(i).nid;
if (!this->SubtractionTrick(parent_nidx, build_hist_nidx, subtraction_trick_nidx)) {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, communicator, 1);
}
}
}
void ApplySplit(const GPUExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
// Sanity check - have we created a leaf with no training instances?
if (!collective::IsDistributed() && row_partitioner) {
CHECK(row_partitioner->GetRows(candidate.nid).size() > 0)
<< "No training instances in this leaf!";
}
auto base_weight = candidate.base_weight;
auto left_weight = candidate.left_weight * param.learning_rate;
auto right_weight = candidate.right_weight * param.learning_rate;
auto parent_hess = quantiser
->ToFloatingPoint(candidate.split.left_sum +
candidate.split.right_sum)
.GetHess();
auto left_hess =
quantiser->ToFloatingPoint(candidate.split.left_sum).GetHess();
auto right_hess =
quantiser->ToFloatingPoint(candidate.split.right_sum).GetHess();
auto is_cat = candidate.split.is_cat;
if (is_cat) {
// should be set to nan in evaluation split.
CHECK(common::CheckNAN(candidate.split.fvalue));
std::vector<common::CatBitField::value_type> split_cats;
CHECK_GT(candidate.split.split_cats.Bits().size(), 0);
auto h_cats = this->evaluator_.GetHostNodeCats(candidate.nid);
auto n_bins_feature = page->Cuts().FeatureBins(candidate.split.findex);
split_cats.resize(common::CatBitField::ComputeStorageSize(n_bins_feature), 0);
CHECK_LE(split_cats.size(), h_cats.size());
std::copy(h_cats.data(), h_cats.data() + split_cats.size(), split_cats.data());
tree.ExpandCategorical(
candidate.nid, candidate.split.findex, split_cats, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight, candidate.split.loss_chg, parent_hess,
left_hess, right_hess);
} else {
CHECK(!common::CheckNAN(candidate.split.fvalue));
tree.ExpandNode(candidate.nid, candidate.split.findex, candidate.split.fvalue,
candidate.split.dir == kLeftDir, base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_hess,
left_hess, right_hess);
}
evaluator_.ApplyTreeSplit(candidate, p_tree);
const auto& parent = tree[candidate.nid];
std::size_t max_nidx = std::max(parent.LeftChild(), parent.RightChild());
interaction_constraints.Split(candidate.nid, parent.SplitIndex(), parent.LeftChild(),
parent.RightChild());
}
GPUExpandEntry InitRoot(RegTree* p_tree, collective::DeviceCommunicator* communicator) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
auto quantiser = *this->quantiser;
auto gpair_it = dh::MakeTransformIterator<GradientPairInt64>(
dh::tbegin(gpair), [=] __device__(auto const &gpair) {
return quantiser.ToFixedPoint(gpair);
});
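// Accumulate in fixed point: integer addition is exact and order-independent,
// which keeps the root gradient sum reproducible across devices and workers.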
GradientPairInt64 root_sum_quantised =
dh::Reduce(ctx_->CUDACtx()->CTP(), gpair_it, gpair_it + gpair.size(),
GradientPairInt64{}, thrust::plus<GradientPairInt64>{});
using ReduceT = typename decltype(root_sum_quantised)::ValueT;
collective::Allreduce<collective::Operation::kSum>(
reinterpret_cast<ReduceT *>(&root_sum_quantised), 2);
hist.AllocateHistograms({kRootNIdx});
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, communicator, 1);
// Remember root stats
auto root_sum = quantiser.ToFloatingPoint(root_sum_quantised);
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Generate first split
auto root_entry = this->EvaluateRootSplit(root_sum_quantised);
return root_entry;
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
ObjInfo const* task, RegTree* p_tree,
collective::DeviceCommunicator* communicator,
HostDeviceVector<bst_node_t>* p_out_position) {
auto& tree = *p_tree;
// Process maximum 32 nodes at a time
Driver<GPUExpandEntry> driver(param, 32);
monitor.Start("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.Stop("Reset");
monitor.Start("InitRoot");
driver.Push({ this->InitRoot(p_tree, communicator) });
monitor.Stop("InitRoot");
// The set of leaves that can be expanded asynchronously
auto expand_set = driver.Pop();
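// Expansion loop: apply the pending splits, update row positions, build the
// child histograms, evaluate the next split candidates, and repeat until the
// driver has no more expandable leaves.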
while (!expand_set.empty()) {
for (auto& candidate : expand_set) {
this->ApplySplit(candidate, p_tree);
}
// Get the candidates we are allowed to expand further
// e.g. We do not bother further processing nodes whose children are beyond max depth
std::vector<GPUExpandEntry> filtered_expand_set;
std::copy_if(expand_set.begin(), expand_set.end(), std::back_inserter(filtered_expand_set),
[&](const auto& e) { return driver.IsChildValid(e); });
auto new_candidates =
pinned.GetSpan<GPUExpandEntry>(filtered_expand_set.size() * 2, GPUExpandEntry());
monitor.Start("UpdatePosition");
// Update position is only run when the child is valid, instead of right after applying the
// split (as in the approx tree method). Hence we need the finalise-position call
// in GPU Hist.
this->UpdatePosition(filtered_expand_set, p_tree);
monitor.Stop("UpdatePosition");
monitor.Start("BuildHist");
this->BuildHistLeftRight(filtered_expand_set, communicator, tree);
monitor.Stop("BuildHist");
monitor.Start("EvaluateSplits");
this->EvaluateSplits(filtered_expand_set, *p_tree, new_candidates);
monitor.Stop("EvaluateSplits");
dh::DefaultStream().Sync();
driver.Push(new_candidates.begin(), new_candidates.end());
expand_set = driver.Pop();
}
monitor.Start("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat, *task, p_out_position);
monitor.Stop("FinalisePosition");
}
};
class GPUHistMaker : public TreeUpdater {
using GradientSumT = GradientPairPrecise;
public:
explicit GPUHistMaker(Context const* ctx, ObjInfo const* task)
: TreeUpdater(ctx), task_{task} {};
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
initialised_ = false;
monitor_.Init("updater_gpu_hist");
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
initialised_ = false;
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
}
~GPUHistMaker() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
common::Span<HostDeviceVector<bst_node_t>> out_position,
const std::vector<RegTree*>& trees) override {
monitor_.Start("Update");
// build tree
try {
size_t t_idx{0};
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(param, gpair, dmat, tree, &out_position[t_idx]);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
++t_idx;
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
monitor_.Stop("Update");
}
void InitDataOnce(TrainParam const* param, DMatrix* dmat) {
CHECK_GE(ctx_->gpu_id, 0) << "Must have at least one device";
info_ = &dmat->Info();
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
collective::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
ctx_->gpu_id,
param->max_bin,
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(ctx_->gpu_id));
info_->feature_types.SetDevice(ctx_->gpu_id);
maker.reset(new GPUHistMakerDevice<GradientSumT>(
ctx_, page, info_->feature_types.ConstDeviceSpan(), info_->num_row_, *param,
column_sampling_seed, info_->num_col_, batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(TrainParam const* param, DMatrix* dmat, RegTree const* p_tree) {
if (!initialised_) {
monitor_.Start("InitDataOnce");
this->InitDataOnce(param, dmat);
monitor_.Stop("InitDataOnce");
}
p_last_tree_ = p_tree;
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = collective::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
collective::Broadcast(&s_model, 0);
RegTree reference_tree{}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(TrainParam const* param, HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree, HostDeviceVector<bst_node_t>* p_out_position) {
monitor_.Start("InitData");
this->InitData(param, p_fmat, p_tree);
monitor_.Stop("InitData");
gpair->SetDevice(ctx_->gpu_id);
auto* communicator = collective::Communicator::GetDevice(ctx_->gpu_id);
maker->UpdateTree(gpair, p_fmat, task_, p_tree, communicator, p_out_position);
}
bool UpdatePredictionCache(const DMatrix* data,
linalg::MatrixView<bst_float> p_out_preds) override {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.Start("UpdatePredictionCache");
bool result = maker->UpdatePredictionCache(p_out_preds, p_last_tree_);
monitor_.Stop("UpdatePredictionCache");
return result;
}
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
[[nodiscard]] char const* Name() const override { return "grow_gpu_hist"; }
[[nodiscard]] bool HasNodePosition() const override { return true; }
private:
bool initialised_{false};
GPUHistMakerTrainParam hist_maker_param_;
DMatrix* p_last_fmat_{nullptr};
RegTree const* p_last_tree_{nullptr};
ObjInfo const* task_{nullptr};
common::Monitor monitor_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([](Context const* ctx, ObjInfo const* task) {
return new GPUHistMaker(ctx, task);
});
#endif // !defined(GTEST_TEST)
} // namespace xgboost::tree
|
0ae9aac79ab918ea4fbd932263fdfaaa69d1c5a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../common/book.h"
#define N (1024 * 1024)
#define FULL_DATA_SIZE (N * 20)
__global__ void kernel( int *a, int *b, int *c ) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs) / 2;
}
}
int main( void ) {
hipDeviceProp_t prop;
int whichDevice;
HANDLE_ERROR( hipGetDevice( &whichDevice ) );
HANDLE_ERROR( hipGetDeviceProperties( &prop, whichDevice ) );
if (!prop.deviceOverlap) {
printf( "Device will not handle overlaps, so no speed up from streams\n" );
return 0;
}
hipEvent_t start, stop;
float elapsedTime;
// start the timers
HANDLE_ERROR( hipEventCreate( &start ) );
HANDLE_ERROR( hipEventCreate( &stop ) );
HANDLE_ERROR( hipEventRecord( start, 0 ) );
// initialize the streams
hipStream_t stream0, stream1;
HANDLE_ERROR( hipStreamCreate( &stream0 ) );
HANDLE_ERROR( hipStreamCreate( &stream1 ) );
int *host_a, *host_b, *host_c;
int *dev_a0, *dev_b0, *dev_c0; // GPU buffers for stream0
int *dev_a1, *dev_b1, *dev_c1; // GPU buffers for stream1
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**)&dev_a0, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b0, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_c0, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_a1, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_b1, N * sizeof(int) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_c1, N * sizeof(int) ) );
// allocate page-locked memory, used to stream
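// hipMemcpyAsync only overlaps transfers with kernel execution when the host
// buffers are page-locked; with ordinary pageable memory the copy is staged
// and the overlap is lost.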
HANDLE_ERROR( hipHostMalloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault ) );
HANDLE_ERROR( hipHostMalloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault ) );
HANDLE_ERROR( hipHostMalloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault ) );
for (int i = 0; i < FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
// now loop over full data, in bite-sized chunks
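// Work is issued breadth-first across the two streams
// (a0, a1, b0, b1, kernel0, kernel1, c0, c1) so the device's copy and compute
// engines can overlap transfers and kernels from different streams.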
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
// enqueue copies of a in stream0 and stream1
HANDLE_ERROR( hipMemcpyAsync( dev_a0, host_a + i, N * sizeof(int), hipMemcpyHostToDevice, stream0 ) );
HANDLE_ERROR( hipMemcpyAsync( dev_a1, host_a + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1 ) );
// enqueue the copies of b in stream0 and stream1
HANDLE_ERROR( hipMemcpyAsync( dev_b0, host_b + i, N * sizeof(int), hipMemcpyHostToDevice, stream0 ) );
HANDLE_ERROR( hipMemcpyAsync( dev_b1, host_b + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1 ) );
// enqueue kernels in stream0 and stream1
hipLaunchKernelGGL(( kernel), dim3(N / 256), dim3(256), 0, stream0, dev_a0, dev_b0, dev_c0 );
hipLaunchKernelGGL(( kernel), dim3(N / 256), dim3(256), 0, stream1, dev_a1, dev_b1, dev_c1 );
// enqueue copies of c from device to locked memory
HANDLE_ERROR( hipMemcpyAsync( host_c + i, dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0 ) );
HANDLE_ERROR( hipMemcpyAsync( host_c + i + N, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream1 ) );
}
HANDLE_ERROR( hipStreamSynchronize( stream0 ) );
HANDLE_ERROR( hipStreamSynchronize( stream1 ) );
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf( "Time taken: %3.1f ms\n", elapsedTime );
// cleanup the streams and memory
HANDLE_ERROR( hipHostFree( host_a ) );
HANDLE_ERROR( hipHostFree( host_b ) );
HANDLE_ERROR( hipHostFree( host_c ) );
HANDLE_ERROR( hipFree( dev_a0 ) );
HANDLE_ERROR( hipFree( dev_b0 ) );
HANDLE_ERROR( hipFree( dev_c0 ) );
HANDLE_ERROR( hipFree( dev_a1 ) );
HANDLE_ERROR( hipFree( dev_b1 ) );
HANDLE_ERROR( hipFree( dev_c1 ) );
HANDLE_ERROR( hipStreamDestroy( stream0 ) );
HANDLE_ERROR( hipStreamDestroy( stream1 ) );
return 0;
}
| 0ae9aac79ab918ea4fbd932263fdfaaa69d1c5a5.cu | #include "../common/book.h"
#define N (1024 * 1024)
#define FULL_DATA_SIZE (N * 20)
__global__ void kernel( int *a, int *b, int *c ) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs) / 2;
}
}
int main( void ) {
cudaDeviceProp prop;
int whichDevice;
HANDLE_ERROR( cudaGetDevice( &whichDevice ) );
HANDLE_ERROR( cudaGetDeviceProperties( &prop, whichDevice ) );
if (!prop.deviceOverlap) {
printf( "Device will not handle overlaps, so no speed up from streams\n" );
return 0;
}
cudaEvent_t start, stop;
float elapsedTime;
// start the timers
HANDLE_ERROR( cudaEventCreate( &start ) );
HANDLE_ERROR( cudaEventCreate( &stop ) );
HANDLE_ERROR( cudaEventRecord( start, 0 ) );
// initialize the streams
cudaStream_t stream0, stream1;
HANDLE_ERROR( cudaStreamCreate( &stream0 ) );
HANDLE_ERROR( cudaStreamCreate( &stream1 ) );
int *host_a, *host_b, *host_c;
int *dev_a0, *dev_b0, *dev_c0; // GPU buffers for stream0
int *dev_a1, *dev_b1, *dev_c1; // GPU buffers for stream1
// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**)&dev_a0, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b0, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_c0, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_a1, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_b1, N * sizeof(int) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_c1, N * sizeof(int) ) );
// allocate page-locked memory, used to stream
HANDLE_ERROR( cudaHostAlloc( (void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) );
HANDLE_ERROR( cudaHostAlloc( (void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) );
HANDLE_ERROR( cudaHostAlloc( (void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault ) );
for (int i = 0; i < FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
// now loop over full data, in bite-sized chunks
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
// enqueue copies of a in stream0 and stream1
HANDLE_ERROR( cudaMemcpyAsync( dev_a0, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) );
HANDLE_ERROR( cudaMemcpyAsync( dev_a1, host_a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) );
// enqueue the copies of b in stream0 and stream1
HANDLE_ERROR( cudaMemcpyAsync( dev_b0, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0 ) );
HANDLE_ERROR( cudaMemcpyAsync( dev_b1, host_b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1 ) );
// enqueue kernels in stream0 and stream1
kernel<<<N / 256, 256, 0, stream0>>> ( dev_a0, dev_b0, dev_c0 );
kernel<<<N / 256, 256, 0, stream1>>> ( dev_a1, dev_b1, dev_c1 );
// enqueue copies of c from device to locked memory
HANDLE_ERROR( cudaMemcpyAsync( host_c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0 ) );
HANDLE_ERROR( cudaMemcpyAsync( host_c + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1 ) );
}
HANDLE_ERROR( cudaStreamSynchronize( stream0 ) );
HANDLE_ERROR( cudaStreamSynchronize( stream1 ) );
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf( "Time taken: %3.1f ms\n", elapsedTime );
// cleanup the streams and memory
HANDLE_ERROR( cudaFreeHost( host_a ) );
HANDLE_ERROR( cudaFreeHost( host_b ) );
HANDLE_ERROR( cudaFreeHost( host_c ) );
HANDLE_ERROR( cudaFree( dev_a0 ) );
HANDLE_ERROR( cudaFree( dev_b0 ) );
HANDLE_ERROR( cudaFree( dev_c0 ) );
HANDLE_ERROR( cudaFree( dev_a1 ) );
HANDLE_ERROR( cudaFree( dev_b1 ) );
HANDLE_ERROR( cudaFree( dev_c1 ) );
HANDLE_ERROR( cudaStreamDestroy( stream0 ) );
HANDLE_ERROR( cudaStreamDestroy( stream1 ) );
return 0;
}
|
8903938fb170d5815c8d5240d9d6172d6acd809c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vector_swap.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
NUMBER *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
NUMBER *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
const int offset_y = 1;
const int stride_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( vector_swap), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( vector_swap), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( vector_swap), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,offset_x,stride_x,y,offset_y,stride_y);
}
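// Note: no device synchronization before reading the clock, so this mostly
// measures the enqueue cost of 1000 asynchronous kernel launches rather than
// their execution time.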
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8903938fb170d5815c8d5240d9d6172d6acd809c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vector_swap.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
NUMBER *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
const int offset_x = 1;
const int stride_x = 1;
NUMBER *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
const int offset_y = 1;
const int stride_y = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vector_swap<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vector_swap<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vector_swap<<<gridBlock,threadBlock>>>(n,x,offset_x,stride_x,y,offset_y,stride_y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
Median_Test.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------------------------------------------------------------------------
File: Median_Test.cpp
Desc: Runs Median Test
-----------------------------------------------------------------------------*/
/*-------------------------------------
Includes
-------------------------------------*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, CUDA
#include <cutil_inline.h>
// includes, project
#include "KD_API.h"
/*-------------------------------------
Global Variables
-------------------------------------*/
extern AppGlobals g_app;
/*-------------------------------------
CUDA Kernels
-------------------------------------*/
//#include <Median_GPU.cu>
/*-------------------------------------
Function Declarations
-------------------------------------*/
bool RunMedianTest();
/*---------------------------------------------------------
Name: RunMedianTest()
Desc: Run a simple test of "Median Partition"
functionality on CUDA GPU framework
---------------------------------------------------------*/
bool RunMedianTest()
{
bool bResult = false;
#if 0
/*---------------------------------
Step 0. Initialize Cuda
---------------------------------*/
hipError_t cuda_err = hipSuccess;
// set seed for rand()
srand( 2009 );
g_app.hTimer = 0;
sdkCreateTimer( &(g_app.hTimer) );
/*-------------------------------------------
Step 1. Setup Initial parameters
-------------------------------------------*/
// Hard Coded for now...
g_app.bgShape.nElems = g_app.nSearch;
g_app.bgShape.threadsPerRow = MEDIAN_THREADS_PER_ROW;
g_app.bgShape.rowsPerBlock = MEDIAN_ROWS_PER_BLOCK;
bResult = ComputeBlockShapeFromVector( g_app.bgShape );
if (false == bResult)
{
// Error
return false;
}
// Make sure the Matrix + vector are not so big that they use up all device memory // 768 Meg on Display Card
int sizePoints = g_app.bgShape.nPadded * sizeof(float4);
int sizeDists = g_app.bgShape.nPadded * sizeof(float2);
int totalMem = sizePoints + (2*sizeDists);
// Make sure memory required to perform this operation doesn't exceed display device memory
if (totalMem >= g_app.cudaProps.totalGlobalMem)
{
// Error - not enough memory to perform operation
printf( "Matrix + Vector are too large for available device memory, running test will crash..." );
return false;
}
// Setup GPU Kernel execution parameters
// Median Sort Kernel
dim3 dimBlock( g_app.bgShape.threadsPerRow, g_app.bgShape.rowsPerBlock, 1 );
dim3 dimGrid( g_app.bgShape.blocksPerRow, g_app.bgShape.rowsPerGrid, 1 );
/*-------------------------------------------
Step 2. Allocate Vectors
-------------------------------------------*/
int nOrig = g_app.bgShape.nElems;
int nPad = g_app.bgShape.nPadded;
int w = g_app.bgShape.W;
int h = g_app.bgShape.H;
/*-----------------------
Host Memory
-----------------------*/
// allocate host memory for original points (before median sort)
int mem_size_Points = nPad * sizeof(float4);
float4* h_Points_Orig = (float4*) malloc( (size_t)mem_size_Points );
// allocate host memory for point results (after median sort)
float4 *h_Points_Result = (float4*) malloc( mem_size_Points );
// allocate host memory for CPU point results (after median sort)
float4 *h_Points_CPU = (float4*) malloc( mem_size_Points );
// Allocate host memory for singleton median index result
unsigned int mem_size_Result = 16 * sizeof(I32);
I32 *h_result_GPU = (I32 *) malloc( mem_size_Result );
h_result_GPU[0] = -1;
/*-----------------------
Device Memory
-----------------------*/
// allocate device memory for points
float4* d_Points;
checkCudaErrors( hipMalloc( (void**) &d_Points, mem_size_Points ) );
// allocate device memory for the singleton result (median index)
I32* d_result_GPU;
checkCudaErrors( hipMalloc( (void**) &d_result_GPU, mem_size_Result ) );
// allocate device memory for Reduction Vector
// Used for reduction:
// IE ping-pong between the dists vector and the reduce vector
// to arrive at the final answer
//bool bPingPong = true;
//float4* d_Reduce;
//checkCudaErrors( hipMalloc( (void **) &d_Reduce, mem_size_Points ) );
/*-------------------------------------------
Step 3. Initialize Vectors
-------------------------------------------*/
// Initialize Input points (to query against)
int i;
for (i = 0; i < nOrig; i++) // Original Points
{
// BUGBUG - for now just randomly generate points
// In future - we should read them in from a file...
h_Points_Orig[i].x = RandomFloat( 0.0, 1.0 );
h_Points_Orig[i].y = RandomFloat( 0.0, 1.0 );
h_Points_Orig[i].z = RandomFloat( 0.0, 1.0 );
// Store point index in this channel
h_Points_Orig[i].w = (float)i;
}
// Initialize padded points (to query against)
for (i = nOrig; i < nPad; i++) // Padded points
{
// We want padded points to always fail...
// 1st Approach,
// Use a point that is so far away it is guranteed to never get picked
// Cons: Requires advance knowledge of input point range
// and query point range to pick a point
// so far outside range it doesn't matter
// 2nd Approach,
// Duplicate the 1st point many times
// Cons: Can fail because of numerical round-off errors
// IE what if the 1st point is really the closest to the query point
// which point wins (1st point or one of its duplicates)
//
// 1st Approach
//
h_Points_Orig[i].x = 400.0f; // Note: Any number much larger than 46,000 and we will overflow on squaring the float
h_Points_Orig[i].y = 400.0f;
h_Points_Orig[i].z = 400.0f;
h_Points_Orig[i].w = (float)-1; // Store invalid point index in this channel
//
// 2nd Approach
//
//h_Points[i].x = h_Points[0].x;
//h_Points[i].y = h_Points[0].y;
//h_Points[i].z = h_Points[0].z;
//h_Points[i].w = h_Points[0].w;
}
//
// Profile Performance Metric Initialization
//
float MED_PNT_onto_device = 0.0f;
float MED_PNT_from_device = 0.0f;
float MED_M_from_device = 0.0f;
float MED_GPU_Kernel = 0.0f;
float MED_CPU_Kernel = 0.0f;
bool checkMedianResults = true;
// Result values
int gpuMedianIdx; // Index of Median Point as computed on GPU
int cpuMedianIdx; // Index of Median Point as computed on CPU
// Profile Measurement Loop
unsigned int currIter;
for (currIter = 0; currIter < g_app.profileActualLoops; currIter++)
{
//-------------------------------------------------------
// Step 3. Move Points (& indices)
// from main memory to device memory
//-------------------------------------------------------
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy 'Points' vector from host memory to device memory
checkCudaErrors( hipMemcpy( d_Points, h_Points_Orig, mem_size_Points, hipMemcpyHostToDevice ) );
// Copy 'Initial' result vector from host memory to device memory
checkCudaErrors( hipMemcpy( d_result_GPU, h_result_GPU, mem_size_Result, hipMemcpyHostToDevice ) );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_PNT_onto_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_PNT_onto_device += sdkGetTimerValue( g_app.hTimer );
}
}
//---------------------------------
// Step 4. Call Kernel Function
//---------------------------------
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Excute the Brute Force Distance Kernel
hipLaunchKernelGGL(( MedianSort_GPU), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_Points, w, h );
// Check if GPU kernel execution generated an error
cuda_err = hipGetLastError();
if( hipSuccess != cuda_err)
{
fprintf( stderr, "Cuda error: %s in file '%s' in line %i : %s.\n",
"MedianSort_GPU() failed", __FILE__, __LINE__, hipGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_GPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_GPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
//-------------------------------------------------
// Step 5. Copy result vector (partitioned points)
// from device memory to main memory
//-------------------------------------------------
if (g_app.doubleCheckDists)
{
// BUGBUG - this is a temporary step to verify brute force distance calculation
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// copy result vector from device to host
checkCudaErrors( hipMemcpy( (void *) h_Points_Result, d_Points, mem_size_Points, hipMemcpyDeviceToHost ) );
// copy singleton median index from device to host
checkCudaErrors( hipMemcpy( (void *) h_result_GPU, d_result_GPU, mem_size_Result, hipMemcpyDeviceToHost ) );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_PNT_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_PNT_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
}
/*-------------------------------------------------
Step 6. Double check GPU result
against CPU result
-------------------------------------------------*/
if (g_app.doubleCheckDists)
{
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Compute reference solution (median partition) on CPU
cpuMedianIdx = MedianSort_CPU( h_Points_CPU, h_Points_Orig, w, h );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_CPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_CPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
// Double check GPU Result against CPU result (for distances)
int NCheck = nPad;
int i;
for (i = 0; i < NCheck; i++)
{
const float eps = 1.0e-2f;
//printf( "[%d] GPU=%f, CPU=%f \n", i, gVal, cVal );
if ( ((cVal - eps) >= gVal) ||
((cVal + eps) <= gVal) )
{
// Error - Out of tolerance check range
printf( "[%d] GPU %f != CPU %f \n", i, gVal, cVal );
checkDistResults = false;
}
}
} // double check distances
/*-------------------------------------------------
Step 7. GPU Kernel to reduce distances
(& index) vector
to single best result
-------------------------------------------------*/
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy 'Distances' vector to 'Reduction' vector
// This is currently necessary to avoid garbage
// results in output caused by unitialized values
checkCudaErrors( hipMemcpy( d_Reduce, d_Dists, mem_size_Dists_GPU, hipMemcpyDeviceToDevice ) );
int reduceElems = nPad;
dim3 reduceThreads;
dim3 reduceGrid;
BlockGridShape reduceShape;
// Compute Initial Grid Shape
reduceShape.nElems = reduceElems;
reduceShape.threadsPerRow = BFMR_THREADS_PER_ROW;
reduceShape.rowsPerBlock = BFMR_ROWS_PER_BLOCK;
ComputeBlockShapeFromVector( reduceShape );
// Make sure we have an even number of blocks to work on
if ((reduceShape.blocksPerRow % 2) != 0)
{
// Error - not an even number of blocks
fprintf( stderr, "Error - not an even number of blocks\n" );
return false;
}
reduceThreads.x = reduceShape.threadsPerRow;
reduceThreads.y = reduceShape.rowsPerBlock;
reduceThreads.z = 1;
reduceGrid.x = reduceShape.blocksPerRow / 2; // Divide by 2 (algorithm works on 2 blocks at a time)
reduceGrid.y = reduceShape.rowsPerGrid;
reduceGrid.z = 1;
bool bReduced = false;
bPingPong = true;
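// Each pass folds pairs of blocks into one, ping-ponging between d_Dists and
// d_Reduce as source and destination until a single candidate remains.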
while (!bReduced)
{
// Ping Pong between "Distances" and "Reduce" vectors
if (bPingPong)
{
bPingPong = false;
// Call GPU Kernel to reduce result vector by THREADS_PER_BLOCK
hipLaunchKernelGGL(( Reduce_Min_GPU), dim3(reduceGrid), dim3(reduceThreads) , 0, 0, d_Reduce, d_Dists );
}
else
{
bPingPong = true;
// Call GPU Kernel to reduce result vector by THREADS_PER_BLOCK
hipLaunchKernelGGL(( Reduce_Min_GPU), dim3(reduceGrid), dim3(reduceThreads) , 0, 0, d_Dists, d_Reduce );
}
// Check if GPU kernel execution generated an error
cuda_err = hipGetLastError();
if( hipSuccess != cuda_err)
{
fprintf( stderr, "Cuda error: %s in file '%s' in line %i : %s.\n",
"PLQ_GPU_BF_DIST() failed", __FILE__, __LINE__, hipGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
// Update Number of elements in reduction vector
reduceElems = reduceShape.blocksPerGrid / 2; // Divide by 2 - Algorithm works on 2 columns of blocks at a time
if (reduceElems == 1)
{
bReduced = true;
}
else
{
// Update Shape of Grid
reduceShape.nElems = reduceElems;
reduceShape.threadsPerRow = BFMR_THREADS_PER_ROW;
reduceShape.rowsPerBlock = BFMR_ROWS_PER_BLOCK;
ComputeBlockShapeFromVector( reduceShape );
// Make sure we have an even number of blocks to work on
if ((reduceShape.blocksPerRow % 2) != 0)
{
// Error - not even number of blocks
fprintf( stderr, "Error - not an even number of blocks" );
return false;
}
reduceThreads.x = reduceShape.threadsPerRow;
reduceThreads.y = reduceShape.rowsPerBlock;
reduceThreads.z = 1;
reduceGrid.x = reduceShape.blocksPerRow / 2; // Divide by 2 (algorithm works on 2 blocks at a time)
reduceGrid.y = reduceShape.rowsPerGrid;
reduceGrid.z = 1;
}
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_GPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_GPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
/*-------------------------------------------------
Step 8. Read Result from GPU
-------------------------------------------------*/
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy closest point result from device to host memory (singleton distance & index)
if (!bPingPong)
{
cuda_err = hipMemcpy( h_result_GPU, d_Reduce, mem_size_Result, hipMemcpyDeviceToHost );
}
else
{
cuda_err = hipMemcpy( h_result_GPU, d_Dists, mem_size_Result, hipMemcpyDeviceToHost );
}
if (hipSuccess != cuda_err)
{
fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, hipGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_M_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_M_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
// Save Results
gpuMinDist = h_result_GPU[0].x;
gpuMinIdx = (unsigned int)(h_result_GPU[0].y);
/*-------------------------------------------------
Step 9. Double check GPU result
against CPU result
-------------------------------------------------*/
if (g_app.doubleCheckMin)
{
// BUGBUG - this is a temporary step to verify brute force distance calculation
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Compute reference solution (distances) on CPU
Reduce_Min_CPU( cpuMinIdx, cpuMinDist, h_Points, queryPoint, nOrig );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_CPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_CPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
//
// Double check GPU Result against CPU result
//
// Index check
if (gpuMinIdx != cpuMinIdx)
{
// Warning - Indices are not the same
// Note: This is not truly an error unless
// the closest distances also don't match
printf( "WARN - MIN GPU IDX %d != MIN CPU IDX %d \n", gpuMinIdx, cpuMinIdx );
}
// Distance Check
const float minEps = 1.0e-4f;
gVal = gpuMinDist;
cVal = cpuMinDist;
if ( ((cVal - minEps) >= gVal) ||
((cVal + minEps) <= gVal) )
{
// Error - Out of tolerance check range
printf( "ERR - MIN GPU DIST %f != MIN CPU DIST %f \n", i, gVal, cVal );
checkMinResults = false;
}
}
} // Profile Loops
/*--------------------------------------------------------
Step 11. Print out Results
--------------------------------------------------------*/
int vectLen = g_app.nSearch;
printf( "\n" );
printf( "Search Vector Length = %d\n", vectLen );
printf( "Query Point: <%f %f %f>\n",
queryPoint.x, queryPoint.y, queryPoint.z );
printf( "GPU Closest Distance: %f\n", gpuMinDist );
printf( "GPU Closest Index: %d\n", gpuMinIdx );
printf( "GPU Closest Point: <%f %f %f>\n",
h_Points[gpuMinIdx].x, h_Points[gpuMinIdx].y, h_Points[gpuMinIdx].z );
if (g_app.doubleCheckMin)
{
printf( "CPU Closest Distance: %f\n", cpuMinDist );
printf( "CPU Closest Index: %d\n", cpuMinIdx );
printf( "CPU Closest Point: <%f %f %f>\n",
h_Points[cpuMinIdx].x, h_Points[cpuMinIdx].y, h_Points[cpuMinIdx].z );
}
printf( "\n" );
/*--------------------------------------------------------
Step 12. Print out Profile Performance Metrics
--------------------------------------------------------*/
// Does GPU Distance Kernel match up with CPU ?!?
if (g_app.doubleCheckDists)
{
if (true == checkDistResults)
{
printf( "Distance check: CPU and GPU results agree within tolerance.\n" );
}
else
{
printf( "Distance check: CPU and GPU results don't agree within tolerance !!!\n" );
}
}
// Does GPU Min Distance Kernel match up with CPU ?!?
if (g_app.doubleCheckMin)
{
if (true == checkMinResults)
{
printf( "Min Distance check: CPU and GPU results agree within tolerance.\n" );
}
else
{
printf( "Min Distance check: CPU and GPU results don't agree within tolerance !!!\n" );
}
}
// Dump Profile Info
if (g_app.profile)
{
float loops = (float)g_app.profileActualLoops;
float o_l = 1.0f / loops;
float avgP = BF_P_onto_device * o_l;
float avgD = BF_D_from_device * o_l;
float avgM = BF_M_from_device * o_l;
float avgGPUdist = BF_GPU_dist * o_l;
float avgCPUdist = BF_CPU_dist * o_l;
float avgGPUmin = BF_GPU_min * o_l;
float avgCPUmin = BF_CPU_min * o_l;
// Verbose
printf( "Number of profile loops = %f.\n", loops );
printf( "BF - Copy 'Point' vector onto GPU, time: %f msecs.\n", avgP );
printf( "BF - Copy 'Dists' vector from GPU, time: %f msecs.\n", avgD );
printf( "BF - Copy 'Results' from GPU, time: %f msecs.\n", avgM );
printf( "BF - GPU Distance computation, time: %f msecs.\n", avgGPUdist );
printf( "BF - CPU Distance computation, time: %f msecs.\n", avgCPUdist );
printf( "BF - GPU Min Distance computation, time: %f msecs.\n", avgGPUmin );
printf( "BF - CPU Min Distance computation, time: %f msecs.\n\n", avgCPUmin );
// Terse
//printf( "BF - P, D, M, G_D, C_D, G_M, C_M\n" );
//printf( " %f, %f, %f, %f, %f, %f, %f\n\n", avgP, avgD, avgM, avgGPUdist, avgCPUdist, avgGPUmin, avgCPUmin );
}
else
{
printf( "BF - Copy 'Point' vector onto GPU, time: %f msecs.\n", BF_P_onto_device );
printf( "BF - Copy 'Dists' vector from GPU, time: %f msecs.\n", BF_D_from_device );
printf( "BF - Copy 'Results' from GPU, time: %f msecs.\n", BF_M_from_device );
printf( "BF - GPU Distance computation, time: %f msecs.\n", BF_GPU_dist );
printf( "BF - CPU Distance computation, time: %f msecs.\n", BF_CPU_dist );
printf( "BF - GPU Min Distance computation, time: %f msecs.\n", BF_GPU_min );
printf( "BF - CPU Min Distance computation, time: %f msecs.\n\n", BF_CPU_min );
}
/*---------------------------------
Step 13. Cleanup vector memory
---------------------------------*/
printf( "Shutting Down...\n" );
// clean up allocations
free( h_Points );
free( h_Dists_GPU );
free( h_Dists_CPU );
free( h_result_GPU );
sdkDeleteTimer( g_app.hTimer );
checkCudaErrors( hipFree( d_Points ) );
checkCudaErrors( hipFree( d_Dists ) );
checkCudaErrors( hipFree( d_Reduce ) );
printf( "Shutdown done...\n\n" );
#endif
// Success
return true;
}
/*---------------------------------------------------------
Name: ComputeBlockShapeFromVector
Desc:
Notes:
0. Assumes following members are initialized properly
before this function is called
shape.nElems = Number of original elements in vector
shape.tpr = threads per row
shape.rpb = rows per block
1. Block Limits
Thread block has at most 512 threads per block
2. Grid Limits
Grid has at most 65,535 blocks in any dimension
So a 1D grid is at most 65,535 x 1
and a 2D grid is at most 65,535 x 65,535
We use next smallest even number to these limits
IE 65,535 - 1
IE (65,535*65,535 - 1)
It's useful to have an even number of columns
in grid structure when doing reductions
---------------------------------------------------------*/
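/*
Worked example (illustrative values, not taken from the code above):
nElems = 100,000 with threadsPerRow = 16 and rowsPerBlock = 8 gives
threadsPerBlock = 128
blocksPerGrid = ceil(100,000 / 128) = 782 (fits a 1D grid and is already even)
W = 16 * 782 = 12,512 and H = 8 * 1 = 8
nPadded = W * H = 100,096 (96 padding slots beyond the original 100,000)
*/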
bool ComputeBlockShapeFromVector
(
BlockGridShape & bgShape // IN/OUT - bgShape
)
{
unsigned int remainder, extra;
//---------------------------------
// 1. Compute Threads Per Block
//---------------------------------
if ((bgShape.threadsPerRow > 512) || (bgShape.rowsPerBlock > 512))
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) > 512 TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
}
// Compute threads per block
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
// Make sure we don't exceed block limits
if (bgShape.threadsPerBlock > 512)
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock, bgShape.threadsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
return false;
}
//---------------------------------
// 2. Compute GRID structure
//---------------------------------
// Compute number of blocks needed to contain all elements in vector
bgShape.blocksPerGrid = bgShape.nElems / bgShape.threadsPerBlock;
remainder = bgShape.nElems % bgShape.threadsPerBlock;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerGrid += extra;
// Check if need a 1D Grid or 2D Grid structure
// to contain all blocks in grid
if (bgShape.blocksPerGrid <= 65534)
{
// We can use a simple 1D grid of blocks
bgShape.blocksPerRow = bgShape.blocksPerGrid;
bgShape.rowsPerGrid = 1;
}
else if (bgShape.blocksPerGrid <= 4294836224) // 4294836224 = (65535 * 65535 - 1)
{
// We need to use a 2D Grid structure instead...
// Use square root as an approximation for shape of 2D grid
float sq_r = sqrtf( (float)( bgShape.blocksPerGrid ) );
unsigned int uiSqrt = (unsigned int)sq_r;
bgShape.blocksPerRow = uiSqrt;
bgShape.rowsPerGrid = uiSqrt;
// Increment # of columns until we have enough space
// in grid layout for all elements in vector
while ((bgShape.blocksPerRow * bgShape.rowsPerGrid) < bgShape.blocksPerGrid)
{
bgShape.blocksPerRow++;
}
}
else
{
// Error - Vector is too large for 2D Grid
printf( "Vector is way too large...\n" );
return false;
}
// Make sure # of columns in 1D or 2D grid is even
// Which is useful to avoid special cases in reduction kernel
remainder = bgShape.blocksPerRow % 2;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerRow += extra;
// Compute # of padded blocks in Grid
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
// Make sure we don't exceed grid limits
if ((bgShape.blocksPerRow >= 65535) || (bgShape.rowsPerGrid >= 65535))
{
// Error - Grid can't have more than 65535 blocks in any dimension
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid, bgShape.blocksPerGrid );
printf( "Error - can't have more than 65535 blocks in any dimension\n" );
return false;
}
// Compute Width and Height of 2D vector structure
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow; // Width (in elements) of 2D vector
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid; // Height (in elements) of 2D vector
// Compute padded length of 2D vector
unsigned int sizeWH = bgShape.W * bgShape.H;
unsigned int sizeBG = bgShape.threadsPerBlock * bgShape.blocksPerGrid;
if (sizeWH != sizeBG)
{
// Programmer error-
printf( "Error - sizes don't match\n" );
return false;
}
// Compute number of elements in padded block structure
bgShape.nPadded = sizeWH;
// Success
return true;
}
/*---------------------------------------------------------
Name: ComputeBlockShapeFromQueryVector
Desc:
Notes:
0. Assumes following members are initialized properly
before this function is called
shape.nElems = Number of original elements in query vector
shape.tpr = threads per row
shape.rpb = rows per block
1. Block Limits
Thread block has at most 512 threads per block
2. Grid Limits
Grid has at most 65,535 blocks in any dimension
So a 1D grid is at most 65,535 x 1
and a 2D grid is at most 65,535 x 65,535
We use next smallest even number to these limits
IE 65,535 - 1
IE (65,535*65,535 - 1)
---------------------------------------------------------*/
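// Note: unlike ComputeBlockShapeFromVector above, this variant does not pad
// the grid out to an even number of block columns; presumably the query vector
// is never fed through the paired-block reduction kernel, so the even-column
// requirement does not apply here.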
bool ComputeBlockShapeFromQueryVector
(
BlockGridShape & bgShape // IN/OUT - bgShape
)
{
unsigned int remainder, extra;
//---------------------------------
// 1. Compute Threads Per Block
//---------------------------------
if ((bgShape.threadsPerRow > 512) || (bgShape.rowsPerBlock > 512))
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) > 512 TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
}
// Compute threads per block
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
// Make sure we don't exceed block limits
if (bgShape.threadsPerBlock > 512)
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock, bgShape.threadsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
return false;
}
//---------------------------------
// 2. Compute GRID structure
//---------------------------------
// Compute number of blocks needed to contain all elements in vector
bgShape.blocksPerGrid = bgShape.nElems / bgShape.threadsPerBlock;
remainder = bgShape.nElems % bgShape.threadsPerBlock;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerGrid += extra;
// Check if need a 1D Grid or 2D Grid structure
// to contain all blocks in grid
if (bgShape.blocksPerGrid <= 65534)
{
// We can use a simple 1D grid of blocks
bgShape.blocksPerRow = bgShape.blocksPerGrid;
bgShape.rowsPerGrid = 1;
}
else if (bgShape.blocksPerGrid <= 4294836224) // 4294836224 = (65535 * 65535 - 1)
{
// We need to use a 2D Grid structure instead...
// Use square root as an approximation for shape of 2D grid
float sq_r = sqrtf( (float)( bgShape.blocksPerGrid ) );
unsigned int uiSqrt = (unsigned int)sq_r;
bgShape.blocksPerRow = uiSqrt;
bgShape.rowsPerGrid = uiSqrt;
// Increment # of columns until we have enough space
// in grid layout for all elements in vector
while ((bgShape.blocksPerRow * bgShape.rowsPerGrid) < bgShape.blocksPerGrid)
{
bgShape.blocksPerRow++;
}
}
else
{
// Error - Vector is too large for 2D Grid
printf( "Vector is way too large...\n" );
return false;
}
// Make sure we don't exceed grid limits
if ((bgShape.blocksPerRow >= 65535) || (bgShape.rowsPerGrid >= 65535))
{
// Error - Grid can't have more than 65535 blocks in any dimension
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid, bgShape.blocksPerGrid );
printf( "Error - can't have more than 65535 blocks in any dimension\n" );
return false;
}
// Compute # of padded blocks in Grid
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
// Compute Width and Height of 2D vector structure
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow; // Width (in elements) of 2D vector
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid; // Height (in elements) of 2D vector
// Compute padded length of 2D vector
unsigned int sizeWH = bgShape.W * bgShape.H;
unsigned int sizeBG = bgShape.threadsPerBlock * bgShape.blocksPerGrid;
if (sizeWH != sizeBG)
{
// Programmer error-
printf( "Error - sizes don't match\n" );
return false;
}
// Compute number of elements in padded block structure
bgShape.nPadded = sizeWH;
// Success
return true;
}
/*---------------------------------------------------------
Name: RandomFloat
Desc: Generates a random float value in range [low,high]
---------------------------------------------------------*/
float RandomFloat( float low, float high )
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
/*---------------------------------------------------------
Name: InitCUDA
Desc: Initialize CUDA system for GPU processing
---------------------------------------------------------*/
// Runtime API version...
bool InitCUDA( AppGlobals & g )
{
bool bResult = false;
int nDevices = 0;
int deviceToUse = 0;
unsigned int cudaContextFlags = 0;
hipError_t cudaResult = hipSuccess;
#if (CUDA_PLATFORM == CUDA_DEVICE)
hipError_t cuda_Result = hipSuccess;
// Initialize CUDA
unsigned int cudaFlags = 0;
cuda_Result = hipInit( cudaFlags );
if (hipSuccess != cuda_Result)
{
// Error - hipInit() failed
fprintf( stderr, "InitCuda() - hipInit() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get count of CUDA Devices
cuda_Result = hipGetDeviceCount(&nDevices);
if (hipSuccess != cuda_Result)
{
// Error - hipGetDeviceCount() failed
fprintf( stderr, "InitCuda() - hipGetDeviceCount() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
if (nDevices <= 0)
{
// No Valid Display Device found
cuda_Result = hipErrorInvalidDevice;
fprintf( stderr, "InitCuda() - no valid display device found, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
else if (nDevices >= 2)
{
deviceToUse = 1;
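// (Mirrors the assumption documented in the runtime-API path below:
// device 0 is treated as the primary display device, so prefer device 1
// for compute when more than one device is present.)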
}
// Get Specified Device
cuda_Result = hipDeviceGet( &(g.currDevice), deviceToUse );
if (hipSuccess != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipDeviceGet() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get RAW Device Properties
cuda_Result = hipGetDeviceProperties( &(g.rawProps), g.currDevice );
if (hipSuccess != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Set up the CUDA context
cuda_Result = hipCtxCreate( &g.currContext, cudaContextFlags, g.currDevice );
if ( hipSuccess != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipCtxCreate() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get CUDA Display Device Properties
cudaResult = hipGetDeviceProperties( &(g.cudaProps) , deviceToUse );
if ( hipSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
#elif (CUDA_PLATFORM == CUDA_CUDA)
// Pick Display Device to perform GPU calculations on...
cudaResult = hipGetDeviceCount( &nDevices );
if ( hipSuccess != cudaResult )
{
// Error - hipGetDeviceCount() failed
fprintf( stderr, "InitCuda() - hipGetDeviceCount() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
// Note: Assumes Device 0 = primary display device
// Assumes Device 1 = work horse for CUDA
if (nDevices <= 0)
{
// No Valid Display Device found
cudaResult = hipErrorInvalidDevice;
fprintf( stderr, "InitCuda() - no valid display device found, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
else if (nDevices >= 2)
{
deviceToUse = 1;
}
// Get Display Device Properties
cudaResult = hipGetDeviceProperties( &(g.cudaProps) , deviceToUse );
if ( hipSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
// Setup Display Device
cudaResult = hipSetDevice( deviceToUse );
if ( hipSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - hipSetDevice() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
#endif // CUDA_CUDA
// Success
bResult = true;
lblError:
return bResult;
}
//---------------------------------------------------------
// Name: FiniCUDA
// Desc: Cleanup CUDA system
//---------------------------------------------------------
bool FiniCUDA()
{
#if (CUDA_PLATFORM == CUDA_DEVICE)
// Detach CUDA from current thread
hipError_t cuda_Result = hipSuccess;
cuda_Result = hipCtxDetach( g_app.currContext );
if (hipSuccess != cuda_Result)
{
// Error - hipCtxDetach() failed
fprintf( stderr, "FiniCUDA() - hipCtxDetach() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
return false;
}
#endif
// Success
return true;
}
/*---------------------------------------------------------
Name: InitGlobals
Desc: Initialize Application Globals to Default
---------------------------------------------------------*/
bool InitGlobals( AppGlobals & g )
{
//
// Set Defaults
//
// Search Vectors
g.nSearch = 100;
g.searchList = NULL;
g.nQuery = 10;
g.queryList = NULL;
// Cuda Properties
size_t byteSize;
#if (CUDA_PLATFORM == CUDA_DEVICE)
g.currDevice = 0;
// Initialize cuda device props to zero
byteSize = sizeof( g.rawProps );
memset( &g.rawProps, 0, byteSize );
#endif
// Initialize cuda props to zero
byteSize = sizeof( g.cudaProps );
memset( &g.cudaProps, 0, byteSize );
// Init Block Grid Shape
InitShapeDefaults( g.bgShape );
// App Properties
g.nopromptOnExit = 0;
g.doubleCheckDists = 1;
// Profiling Info
g.hTimer = 0;
g.profile = 1;
g.profileSkipFirstLast = 0;
g.profileRequestedLoops = 1;
g.profileActualLoops = 1;
return true;
}
/*---------------------------------------------------------
Name: GetCommandLineParameters
Desc:
---------------------------------------------------------*/
bool GetCommandLineParams
(
int argc, // Count of Command Line Parameters
const char** argv, // List of Command Line Parameters
AppGlobals & g // Structure to store results in
)
{
int iVal;
// Prompt before exiting application ?!?
if (cutCheckCmdLineFlag( argc, argv, "noprompt") )
{
g.nopromptOnExit = true;
}
else
{
g.nopromptOnExit = false;
}
// Double Check Distances
if (cutCheckCmdLineFlag( argc, argv, "cdist") )
{
g.doubleCheckDists = true;
}
else
{
g.doubleCheckDists = false;
}
// Double Check Distances
if (cutCheckCmdLineFlag( argc, argv, "cmin") )
{
g.doubleCheckMin = true;
}
else
{
g.doubleCheckMin = false;
}
// Get # Threads Per Row (block shape)
if (cutGetCmdLineArgumenti( argc, argv, "TPR", &iVal ))
{
if (iVal < 1) { iVal = 1; }
g.bgShape.threadsPerRow = iVal;
}
// Get # Rows Per Block
if (cutGetCmdLineArgumenti( argc, argv, "RPB", &iVal ))
{
if (iVal < 1) { iVal = 1; }
g.bgShape.rowsPerBlock = iVal;
}
// Calculate Threads Per Block
g.bgShape.threadsPerBlock = g.bgShape.threadsPerRow * g.bgShape.rowsPerBlock;
if (g.bgShape.threadsPerBlock > 512)
{
// Error - Can't have more than 512 threads per block
printf( "Max Threads Per Block is 512!!!\n\n" );
return false;
}
// Get search Vector Length
if (cutGetCmdLineArgumenti( argc, argv, "N", &iVal ))
{
if (iVal < 1) { iVal = 10000; }
g.nSearch = (int)iVal;
}
// Get Query Vector Length
if (cutGetCmdLineArgumenti( argc, argv, "NQ", &iVal ))
{
if (iVal < 1) { iVal = 100; }
g.nQuery = (int)iVal;
}
// Should we do profiling (performance measurements) on application
if (cutCheckCmdLineFlag( argc, argv, "profile") )
{
g.profile = true;
}
else
{
g.profile = false;
}
if (g.profile)
{
// Get Skip First Last flag
if (cutCheckCmdLineFlag( argc, argv, "skip") )
{
g.profileSkipFirstLast = true;
}
else
{
g.profileSkipFirstLast = false;
}
// Get Number of Iterations for Profiling performance
if (cutGetCmdLineArgumenti( argc, argv, "profile", &iVal ))
{
if (iVal < 1) { iVal = 100; }
g.profileRequestedLoops = iVal;
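// When skipping the first/last measurements, run two extra iterations so the
// warm-up and tail runs can be dropped from the averaged timings.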
if (g.profileSkipFirstLast)
{
g.profileActualLoops = g.profileRequestedLoops + 2;
}
else
{
g.profileActualLoops = g.profileRequestedLoops;
}
}
}
// Success
return true;
}
/*---------------------------------------------------------
Name: InitShapeDefaults
---------------------------------------------------------*/
void InitShapeDefaults( BlockGridShape & bgShape )
{
// Default Thread, Grid, Vector Properties
bgShape.nElems = 100;
bgShape.threadsPerRow = 1;
bgShape.rowsPerBlock = 1;
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
bgShape.blocksPerRow = 100;
bgShape.rowsPerGrid = 1;
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow;
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid;
bgShape.nPadded = bgShape.W * bgShape.H;
}
/*---------------------------------------------------------
Name: DumpBlockGridShape
---------------------------------------------------------*/
void DumpBlockGridShape( BlockGridShape & bgShape )
{
printf( "N = %d, NPadded = %d\n",
bgShape.nElems, bgShape.nPadded );
printf( "Block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock,
bgShape.threadsPerBlock );
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid,
bgShape.blocksPerGrid );
printf( "W = %d, H = %d\n",
bgShape.W, bgShape.H );
}
| Median_Test.cu | /*-----------------------------------------------------------------------------
File: Median_Test.cu
Desc: Runs Median Test
-----------------------------------------------------------------------------*/
/*-------------------------------------
Includes
-------------------------------------*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, CUDA
#include <cutil_inline.h>
// includes, project
#include "KD_API.h"
/*-------------------------------------
Global Variables
-------------------------------------*/
extern AppGlobals g_app;
/*-------------------------------------
CUDA Kernels
-------------------------------------*/
//#include <Median_GPU.cu>
/*-------------------------------------
Function Declarations
-------------------------------------*/
bool RunMedianTest();
/*---------------------------------------------------------
Name: RunMedianTest()
Desc: Run a simple test of "Median Partition"
functionality on CUDA GPU framework
---------------------------------------------------------*/
bool RunMedianTest()
{
bool bResult = false;
#if 0
/*---------------------------------
Step 0. Initialize Cuda
---------------------------------*/
cudaError_t cuda_err = cudaSuccess;
// set seed for rand()
srand( 2009 );
g_app.hTimer = 0;
sdkCreateTimer( &(g_app.hTimer) );
/*-------------------------------------------
Step 1. Setup Initial parameters
-------------------------------------------*/
// Hard Coded for now...
g_app.bgShape.nElems = g_app.nSearch;
g_app.bgShape.threadsPerRow = MEDIAN_THREADS_PER_ROW;
g_app.bgShape.rowsPerBlock = MEDIAN_ROWS_PER_BLOCK;
bResult = ComputeBlockShapeFromVector( g_app.bgShape );
if (false == bResult)
{
// Error
return false;
}
// Make sure Matrix + vector is not too big to use up all device memory // 768 Meg on Display Card
int sizePoints = g_app.bgShape.nPadded * sizeof(float4);
int sizeDists = g_app.bgShape.nPadded * sizeof(float2);
int totalMem = sizePoints + (2*sizeDists);
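// Rough size example (illustrative): 1M padded points need
// 1M * sizeof(float4) = ~16 MB for the points plus 2 * (1M * sizeof(float2)) = ~16 MB
// for the two distance buffers, i.e. roughly 32 MB of device memory in total.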
// Make sure memory required to perform this operation doesn't exceed display device memory
if (totalMem >= g_app.cudaProps.totalGlobalMem)
{
// Error - not enough memory to perform operation
printf( "Matrix + Vector are too large for available device memory, running test will crash..." );
return false;
}
// Setup GPU Kernel execution parameters
// Median Sort Kernel
dim3 dimBlock( g_app.bgShape.threadsPerRow, g_app.bgShape.rowsPerBlock, 1 );
dim3 dimGrid( g_app.bgShape.blocksPerRow, g_app.bgShape.rowsPerGrid, 1 );
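// dimBlock mirrors (threadsPerRow x rowsPerBlock) and dimGrid mirrors
// (blocksPerRow x rowsPerGrid), so one thread maps to one element of the
// padded W x H layout computed by ComputeBlockShapeFromVector.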
/*-------------------------------------------
Step 2. Allocate Vectors
-------------------------------------------*/
int nOrig = g_app.bgShape.nElems;
int nPad = g_app.bgShape.nPadded;
int w = g_app.bgShape.W;
int h = g_app.bgShape.H;
/*-----------------------
Host Memory
-----------------------*/
// allocate host memory for original points (before median sort)
int mem_size_Points = nPad * sizeof(float4);
float4* h_Points_Orig = (float4*) malloc( (size_t)mem_size_Points );
// allocate host memory for point results (after median sort)
float4 *h_Points_Result = (float4*) malloc( mem_size_Points );
// allocate host memory for CPU point results (after median sort)
float4 *h_Points_CPU = (float4*) malloc( mem_size_Points );
// Allocate host memory for singleton median index result
unsigned int mem_size_Result = 16 * sizeof(I32);
I32 *h_result_GPU = (I32 *) malloc( mem_size_Result );
h_result_GPU[0] = -1;
/*-----------------------
Device Memory
-----------------------*/
// allocate device memory for points
float4* d_Points;
checkCudaErrors( cudaMalloc( (void**) &d_Points, mem_size_Points ) );
// allocate device memory for points
I32* d_result_GPU;
checkCudaErrors( cudaMalloc( (void**) &d_result_GPU, mem_size_Result ) );
// allocate device memory for Reduction Vector
// Used for reduction
// IE Ping Pong between dists vector and reduce vector to get answer
// to get final answer
//bool bPingPong = true;
//float4* d_Reduce;
//checkCudaErrors( cudaMalloc( (void **) &d_Reduce, mem_size_Points ) );
/*-------------------------------------------
Step 3. Initialize Vectors
-------------------------------------------*/
// Initialize Input points (to query against)
int i;
for (i = 0; i < nOrig; i++) // Original Points
{
// BUGBUG - for now just randomly generate points
// In future - we should read them in from a file...
h_Points_Orig[i].x = RandomFloat( 0.0, 1.0 );
h_Points_Orig[i].y = RandomFloat( 0.0, 1.0 );
h_Points_Orig[i].z = RandomFloat( 0.0, 1.0 );
// Store point index in this channel
h_Points_Orig[i].w = (float)i;
}
// Initialize padded points (to query against)
for (i = nOrig; i < nPad; i++) // Padded points
{
// We want padded points to always fail...
// 1st Approach,
// Use a point that is so far away it is guaranteed to never get picked
// Cons: Requires advance knowledge of input point range
// and query point range to pick a point
// so far outside range it doesn't matter
// 2nd Approach,
// Duplicate the 1st point many times
// Cons: Can fail because of numerical round-off errors
// IE what if the 1st point is really the closest to the query point
// which point wins (1st point or one of its duplicates)
//
// 1st Approach
//
h_Points_Orig[i].x = 400.0f; // Note: Any number much larger than 46,000 and we will overflow on squaring the float
h_Points_Orig[i].y = 400.0f;
h_Points_Orig[i].z = 400.0f;
h_Points_Orig[i].w = (float)-1; // Store invalid point index in this channel
//
// 2nd Approach
//
//h_Points_Orig[i].x = h_Points_Orig[0].x;
//h_Points_Orig[i].y = h_Points_Orig[0].y;
//h_Points_Orig[i].z = h_Points_Orig[0].z;
//h_Points_Orig[i].w = h_Points_Orig[0].w;
}
//
// Profile Performance Metric Initialization
//
float MED_PNT_onto_device = 0.0f;
float MED_PNT_from_device = 0.0f;
float MED_M_from_device = 0.0f;
float MED_GPU_Kernel = 0.0f;
float MED_CPU_Kernel = 0.0f;
bool checkMedianResults = true;
// Result values
int gpuMedianIdx; // Index of Median Point as computed on GPU
int cpuMedianIdx; // Index of Median Point as computed on CPU
// Profile Measurement Loop
unsigned int currIter;
for (currIter = 0; currIter < g_app.profileActualLoops; currIter++)
{
//-------------------------------------------------------
// Step 3. Move Points (& indices)
// from main memory to device memory
//-------------------------------------------------------
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy 'Points' vector from host memory to device memory
checkCudaErrors( cudaMemcpy( d_Points, h_Points_Orig, mem_size_Points, cudaMemcpyHostToDevice ) );
// Copy 'Initial' result vector from host memory to device memory
checkCudaErrors( cudaMemcpy( d_result_GPU, h_result_GPU, mem_size_Result, cudaMemcpyHostToDevice ) );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_PNT_onto_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_PNT_onto_device += sdkGetTimerValue( g_app.hTimer );
}
}
//---------------------------------
// Step 4. Call Kernel Function
//---------------------------------
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Execute the Median Sort kernel
MedianSort_GPU<<< dimGrid, dimBlock >>>( d_Points, w, h );
// Check if GPU kernel execution generated an error
cuda_err = cudaGetLastError();
if( cudaSuccess != cuda_err)
{
fprintf( stderr, "Cuda error: %s in file '%s' in line %i : %s.\n",
"MedianSort_GPU() failed", __FILE__, __LINE__, cudaGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_GPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_GPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
//-------------------------------------------------
// Step 5. Copy result vector (partitioned points)
// from device memory to main memory
//-------------------------------------------------
if (g_app.doubleCheckDists)
{
// BUGBUG - this is a temporary step to verify brute force distance calculation
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// copy result vector from device to host
checkCudaErrors( cudaMemcpy( (void *) h_Points_Result, d_Points, mem_size_Points, cudaMemcpyDeviceToHost ) );
// copy singleton median index from device to host
checkCudaErrors( cudaMemcpy( (void *) h_result_GPU, d_result_GPU, mem_size_Result, cudaMemcpyDeviceToHost ) );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_PNT_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_PNT_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
}
/*-------------------------------------------------
Step 6. Double check GPU result
against CPU result
-------------------------------------------------*/
if (g_app.doubleCheckDists)
{
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Compute reference solution (distances) on CPU
cpuMedianIdx = MedianSort_CPU( h_Points_CPU, h_Points_Orig, w, h );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
MED_CPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
MED_CPU_Kernel += sdkGetTimerValue( g_app.hTimer );
}
}
// Double check GPU Result against CPU result (for distances)
int NCheck = nPad;
int i;
for (i = 0; i < NCheck; i++)
{
const float eps = 1.0e-2f;
//printf( "[%d] GPU=%f, CPU=%f \n", i, gVal, cVal );
if ( ((cVal - eps) >= gVal) ||
((cVal + eps) <= gVal) )
{
// Error - Out of tolerance check range
printf( "[%d] GPU %f != CPU %f \n", i, gVal, cVal );
checkDistResults = false;
}
}
} // double check distances
/*-------------------------------------------------
Step 7. GPU Kernel to reduce distances
(& index) vector
to single best result
-------------------------------------------------*/
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy 'Distances' vector to 'Reduction' vector
// This is currently necessary to avoid garbage
// results in output caused by uninitialized values
checkCudaErrors( cudaMemcpy( d_Reduce, d_Dists, mem_size_Dists_GPU, cudaMemcpyDeviceToDevice ) );
int reduceElems = nPad;
dim3 reduceThreads;
dim3 reduceGrid;
BlockGridShape reduceShape;
// Compute Initial Grid Shape
reduceShape.nElems = reduceElems;
reduceShape.threadsPerRow = BFMR_THREADS_PER_ROW;
reduceShape.rowsPerBlock = BFMR_ROWS_PER_BLOCK;
ComputeBlockShapeFromVector( reduceShape );
// Make sure we have an even number of blocks to work on
if ((reduceShape.blocksPerRow % 2) != 0)
{
// Error - not an even number of blocks
fprintf( stderr, "Error - not an even number of blocks\n" );
return false;
}
reduceThreads.x = reduceShape.threadsPerRow;
reduceThreads.y = reduceShape.rowsPerBlock;
reduceThreads.z = 1;
reduceGrid.x = reduceShape.blocksPerRow / 2; // Divide by 2 (algorithm works on 2 blocks at a time)
reduceGrid.y = reduceShape.rowsPerGrid;
reduceGrid.z = 1;
bool bReduced = false;
bPingPong = true;
while (!bReduced)
{
// Ping Pong between "Distances" and "Reduce" vectors
if (bPingPong)
{
bPingPong = false;
// Call GPU Kernel to reduce result vector by THREADS_PER_BLOCK
Reduce_Min_GPU<<< reduceGrid, reduceThreads >>>( d_Reduce, d_Dists );
}
else
{
bPingPong = true;
// Call GPU Kernel to reduce result vector by THREADS_PER_BLOCK
Reduce_Min_GPU<<< reduceGrid, reduceThreads >>>( d_Dists, d_Reduce );
}
// Check if GPU kernel execution generated an error
cuda_err = cudaGetLastError();
if( cudaSuccess != cuda_err)
{
fprintf( stderr, "Cuda error: %s in file '%s' in line %i : %s.\n",
"PLQ_GPU_BF_DIST() failed", __FILE__, __LINE__, cudaGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
// Update Number of elements in reduction vector
reduceElems = reduceShape.blocksPerGrid / 2; // Divide by 2 - Algorithm works on 2 columns of blocks at a time
if (reduceElems == 1)
{
bReduced = true;
}
else
{
// Update Shape of Grid
reduceShape.nElems = reduceElems;
reduceShape.threadsPerRow = BFMR_THREADS_PER_ROW;
reduceShape.rowsPerBlock = BFMR_ROWS_PER_BLOCK;
ComputeBlockShapeFromVector( reduceShape );
// Make sure we have an even number of blocks to work on
if ((reduceShape.blocksPerRow % 2) != 0)
{
// Error - not an even number of blocks
fprintf( stderr, "Error - not an even number of blocks\n" );
return false;
}
reduceThreads.x = reduceShape.threadsPerRow;
reduceThreads.y = reduceShape.rowsPerBlock;
reduceThreads.z = 1;
reduceGrid.x = reduceShape.blocksPerRow / 2; // Divide by 2 (algorithm works on 2 blocks at a time)
reduceGrid.y = reduceShape.rowsPerGrid;
reduceGrid.z = 1;
}
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_GPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_GPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
/*-------------------------------------------------
Step 8. Read Result from GPU
-------------------------------------------------*/
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Copy closest point result from device to host memory (singleton distance & index)
if (!bPingPong)
{
cuda_err = cudaMemcpy( h_result_GPU, d_Reduce, mem_size_Result, cudaMemcpyDeviceToHost );
}
else
{
cuda_err = cudaMemcpy( h_result_GPU, d_Dists, mem_size_Result, cudaMemcpyDeviceToHost );
}
if (cudaSuccess != cuda_err)
{
fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, cudaGetErrorString( cuda_err ) );
exit( EXIT_FAILURE );
}
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_M_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_M_from_device += sdkGetTimerValue( g_app.hTimer );
}
}
// Save Results
gpuMinDist = h_result_GPU[0].x;
gpuMinIdx = (unsigned int)(h_result_GPU[0].y);
/*-------------------------------------------------
Step 9. Double check GPU result
against CPU result
-------------------------------------------------*/
if (g_app.doubleCheckMin)
{
// BUGBUG - this is a temporary step to verify brute force distance calculation
if (g_app.profile)
{
// Start Timer
sdkResetTimer( g_app.hTimer );
sdkStartTimer( g_app.hTimer );
}
// Compute reference solution (distances) on CPU
Reduce_Min_CPU( cpuMinIdx, cpuMinDist, h_Points, queryPoint, nOrig );
if (g_app.profile)
{
// Stop Timer and save performance measurement
sdkStopTimer( g_app.hTimer );
if (g_app.profileSkipFirstLast)
{
if ((1 < currIter) && (currIter <= g_app.profileActualLoops))
{
BF_CPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
else
{
BF_CPU_min += sdkGetTimerValue( g_app.hTimer );
}
}
//
// Double check GPU Result against CPU result
//
// Index check
if (gpuMinIdx != cpuMinIdx)
{
// Warning - Indices are not the same
// Note: This is not truly an error unless
// the closest distances also don't match
printf( "WARN - MIN GPU IDX %d != MIN CPU IDX %d \n", gpuMinIdx, cpuMinIdx );
}
// Distance Check
const float minEps = 1.0e-4f;
gVal = gpuMinDist;
cVal = cpuMinDist;
if ( ((cVal - minEps) >= gVal) ||
((cVal + minEps) <= gVal) )
{
// Error - Out of tolerance check range
printf( "ERR - MIN GPU DIST %f != MIN CPU DIST %f \n", i, gVal, cVal );
checkMinResults = false;
}
}
} // Profile Loops
/*--------------------------------------------------------
Step 11. Print out Results
--------------------------------------------------------*/
int vectLen = g_app.nSearch;
printf( "\n" );
printf( "Search Vector Length = %d\n", vectLen );
printf( "Query Point: <%f %f %f>\n",
queryPoint.x, queryPoint.y, queryPoint.z );
printf( "GPU Closest Distance: %f\n", gpuMinDist );
printf( "GPU Closest Index: %d\n", gpuMinIdx );
printf( "GPU Closest Point: <%f %f %f>\n",
h_Points[gpuMinIdx].x, h_Points[gpuMinIdx].y, h_Points[gpuMinIdx].z );
if (g_app.doubleCheckMin)
{
printf( "CPU Closest Distance: %f\n", cpuMinDist );
printf( "CPU Closest Index: %d\n", cpuMinIdx );
printf( "CPU Closest Point: <%f %f %f>\n",
h_Points[cpuMinIdx].x, h_Points[cpuMinIdx].y, h_Points[cpuMinIdx].z );
}
printf( "\n" );
/*--------------------------------------------------------
Step 12. Print out Profile Performance Metrics
--------------------------------------------------------*/
// Does GPU Distance Kernel match up with CPU ?!?
if (g_app.doubleCheckDists)
{
if (true == checkDistResults)
{
printf( "Distance check: CPU and GPU results agree within tolerance.\n" );
}
else
{
printf( "Distance check: CPU and GPU results don't agree within tolerance !!!\n" );
}
}
// Does GPU Min Distance Kernel match up with CPU ?!?
if (g_app.doubleCheckMin)
{
if (true == checkMinResults)
{
printf( "Min Distance check: CPU and GPU results agree within tolerance.\n" );
}
else
{
printf( "Min Distance check: CPU and GPU results don't agree within tolerance !!!\n" );
}
}
// Dump Profile Info
if (g_app.profile)
{
float loops = (float)g_app.profileActualLoops;
float o_l = 1.0f / loops;
float avgP = BF_P_onto_device * o_l;
float avgD = BF_D_from_device * o_l;
float avgM = BF_M_from_device * o_l;
float avgGPUdist = BF_GPU_dist * o_l;
float avgCPUdist = BF_CPU_dist * o_l;
float avgGPUmin = BF_GPU_min * o_l;
float avgCPUmin = BF_CPU_min * o_l;
// Verbose
printf( "Number of profile loops = %f.\n", loops );
printf( "BF - Copy 'Point' vector onto GPU, time: %f msecs.\n", avgP );
printf( "BF - Copy 'Dists' vector from GPU, time: %f msecs.\n", avgD );
printf( "BF - Copy 'Results' from GPU, time: %f msecs.\n", avgM );
printf( "BF - GPU Distance computation, time: %f msecs.\n", avgGPUdist );
printf( "BF - CPU Distance computation, time: %f msecs.\n", avgCPUdist );
printf( "BF - GPU Min Distance computation, time: %f msecs.\n", avgGPUmin );
printf( "BF - CPU Min Distance computation, time: %f msecs.\n\n", avgCPUmin );
// Terse
//printf( "BF - P, D, M, G_D, C_D, G_M, C_M\n" );
//printf( " %f, %f, %f, %f, %f, %f, %f\n\n", avgP, avgD, avgM, avgGPUdist, avgCPUdist, avgGPUmin, avgCPUmin );
}
else
{
printf( "BF - Copy 'Point' vector onto GPU, time: %f msecs.\n", BF_P_onto_device );
printf( "BF - Copy 'Dists' vector from GPU, time: %f msecs.\n", BF_D_from_device );
printf( "BF - Copy 'Results' from GPU, time: %f msecs.\n", BF_M_from_device );
printf( "BF - GPU Distance computation, time: %f msecs.\n", BF_GPU_dist );
printf( "BF - CPU Distance computation, time: %f msecs.\n", BF_CPU_dist );
printf( "BF - GPU Min Distance computation, time: %f msecs.\n", BF_GPU_min );
printf( "BF - CPU Min Distance computation, time: %f msecs.\n\n", BF_CPU_min );
}
/*---------------------------------
Step 13. Cleanup vector memory
---------------------------------*/
printf( "Shutting Down...\n" );
// clean up allocations
free( h_Points );
free( h_Dists_GPU );
free( h_Dists_CPU );
free( h_result_GPU );
sdkDeleteTimer( g_app.hTimer );
checkCudaErrors( cudaFree( d_Points ) );
checkCudaErrors( cudaFree( d_Dists ) );
checkCudaErrors( cudaFree( d_Reduce ) );
printf( "Shutdown done...\n\n" );
#endif
// Success
return true;
}
/*---------------------------------------------------------
Name: ComputeBlockShapeFromVector
Desc:
Notes:
0. Assumes following members are initialized properly
before this function is called
shape.nElems = Number of original elements in vector
shape.tpr = threads per row
shape.rpb = rows per block
1. Block Limits
Thread block has at most 512 threads per block
2. Grid Limits
Grid has at most 65,535 blocks in any dimension
So a 1D grid is at most 65,535 x 1
and a 2D grid is at most 65,535 x 65,535
We use next smallest even number to these limits
IE 65,535 - 1
IE (65,535*65,535 - 1)
It's useful to have an even number of columns
in grid structure when doing reductions
---------------------------------------------------------*/
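/*
Worked example of the 2D-grid branch (illustrative values, not taken from the code):
nElems = 20,000,000 with threadsPerRow = 16 and rowsPerBlock = 16 gives
threadsPerBlock = 256 and blocksPerGrid = 20,000,000 / 256 = 78,125,
which exceeds 65,534, so a 2D grid is used:
sqrt(78,125) ~= 279.5 -> start from 279 x 279 and widen the columns to 281 x 279,
then pad the columns to an even count: 282 x 279 = 78,678 blocks.
W = 16 * 282 = 4,512, H = 16 * 279 = 4,464, nPadded = 20,141,568.
*/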
bool ComputeBlockShapeFromVector
(
BlockGridShape & bgShape // IN/OUT - bgShape
)
{
unsigned int remainder, extra;
//---------------------------------
// 1. Compute Threads Per Block
//---------------------------------
if ((bgShape.threadsPerRow > 512) || (bgShape.rowsPerBlock > 512))
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) > 512 TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
}
// Compute threads per block
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
// Make sure we don't exceed block limits
if (bgShape.threadsPerBlock > 512)
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock, bgShape.threadsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
return false;
}
//---------------------------------
// 2. Compute GRID structure
//---------------------------------
// Compute number of blocks needed to contain all elements in vector
bgShape.blocksPerGrid = bgShape.nElems / bgShape.threadsPerBlock;
remainder = bgShape.nElems % bgShape.threadsPerBlock;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerGrid += extra;
// Check if need a 1D Grid or 2D Grid structure
// to contain all blocks in grid
if (bgShape.blocksPerGrid <= 65534)
{
// We can use a simple 1D grid of blocks
bgShape.blocksPerRow = bgShape.blocksPerGrid;
bgShape.rowsPerGrid = 1;
}
else if (bgShape.blocksPerGrid <= 4294836224) // 4294836224 = (65535 * 65535 - 1)
{
// We need to use a 2D Grid structure instead...
// Use square root as an approximation for shape of 2D grid
float sq_r = sqrtf( (float)( bgShape.blocksPerGrid ) );
unsigned int uiSqrt = (unsigned int)sq_r;
bgShape.blocksPerRow = uiSqrt;
bgShape.rowsPerGrid = uiSqrt;
// Increment # of columns until we have enough space
// in grid layout for all elements in vector
while ((bgShape.blocksPerRow * bgShape.rowsPerGrid) < bgShape.blocksPerGrid)
{
bgShape.blocksPerRow++;
}
}
else
{
// Error - Vector is too large for 2D Grid
printf( "Vector is way too large...\n" );
return false;
}
// Make sure # of columns in 1D or 2D grid is even
// Which is useful to avoid special cases in reduction kernel
remainder = bgShape.blocksPerRow % 2;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerRow += extra;
// Compute # of padded blocks in Grid
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
// Make sure we don't exceed grid limits
if ((bgShape.blocksPerRow >= 65535) || (bgShape.rowsPerGrid >= 65535))
{
// Error - Grid can't have more than 65535 blocks in any dimension
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid, bgShape.blocksPerGrid );
printf( "Error - can't have more than 65535 blocks in any dimension\n" );
return false;
}
// Compute Width and Height of 2D vector structure
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow; // Width (in elements) of 2D vector
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid; // Height (in elements) of 2D vector
// Compute padded length of 2D vector
unsigned int sizeWH = bgShape.W * bgShape.H;
unsigned int sizeBG = bgShape.threadsPerBlock * bgShape.blocksPerGrid;
if (sizeWH != sizeBG)
{
// Programmer error-
printf( "Error - sizes don't match\n" );
return false;
}
// Compute number of elements in padded block structure
bgShape.nPadded = sizeWH;
// Success
return true;
}
/*---------------------------------------------------------
Name: ComputeBlockShapeFromQueryVector
Desc:
Notes:
0. Assumes following members are initialized properly
before this function is called
shape.nElems = Number of original elements in query vector
shape.tpr = threads per row
shape.rpb = rows per block
1. Block Limits
Thread block has at most 512 threads per block
2. Grid Limits
Grid has at most 65,535 blocks in any dimension
So a 1D grid is at most 65,535 x 1
and a 2D grid is at most 65,535 x 65,535
We use next smallest even number to these limits
IE 65,535 - 1
IE (65,535*65,535 - 1)
---------------------------------------------------------*/
bool ComputeBlockShapeFromQueryVector
(
BlockGridShape & bgShape // IN/OUT - bgShape
)
{
unsigned int remainder, extra;
//---------------------------------
// 1. Compute Threads Per Block
//---------------------------------
if ((bgShape.threadsPerRow > 512) || (bgShape.rowsPerBlock > 512))
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) > 512 TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
}
// Compute threads per block
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
// Make sure we don't exceed block limits
if (bgShape.threadsPerBlock > 512)
{
// Error - block can't have more than 512 threads
printf( "block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock, bgShape.threadsPerBlock );
printf( "Error - can't have more than 512 threads in block" );
return false;
}
//---------------------------------
// 2. Compute GRID structure
//---------------------------------
// Compute number of blocks needed to contain all elements in vector
bgShape.blocksPerGrid = bgShape.nElems / bgShape.threadsPerBlock;
remainder = bgShape.nElems % bgShape.threadsPerBlock;
extra = ((0 == remainder) ? 0 : 1);
bgShape.blocksPerGrid += extra;
// Check if need a 1D Grid or 2D Grid structure
// to contain all blocks in grid
if (bgShape.blocksPerGrid <= 65534)
{
// We can use a simple 1D grid of blocks
bgShape.blocksPerRow = bgShape.blocksPerGrid;
bgShape.rowsPerGrid = 1;
}
else if (bgShape.blocksPerGrid <= 4294836224) // 4294836224 = (65535 * 65535 - 1)
{
// We need to use a 2D Grid structure instead...
// Use square root as an approximation for shape of 2D grid
float sq_r = sqrtf( (float)( bgShape.blocksPerGrid ) );
unsigned int uiSqrt = (unsigned int)sq_r;
bgShape.blocksPerRow = uiSqrt;
bgShape.rowsPerGrid = uiSqrt;
// Increment # of columns until we have enough space
// in grid layout for all elements in vector
while ((bgShape.blocksPerRow * bgShape.rowsPerGrid) < bgShape.blocksPerGrid)
{
bgShape.blocksPerRow++;
}
}
else
{
// Error - Vector is too large for 2D Grid
printf( "Vector is way too large...\n" );
return false;
}
// Make sure we don't exceed grid limits
if ((bgShape.blocksPerRow >= 65535) || (bgShape.rowsPerGrid >= 65535))
{
// Error - Grid can't have more than 65535 blocks in any dimension
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid, bgShape.blocksPerGrid );
printf( "Error - can't have more than 65535 blocks in any dimension\n" );
return false;
}
// Compute # of padded blocks in Grid
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
// Compute Width and Height of 2D vector structure
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow; // Width (in elements) of 2D vector
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid; // Height (in elements) of 2D vector
// Compute padded length of 2D vector
unsigned int sizeWH = bgShape.W * bgShape.H;
unsigned int sizeBG = bgShape.threadsPerBlock * bgShape.blocksPerGrid;
if (sizeWH != sizeBG)
{
// Programmer error-
printf( "Error - sizes don't match\n" );
return false;
}
// Compute number of elements in padded block structure
bgShape.nPadded = sizeWH;
// Success
return true;
}
/*---------------------------------------------------------
Name: RandomFloat
Desc: Generates a random float value in range [low,high]
---------------------------------------------------------*/
float RandomFloat( float low, float high )
{
float t = (float)rand() / (float)RAND_MAX;
return (1.0f - t) * low + t * high;
}
/*---------------------------------------------------------
Name: InitCUDA
Desc: Initialize CUDA system for GPU processing
---------------------------------------------------------*/
// Runtime API version...
bool InitCUDA( AppGlobals & g )
{
bool bResult = false;
int nDevices = 0;
int deviceToUse = 0;
unsigned int cudaContextFlags = 0;
cudaError_t cudaResult = cudaSuccess;
#if (CUDA_PLATFORM == CUDA_DEVICE)
CUresult cuda_Result = CUDA_SUCCESS;
// Initialize CUDA
unsigned int cudaFlags = 0;
cuda_Result = cuInit( cudaFlags );
if (CUDA_SUCCESS != cuda_Result)
{
// Error - cuInit() failed
fprintf( stderr, "InitCuda() - cuInit() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get count of CUDA Devices
cuda_Result = cuDeviceGetCount(&nDevices);
if (CUDA_SUCCESS != cuda_Result)
{
// Error - cuDeviceGetCount() failed
fprintf( stderr, "InitCuda() - cuDeviceGetCount() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
if (nDevices <= 0)
{
// No Valid Display Device found
cuda_Result = CUDA_ERROR_INVALID_DEVICE;
fprintf( stderr, "InitCuda() - no valid display device found, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
else if (nDevices >= 2)
{
deviceToUse = 1;
}
// Get Specified Device
cuda_Result = cuDeviceGet( &(g.currDevice), deviceToUse );
if (CUDA_SUCCESS != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cuDeviceGet() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get RAW Device Properties
cuda_Result = cuDeviceGetProperties( &(g.rawProps), g.currDevice );
if (CUDA_SUCCESS != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cuDeviceGetProperties() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Set up the CUDA context
cuda_Result = cuCtxCreate( &g.currContext, cudaContextFlags, g.currDevice );
if ( CUDA_SUCCESS != cuda_Result )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cuCtxCreate() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
goto lblError;
}
// Get CUDA Display Device Properties
cudaResult = cudaGetDeviceProperties( &(g.cudaProps) , deviceToUse );
if ( cudaSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cudaGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
#elif (CUDA_PLATFORM == CUDA_CUDA)
// Pick Display Device to perform GPU calculations on...
cudaResult = cudaGetDeviceCount( &nDevices );
if ( cudaSuccess != cudaResult )
{
// Error - cudaGetDeviceCount() failed
fprintf( stderr, "InitCuda() - cudaGetDeviceCount() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
// Note: Assumes Device 0 = primary display device
// Assumes Device 1 = work horse for CUDA
if (nDevices <= 0)
{
// No Valid Display Device found
cudaResult = cudaErrorInvalidDevice;
fprintf( stderr, "InitCuda() - no valid display device found, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
else if (nDevices >= 2)
{
deviceToUse = 1;
}
// Get Display Device Properties
cudaResult = cudaGetDeviceProperties( &(g.cudaProps) , deviceToUse );
if ( cudaSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cudaGetDeviceProperties() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
// Setup Display Device
cudaResult = cudaSetDevice( deviceToUse );
if ( cudaSuccess != cudaResult )
{
// Error - cudaDeviceGet() failed
fprintf( stderr, "InitCuda() - cudaSetDevice() failed, error = %x in file '%s' in line %i.\n",
cudaResult, __FILE__, __LINE__ );
goto lblError;
}
#endif // CUDA_CUDA
// Success
bResult = true;
lblError:
return bResult;
}
//---------------------------------------------------------
// Name: FiniCUDA
// Desc: Cleanup CUDA system
//---------------------------------------------------------
bool FiniCUDA()
{
#if (CUDA_PLATFORM == CUDA_DEVICE)
// Detach CUDA from current thread
CUresult cuda_Result = CUDA_SUCCESS;
cuda_Result = cuCtxDetach( g_app.currContext );
if (CUDA_SUCCESS != cuda_Result)
{
// Error - cuCtxDetach() failed
fprintf( stderr, "FiniCUDA() - cuCtxDetach() failed, error = %x in file '%s' in line %i.\n",
cuda_Result, __FILE__, __LINE__ );
return false;
}
#endif
// Success
return true;
}
/*---------------------------------------------------------
Name: InitGlobals
Desc: Initialize Application Globals to Default
---------------------------------------------------------*/
bool InitGlobals( AppGlobals & g )
{
//
// Set Defaults
//
// Search Vectors
g.nSearch = 100;
g.searchList = NULL;
g.nQuery = 10;
g.queryList = NULL;
// Cuda Properties
size_t byteSize;
#if (CUDA_PLATFORM == CUDA_DEVICE)
g.currDevice = 0;
// Initialize cuda device props to zero
byteSize = sizeof( g.rawProps );
memset( &g.rawProps, 0, byteSize );
#endif
// Initialize cuda props to zero
byteSize = sizeof( g.cudaProps );
memset( &g.cudaProps, 0, byteSize );
// Init Block Grid Shape
InitShapeDefaults( g.bgShape );
// App Properties
g.nopromptOnExit = 0;
g.doubleCheckDists = 1;
// Profiling Info
g.hTimer = 0;
g.profile = 1;
g.profileSkipFirstLast = 0;
g.profileRequestedLoops = 1;
g.profileActualLoops = 1;
return true;
}
/*---------------------------------------------------------
Name: GetCommandLineParameters
Desc:
---------------------------------------------------------*/
bool GetCommandLineParams
(
int argc, // Count of Command Line Parameters
const char** argv, // List of Command Line Parameters
AppGlobals & g // Structure to store results in
)
{
int iVal;
// Prompt before exiting application ?!?
if (cutCheckCmdLineFlag( argc, argv, "noprompt") )
{
g.nopromptOnExit = true;
}
else
{
g.nopromptOnExit = false;
}
// Double Check Distances
if (cutCheckCmdLineFlag( argc, argv, "cdist") )
{
g.doubleCheckDists = true;
}
else
{
g.doubleCheckDists = false;
}
	// Double Check Min
if (cutCheckCmdLineFlag( argc, argv, "cmin") )
{
g.doubleCheckMin = true;
}
else
{
g.doubleCheckMin = false;
}
// Get # Threads Per Row (block shape)
if (cutGetCmdLineArgumenti( argc, argv, "TPR", &iVal ))
{
if (iVal < 1) { iVal = 1; }
g.bgShape.threadsPerRow = iVal;
}
// Get # Rows Per Block
if (cutGetCmdLineArgumenti( argc, argv, "RPB", &iVal ))
{
if (iVal < 1) { iVal = 1; }
g.bgShape.rowsPerBlock = iVal;
}
// Calculate Threads Per Block
g.bgShape.threadsPerBlock = g.bgShape.threadsPerRow * g.bgShape.rowsPerBlock;
if (g.bgShape.threadsPerBlock > 512)
{
// Error - Can't have more than 512 threads per block
printf( "Max Threads Per Block is 512!!!\n\n" );
return false;
}
// Get search Vector Length
if (cutGetCmdLineArgumenti( argc, argv, "N", &iVal ))
{
if (iVal < 1) { iVal = 10000; }
g.nSearch = (int)iVal;
}
// Get Query Vector Length
if (cutGetCmdLineArgumenti( argc, argv, "NQ", &iVal ))
{
if (iVal < 1) { iVal = 100; }
g.nQuery = (int)iVal;
}
// Should we do profiling (performance measurements) on application
if (cutCheckCmdLineFlag( argc, argv, "profile") )
{
g.profile = true;
}
else
{
g.profile = false;
}
if (g.profile)
{
// Get Skip First Last flag
if (cutCheckCmdLineFlag( argc, argv, "skip") )
{
g.profileSkipFirstLast = true;
}
else
{
g.profileSkipFirstLast = false;
}
// Get Number of Iterations for Profiling performance
if (cutGetCmdLineArgumenti( argc, argv, "profile", &iVal ))
{
if (iVal < 1) { iVal = 100; }
g.profileRequestedLoops = iVal;
if (g.profileSkipFirstLast)
{
g.profileActualLoops = g.profileRequestedLoops + 2;
}
else
{
g.profileActualLoops = g.profileRequestedLoops;
}
}
}
// Success
return true;
}
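/*---------------------------------------------------------
  Example invocation (illustrative only -- the executable name is
  hypothetical and the exact flag syntax depends on the cutil
  command-line helpers used above):
     kdtree_app --N=100000 --NQ=1000 --TPR=16 --RPB=16 --profile=10 --skip --noprompt
  This would request 100,000 search points and 1,000 query points,
  a 16 x 16 = 256 thread block, and 10 requested profiling loops
  (12 actual loops, since "skip" drops the first and last loop
  from the averages), exiting without a prompt.
---------------------------------------------------------*/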
/*---------------------------------------------------------
Name: InitShapeDefaults
---------------------------------------------------------*/
void InitShapeDefaults( BlockGridShape & bgShape )
{
// Default Thread, Grid, Vector Properties
bgShape.nElems = 100;
bgShape.threadsPerRow = 1;
bgShape.rowsPerBlock = 1;
bgShape.threadsPerBlock = bgShape.threadsPerRow * bgShape.rowsPerBlock;
bgShape.blocksPerRow = 100;
bgShape.rowsPerGrid = 1;
bgShape.blocksPerGrid = bgShape.blocksPerRow * bgShape.rowsPerGrid;
bgShape.W = bgShape.threadsPerRow * bgShape.blocksPerRow;
bgShape.H = bgShape.rowsPerBlock * bgShape.rowsPerGrid;
bgShape.nPadded = bgShape.W * bgShape.H;
}
/*---------------------------------------------------------
Name: DumpBlockGridShape
---------------------------------------------------------*/
void DumpBlockGridShape( BlockGridShape & bgShape )
{
printf( "N = %d, NPadded = %d\n",
bgShape.nElems, bgShape.nPadded );
printf( "Block (%d TPR x %d RPB) = %d TPB\n",
bgShape.threadsPerRow, bgShape.rowsPerBlock,
bgShape.threadsPerBlock );
printf( "Grid (%d BPR x %d RPG) = %d BPG\n",
bgShape.blocksPerRow, bgShape.rowsPerGrid,
bgShape.blocksPerGrid );
printf( "W = %d, H = %d\n",
bgShape.W, bgShape.H );
}
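/*---------------------------------------------------------
  For the defaults set in InitShapeDefaults above, this dump
  would print:
     N = 100, NPadded = 100
     Block (1 TPR x 1 RPB) = 1 TPB
     Grid (100 BPR x 1 RPG) = 100 BPG
     W = 100, H = 1
---------------------------------------------------------*/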
|
76e3ab116c411c7db796695ee92e4f201ad919ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "HeterogeneousCore/CUDAUtilities/interface/AtomicPairCounter.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
__global__ void update(AtomicPairCounter *dc, uint32_t *ind, uint32_t *cont, uint32_t n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
auto m = i % 11;
m = m % 6 + 1; // max 6, no 0
auto c = dc->add(m);
assert(c.m < n);
ind[c.m] = c.n;
for (int j = c.n; j < c.n + m; ++j)
cont[j] = i;
};
__global__ void finalize(AtomicPairCounter const *dc, uint32_t *ind, uint32_t *cont, uint32_t n) {
assert(dc->get().m == n);
ind[n] = dc->get().n;
}
__global__ void verify(AtomicPairCounter const *dc, uint32_t const *ind, uint32_t const *cont, uint32_t n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
assert(0 == ind[0]);
assert(dc->get().m == n);
assert(ind[n] == dc->get().n);
auto ib = ind[i];
auto ie = ind[i + 1];
auto k = cont[ib++];
assert(k < n);
for (; ib < ie; ++ib)
assert(cont[ib] == k);
}
#include <iostream>
int main() {
AtomicPairCounter *dc_d;
hipMalloc(&dc_d, sizeof(AtomicPairCounter));
hipMemset(dc_d, 0, sizeof(AtomicPairCounter));
std::cout << "size " << sizeof(AtomicPairCounter) << std::endl;
constexpr uint32_t N = 20000;
constexpr uint32_t M = N * 6;
uint32_t *n_d, *m_d;
hipMalloc(&n_d, N * sizeof(int));
// hipMemset(n_d, 0, N*sizeof(int));
hipMalloc(&m_d, M * sizeof(int));
hipLaunchKernelGGL(( update), dim3(2000), dim3(512), 0, 0, dc_d, n_d, m_d, 10000);
hipLaunchKernelGGL(( finalize), dim3(1), dim3(1), 0, 0, dc_d, n_d, m_d, 10000);
hipLaunchKernelGGL(( verify), dim3(2000), dim3(512), 0, 0, dc_d, n_d, m_d, 10000);
AtomicPairCounter dc;
hipMemcpy(&dc, dc_d, sizeof(AtomicPairCounter), hipMemcpyDeviceToHost);
std::cout << dc.get().n << ' ' << dc.get().m << std::endl;
return 0;
}
| 76e3ab116c411c7db796695ee92e4f201ad919ad.cu | #include "HeterogeneousCore/CUDAUtilities/interface/AtomicPairCounter.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
__global__ void update(AtomicPairCounter *dc, uint32_t *ind, uint32_t *cont, uint32_t n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
auto m = i % 11;
m = m % 6 + 1; // max 6, no 0
auto c = dc->add(m);
assert(c.m < n);
ind[c.m] = c.n;
for (int j = c.n; j < c.n + m; ++j)
cont[j] = i;
};
__global__ void finalize(AtomicPairCounter const *dc, uint32_t *ind, uint32_t *cont, uint32_t n) {
assert(dc->get().m == n);
ind[n] = dc->get().n;
}
__global__ void verify(AtomicPairCounter const *dc, uint32_t const *ind, uint32_t const *cont, uint32_t n) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n)
return;
assert(0 == ind[0]);
assert(dc->get().m == n);
assert(ind[n] == dc->get().n);
auto ib = ind[i];
auto ie = ind[i + 1];
auto k = cont[ib++];
assert(k < n);
for (; ib < ie; ++ib)
assert(cont[ib] == k);
}
#include <iostream>
int main() {
AtomicPairCounter *dc_d;
cudaMalloc(&dc_d, sizeof(AtomicPairCounter));
cudaMemset(dc_d, 0, sizeof(AtomicPairCounter));
std::cout << "size " << sizeof(AtomicPairCounter) << std::endl;
constexpr uint32_t N = 20000;
constexpr uint32_t M = N * 6;
uint32_t *n_d, *m_d;
cudaMalloc(&n_d, N * sizeof(int));
// cudaMemset(n_d, 0, N*sizeof(int));
cudaMalloc(&m_d, M * sizeof(int));
update<<<2000, 512>>>(dc_d, n_d, m_d, 10000);
finalize<<<1, 1>>>(dc_d, n_d, m_d, 10000);
verify<<<2000, 512>>>(dc_d, n_d, m_d, 10000);
AtomicPairCounter dc;
cudaMemcpy(&dc, dc_d, sizeof(AtomicPairCounter), cudaMemcpyDeviceToHost);
std::cout << dc.get().n << ' ' << dc.get().m << std::endl;
return 0;
}
|
b8657d49b2b5a0125d68fec5480351b0293019c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
#include <cfloat>
#include <cmath>
extern "C" __global__ void double2float_f(double *A, float *ret, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
// TODO: Use __double2float_rd or __double2float_rn or __double2float_ru or __double2float_rz after
ret[tid] = (float)A[tid];
}
}
extern "C" __global__ void float2double_f(float *A, double *ret, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
ret[tid] = (double)A[tid];
}
}
/**
* Performs a slice operation where the input matrix is sparse and the output
* matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input
* matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
template <typename T>
__device__ void slice_sparse_dense_row(T *inVal, int *inRowPtr, int *colInd,
T *ret, int rl, int ru, int cl, int cu,
int retClen) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru) {
/*
* TODO: Alternative approach: use dynamic parallelism. We are skipping this
*for now to avoid
* the complexity of two-step separate compilation and linking process.
*
* extern "C"
* __global__ void slice_sparse_dense_row_helper(double* inVal, int*
*inRowPtr, int* colInd, double* ret,
* int rl, int ru, int cl, int cu, int retClen, int start, int end, int
*index) {
* int i = blockIdx.x * blockDim.x + threadIdx.x + start;
* // Only slice if the index falls into the given range
* if(i < end && cl <= colInd[i] && colInd[i] <= cu) {
* ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
* }
* }
*
* int size = inRowPtr[rowIndex+1] - inRowPtr[rowIndex];
* double numThreads = (double)min(size, MAX_NUM_THREADS_CHILD_KERNEL);
* slice_sparse_dense_row_helper<<< ceil(numThreads/
*MAX_NUM_THREADS_CHILD_KERNEL), MAX_NUM_THREADS_CHILD_KERNEL>>>(inVal, inRowPtr,
*colInd, ret,
* rl, ru, cl, cu, retClen, inRowPtr[rowIndex],
*inRowPtr[rowIndex+1], index);
*
* Two-step compilation and linking process in JCudaKernels's constructor:
* hipLinkAddFile(linkState, hipJitInputType.hipJitInputTypeLibrary,
*"/usr/local/cuda/lib64/libcudadevrt.a", jitOptions);
*/
// Iterate over elements of the row 'rowIndex'.
for (int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex + 1]; i++) {
// Only slice if the index falls into the given range
if (cl <= colInd[i] && colInd[i] <= cu) {
ret[index * retClen + (colInd[i] - cl)] = inVal[i];
}
}
}
}
extern "C" __global__ void slice_sparse_dense_row_d(double *inVal, int *inRowPtr,
int *colInd, double *ret,
int rl, int ru, int cl,
int cu, int retClen) {
slice_sparse_dense_row(inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
}
extern "C" __global__ void slice_sparse_dense_row_f(float *inVal, int *inRowPtr,
int *colInd, float *ret,
int rl, int ru, int cl,
int cu, int retClen) {
slice_sparse_dense_row(inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
}
/**
* Performs a slice operation where the input matrix is sparse and the output
* matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input
* matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
template <typename T>
__device__ void slice_sparse_dense_nnz(T *inVal, int *inRowPtr, int *colInd,
T *ret, int rl, int ru, int cl, int cu,
int retClen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = tid + inRowPtr[rl];
// Only slice if the index falls into the given range
if (i < inRowPtr[ru + 1] && cl <= colInd[i] && colInd[i] <= cu) {
// Find the row index for corresponding non-zero value 'i'.
int rowIndex = rl;
while (inRowPtr[rowIndex + 1] <= i) {
rowIndex++;
}
ret[(rowIndex - rl) * retClen + (colInd[i] - cl)] = inVal[i];
}
}
extern "C" __global__ void slice_sparse_dense_nnz_d(double *inVal, int *inRowPtr,
int *colInd, double *ret,
int rl, int ru, int cl,
int cu, int retClen) {
slice_sparse_dense_nnz(inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
}
extern "C" __global__ void slice_sparse_dense_nnz_f(float *inVal, int *inRowPtr,
int *colInd, float *ret,
int rl, int ru, int cl,
int cu, int retClen) {
slice_sparse_dense_nnz(inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
}
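/**
 * Illustrative host-side sizing for the two slice kernels above (a sketch,
 * not SystemML's actual launch code; the 256-thread block size and the
 * host-side pointer names are assumptions):
 *
 *   // row variant: one thread per output row
 *   int nRows = ru - rl + 1;
 *   hipLaunchKernelGGL(slice_sparse_dense_row_d, dim3((nRows + 255) / 256),
 *                      dim3(256), 0, 0,
 *                      inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
 *
 *   // nnz variant: one thread per non-zero stored in rows rl..ru
 *   int nnz = hRowPtr[ru + 1] - hRowPtr[rl];  // hRowPtr = host copy of inRowPtr
 *   hipLaunchKernelGGL(slice_sparse_dense_nnz_d, dim3((nnz + 255) / 256),
 *                      dim3(256), 0, 0,
 *                      inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
 */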
/**
* Performs a slice operation where the input matrix is dense and the output
* matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
template <typename T>
__device__ void slice_dense_dense(T *in, T *ret, int rl, int ru, int cl, int cu,
int inClen, int retRlen, int retClen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / retClen;
int iy = tid % retClen;
if (ix < retRlen && iy < retClen) {
int inIndex = (ix + rl) * inClen + cl + iy;
ret[tid] = in[inIndex];
}
}
extern "C" __global__ void slice_dense_dense_d(double *in, double *ret, int rl,
int ru, int cl, int cu,
int inClen, int retRlen,
int retClen) {
slice_dense_dense(in, ret, rl, ru, cl, cu, inClen, retRlen, retClen);
}
extern "C" __global__ void slice_dense_dense_f(float *in, float *ret, int rl,
int ru, int cl, int cu,
int inClen, int retRlen,
int retClen) {
slice_dense_dense(in, ret, rl, ru, cl, cu, inClen, retRlen, retClen);
}
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
template <typename T>
__device__ void copy_u2l_dense(T *ret, int dim, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / dim;
int iy = tid % dim;
int id_dest = iy * dim + ix;
if (iy > ix && id_dest < N) {
// TODO: Potential to reduce the number of threads by half
int id_src = tid;
ret[id_dest] = ret[id_src];
}
}
extern "C" __global__ void copy_u2l_dense_d(double *ret, int dim, int N) {
copy_u2l_dense(ret, dim, N);
}
extern "C" __global__ void copy_u2l_dense_f(float *ret, int dim, int N) {
copy_u2l_dense(ret, dim, N);
}
// Use this method in templates to fetch the maximum value for a given datatype
template <typename T>
__forceinline__ __device__ T T_MAX(T x) {
return (T)DBL_MAX;
}
template <>
__forceinline__ __device__ float T_MAX(float x) {
return FLT_MAX;
}
template <>
__forceinline__ __device__ double T_MAX(double x) {
return DBL_MAX;
}
// op = {0=plus, 1=minus, 2=multiply, 3=divide, 4=power,
// 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal,
// 11=min, 12=max, 13=and, 14=or, 15=minus1multiply, 16=minusnz,
// 17=modulus, 18=integer division}
template <typename T>
__forceinline__ __device__ T binaryOp(T x, T y, int op) {
switch (op) {
case 0:
return x + y;
case 1:
return x - y;
case 2:
return x * y;
case 3:
return x / y;
case 4:
return pow(x, y);
case 5:
return (x < y) == 0 ? 0.0 : 1.0;
case 6:
return (x <= y) == 0 ? 0.0 : 1.0;
case 7:
return (x > y) == 0 ? 0.0 : 1.0;
case 8:
return (x >= y) == 0 ? 0.0 : 1.0;
case 9:
return (x == y) == 0 ? 0.0 : 1.0;
case 10:
return (x != y) == 0 ? 0.0 : 1.0;
case 11:
return min(x, y);
case 12:
return max(x, y);
case 13:
return ((int)llrint(x) & (int)llrint(y)) == 0 ? 0.0 : 1.0;
case 14:
return ((int)llrint(x) | (int)llrint(y)) == 0 ? 0.0 : 1.0;
case 15:
return 1 - x * y;
case 16:
return (x != 0.0 ? x - y : 0.0);
case 17: {
if (y == 0.0 || y == -0.0) {
return nan("");
}
T v = x / y;
// Check for v being NaN (v != v) or if it is infinity
if (isnan(v) || isinf(v)) {
return v;
} else {
v = floor(v);
}
return x - v * y;
}
case 18: {
T v = x / y;
if (isnan(v) || isinf(v)) {
return v;
} else {
return floor(v);
}
}
default:
return T_MAX(x);
}
}
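// Worked examples of the op codes above (traced by hand from the switch):
//   binaryOp(7.0, 2.0, 0)  -> 9.0                                        (plus)
//   binaryOp(7.0, 2.0, 17) -> v = 7/2 = 3.5, floor(v) = 3, 7 - 3*2 = 1.0 (modulus)
//   binaryOp(7.0, 2.0, 18) -> floor(7/2) = 3.0                           (integer division)
//   binaryOp(7.0, 0.0, 17) -> nan                                        (modulus by zero)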
/**
* Performs forward pass for relu: ret = max(A, 0)
*
* @param A input array allocated on the GPU
* @param ret output array allocated on the GPU
* @param rlen the number of rows
* @param clen the number of columns
*/
template <typename T>
__device__ void relu(T *A, T *ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
ret[tid] = max(0.0, A[tid]);
}
}
extern "C" __global__ void relu_d(double *A, double *ret, int rlen, int clen) {
relu(A, ret, rlen, clen);
}
extern "C" __global__ void relu_f(float *A, float *ret, int rlen, int clen) {
relu(A, ret, rlen, clen);
}
/**
* This method computes the backpropagation errors for previous layer of relu operation
*
* @param X input activation array allocated on the GPU
* @param dout errors from previous layer
* @param ret output array allocated on the GPU
* @param rlen the number of rows
* @param clen the number of columns
*/
template <typename T>
__device__ void relu_backward(T *X, T *dout, T *ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
ret[tid] = X[tid] > 0 ? dout[tid] : 0;
}
}
extern "C" __global__ void relu_backward_d(double *X, double *dout, double *ret,
int rlen, int clen) {
relu_backward(X, dout, ret, rlen, clen);
}
extern "C" __global__ void relu_backward_f(float *X, float *dout, float *ret,
int rlen, int clen) {
relu_backward(X, dout, ret, rlen, clen);
}
/**
* Performs inplace addition: ret += input
*
* @param input rhs input array allocated on the GPU
* @param ret the input and output array allocated on the GPU
* @param rlen the number of rows
* @param clen the number of columns
*/
template <typename T>
__device__ void inplace_add(T *input, T *ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
ret[tid] += input[tid];
}
}
extern "C" __global__ void inplace_add_d(double *input, double *ret, int rlen,
int clen) {
inplace_add(input, ret, rlen, clen);
}
extern "C" __global__ void inplace_add_f(float *input, float *ret, int rlen,
int clen) {
inplace_add(input, ret, rlen, clen);
}
// Performs the operation corresponding to the DML script:
// ones = matrix(1, rows=1, cols=Hout*Wout)
// output = input + matrix(bias %*% ones, rows=1, cols=F*Hout*Wout)
// This operation is often followed by conv2d and hence we have introduced
// bias_add(input, bias) built-in function
template <typename T>
__device__ void bias_add(T *input, T *bias, T *ret, int rlen, int clen,
int PQ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
int biasIndex = iy / PQ;
ret[tid] = input[tid] + bias[biasIndex];
}
}
extern "C" __global__ void bias_add_d(double *input, double *bias, double *ret,
int rlen, int clen, int PQ) {
bias_add(input, bias, ret, rlen, clen, PQ);
}
extern "C" __global__ void bias_add_f(float *input, float *bias, float *ret,
int rlen, int clen, int PQ) {
bias_add(input, bias, ret, rlen, clen, PQ);
}
// Performs the operation "ret <- A + alpha*B", where B is a vector
template <typename T>
__device__ void daxpy_matrix_vector(T *A, T *B, double alpha, T *ret, int rlenA,
int clenA, int rlenB, int clenB) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clenA;
int iy = tid % clenA;
if (ix < rlenA && iy < clenA) {
int index = ix * clenA + iy;
if (rlenB == 1) {
ret[index] = A[index] + alpha * B[iy];
} else {
ret[index] = A[index] + alpha * B[ix];
}
}
}
extern "C" __global__ void daxpy_matrix_vector_d(double *A, double *B,
double alpha, double *ret,
int rlenA, int clenA, int rlenB,
int clenB) {
daxpy_matrix_vector(A, B, alpha, ret, rlenA, clenA, rlenB, clenB);
}
extern "C" __global__ void daxpy_matrix_vector_f(float *A, float *B,
double alpha, float *ret,
int rlenA, int clenA, int rlenB,
int clenB) {
daxpy_matrix_vector(A, B, alpha, ret, rlenA, clenA, rlenB, clenB);
}
// Performs similar operation as bias_add except elementwise multiplication
// instead of add
template <typename T>
__device__ void bias_multiply(T *input, T *bias, T *ret, int rlen, int clen,
int PQ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
int biasIndex = iy / PQ;
ret[tid] = input[tid] * bias[biasIndex];
}
}
extern "C" __global__ void bias_multiply_d(double *input, double *bias,
double *ret, int rlen, int clen,
int PQ) {
bias_multiply(input, bias, ret, rlen, clen, PQ);
}
extern "C" __global__ void bias_multiply_f(float *input, float *bias, float *ret,
int rlen, int clen, int PQ) {
bias_multiply(input, bias, ret, rlen, clen, PQ);
}
/**
* Performs a binary cellwise arithmetic operation on 2 matrices.
 * Either both matrices are of equal size, or one of them (or both) is a
 * vector.
* @param A first input matrix allocated on GPU
* @param B second input matrix allocated on GPU
* @param C output allocated on GPU
* @param maxRlen maximum of the row lengths of A and B
* @param maxClen maximum of the column lengths of A and B
* @param vectorAStatus if A is a row vector, column vector or neither
* @param vectorBStatus if B is a row vector, column vector or neither
* @param op the numeric code of the arithmetic operation to
* perform
*
*/
template <typename T>
__device__ void matrix_matrix_cellwise_op(T *A, T *B, T *C, int maxRlen,
int maxClen, int vectorAStatus,
int vectorBStatus, int op) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
if (ix < maxRlen && iy < maxClen) {
int outIndex = ix * maxClen + iy;
int aIndex = outIndex;
int bIndex = outIndex;
if (vectorAStatus == 1)
aIndex = ix; // clen == 1
else if (vectorAStatus == 2)
aIndex = iy; // rlen == 1
if (vectorBStatus == 1)
bIndex = ix; // clen == 1
else if (vectorBStatus == 2)
bIndex = iy; // rlen == 1
C[outIndex] = binaryOp(A[aIndex], B[bIndex], op);
// printf("C[%d] = A[%d](%f) B[%d](%f) (%d %d)\n", outIndex, aIndex,
// A[aIndex], bIndex, B[bIndex], (ix+1), (iy+1));
__syncthreads();
}
}
extern "C" __global__ void matrix_matrix_cellwise_op_d(
double *A, double *B, double *C, int maxRlen, int maxClen,
int vectorAStatus, int vectorBStatus, int op) {
matrix_matrix_cellwise_op(A, B, C, maxRlen, maxClen, vectorAStatus,
vectorBStatus, op);
}
extern "C" __global__ void matrix_matrix_cellwise_op_f(
float *A, float *B, float *C, int maxRlen, int maxClen, int vectorAStatus,
int vectorBStatus, int op) {
matrix_matrix_cellwise_op(A, B, C, maxRlen, maxClen, vectorAStatus,
vectorBStatus, op);
}
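// Illustrative call shape (an assumption about typical use, not SystemML's
// dispatch code): for a 4x3 matrix A and a 1x3 row vector B, the caller would
// pass maxRlen=4, maxClen=3, vectorAStatus=0, vectorBStatus=2 (rlen == 1) and
// launch at least maxRlen*maxClen = 12 threads; each output element
// C[ix][iy] is then binaryOp(A[ix][iy], B[iy], op).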
/**
* Performs an arithmetic operation between a matrix and a scalar.
* C = s op A or C = A op s (where A is the matrix, s is the scalar and op is
* the operation)
* @param A input matrix allocated on GPU
* @param scalar scalar input
* @param C output matrix allocated on GPU
* @param size number of elements in matrix A
* @param op number code of the arithmetic operation to perform
* @param isLeftScalar whether the scalar is on the left side
*/
template <typename T>
__device__ void matrix_scalar_op(T *A, T scalar, T *C, int size, int op,
int isLeftScalar) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
if (isLeftScalar) {
C[index] = binaryOp(scalar, A[index], op);
} else {
C[index] = binaryOp(A[index], scalar, op);
}
}
__syncthreads();
}
extern "C" __global__ void matrix_scalar_op_d(double *A, double scalar,
double *C, int size, int op,
int isLeftScalar) {
matrix_scalar_op(A, scalar, C, size, op, isLeftScalar);
}
extern "C" __global__ void matrix_scalar_op_f(float *A, double scalar, float *C,
int size, int op,
int isLeftScalar) {
matrix_scalar_op(A, (float)scalar, C, size, op, isLeftScalar);
}
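// Example of the isLeftScalar flag (traced from the branch above): with
// op = 1 (minus) and scalar s, isLeftScalar != 0 computes C[i] = s - A[i],
// while isLeftScalar == 0 computes C[i] = A[i] - s. A launch needs at least
// `size` threads, e.g. ceil(size/256) blocks of 256 threads.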
/**
* Sets all elements (fills) of a double array of given length with a given
* scalar value
* @param A array to be filled
* @param scalar value to fill array with
* @param lenA length of array A
*/
template <typename T>
__device__ void fill(T *A, T scalar, int lenA) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < lenA) {
A[index] = scalar;
}
}
extern "C" __global__ void fill_d(double *A, double scalar, int lenA) {
fill(A, scalar, lenA);
}
extern "C" __global__ void fill_f(float *A, double scalar, int lenA) {
fill(A, (float)scalar, lenA);
}
/**
* Appends Matrix B to the right side of Matrix A into a new matrix C
* | 1 2 3 4 | | 8 8 8 | | 1 2 3 4 8 8 8 |
* cbind ( | 9 8 7 6 | , | 7 7 7 | ) = | 9 8 7 6 7 7 7 |
* | 4 3 2 1 | | 9 9 9 | | 4 3 2 1 9 9 9 |
* @param A input matrix A allocated on the GPU
* @param B input matrix B allocated on the GPU
 * @param C      output matrix C allocated on the GPU
* @param rowsA rows in A
* @param colsA columns in A
* @param rowsB rows in B
* @param colsB columns in B
*/
template <typename T>
__device__ void cbind(T *A, T *B, T *C, int rowsA, int colsA, int rowsB,
int colsB) {
int maxClen = max(colsA, colsB);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
int colsC = colsA + colsB;
int rowsC = rowsA;
// Copy an element of A into C into the appropriate location
if (ix < rowsA && iy < colsA) {
T elemA = A[ix * colsA + iy];
C[ix * colsC + iy] = elemA;
}
// Copy an element of B into C into the appropriate location
if (ix < rowsB && iy < colsB) {
T elemB = B[ix * colsB + iy];
C[ix * colsC + (iy + colsA)] = elemB;
}
}
extern "C" __global__ void cbind_d(double *A, double *B, double *C, int rowsA,
int colsA, int rowsB, int colsB) {
cbind(A, B, C, rowsA, colsA, rowsB, colsB);
}
extern "C" __global__ void cbind_f(float *A, float *B, float *C, int rowsA,
int colsA, int rowsB, int colsB) {
cbind(A, B, C, rowsA, colsA, rowsB, colsB);
}
/**
* Appends Matrix B to the bottom of Matrix A into a new matrix C
 *           | 2 3 4 |       | 8 8 8 |       | 2 3 4 |
 * rbind (   | 8 7 6 |   ,   | 7 7 7 |  ) =  | 8 7 6 |
 *           | 3 2 1 |                       | 3 2 1 |
 *                                           | 8 8 8 |
 *                                           | 7 7 7 |
* @param A input matrix A allocated on the GPU
* @param B input matrix B allocated on the GPU
 * @param C      output matrix C allocated on the GPU
* @param rowsA rows in A
* @param colsA columns in A
* @param rowsB rows in B
* @param colsB columns in B
*/
template <typename T>
__device__ void rbind(T *A, T *B, T *C, int rowsA, int colsA, int rowsB,
int colsB) {
int maxClen = max(colsA, colsB);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
int rowsC = rowsA + rowsB;
int colsC = colsA;
// Copy an element of A into C into the appropriate location
if (ix < rowsA && iy < colsA) {
T elemA = A[ix * colsA + iy];
C[ix * colsC + iy] = elemA;
}
// Copy an element of B into C into the appropriate location
if (ix < rowsB && iy < colsB) {
T elemB = B[ix * colsB + iy];
C[(ix + rowsA) * colsC + iy] = elemB;
}
}
extern "C" __global__ void rbind_d(double *A, double *B, double *C, int rowsA,
int colsA, int rowsB, int colsB) {
rbind(A, B, C, rowsA, colsA, rowsB, colsB);
}
extern "C" __global__ void rbind_f(float *A, float *B, float *C, int rowsA,
int colsA, int rowsB, int colsB) {
rbind(A, B, C, rowsA, colsA, rowsB, colsB);
}
/**
* Does a reduce operation over all elements of the array.
* This method has been adapted from the Reduction sample in the NVIDIA CUDA
* Samples (v8.0)
* and the Reduction example available through jcuda.org
 * When invoked initially, all blocks partly compute the reduction operation
 * over the entire array and write their partial results to the output/temporary
 * array. A second invocation is needed to obtain the final reduced value.
 * The number of threads, blocks and amount of shared memory is calculated in a
 * specific way.
 * Please refer to the NVIDIA CUDA Sample or the SystemML code that invokes this
 * method to see how it's done.
* The template-ized version of this function is similar to what is found in
* NVIDIA CUB
*
* @param ReductionOp Type of the functor object that implements the
* reduction operation
*/
template <typename ReductionOp, typename T>
__device__ void reduce(
T *g_idata, ///< input data stored in device memory (of size n)
T *g_odata, ///< output/temporary array stored in device memory (of size n)
unsigned int n, ///< size of the input and temporary/output arrays
ReductionOp
reduction_op, ///< Reduction operation to perform (functor object)
T initialValue) ///< initial value for the reduction variable
{
// extern __shared__ T sdata[];
extern __shared__ __align__(sizeof(T)) unsigned char my_sdata[];
T *sdata = reinterpret_cast<T *>(my_sdata);
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
unsigned int gridSize = blockDim.x * 2 * gridDim.x;
T v = initialValue;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
v = reduction_op(v, g_idata[i]);
// ensure we don't read out of bounds
if (i + blockDim.x < n) v = reduction_op(v, g_idata[i + blockDim.x]);
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024) {
if (tid < 512) {
sdata[tid] = v = reduction_op(v, sdata[tid + 512]);
}
__syncthreads();
}
if (blockDim.x >= 512) {
if (tid < 256) {
sdata[tid] = v = reduction_op(v, sdata[tid + 256]);
}
__syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128) {
sdata[tid] = v = reduction_op(v, sdata[tid + 128]);
}
__syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64) {
sdata[tid] = v = reduction_op(v, sdata[tid + 64]);
}
__syncthreads();
}
if (tid < 32) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T *smem = sdata;
if (blockDim.x >= 64) {
smem[tid] = v = reduction_op(v, smem[tid + 32]);
}
if (blockDim.x >= 32) {
smem[tid] = v = reduction_op(v, smem[tid + 16]);
}
if (blockDim.x >= 16) {
smem[tid] = v = reduction_op(v, smem[tid + 8]);
}
if (blockDim.x >= 8) {
smem[tid] = v = reduction_op(v, smem[tid + 4]);
}
if (blockDim.x >= 4) {
smem[tid] = v = reduction_op(v, smem[tid + 2]);
}
if (blockDim.x >= 2) {
smem[tid] = v = reduction_op(v, smem[tid + 1]);
}
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/**
* Does a reduce (sum) over each row of the array.
* This kernel must be launched with as many blocks as there are rows.
* The intuition for this kernel is that each block does a reduction over a
* single row.
 * The maximum number of blocks that can be launched (as of compute capability 3.0)
* is 2^31 - 1
* This works out fine for SystemML, since the maximum elements in a Java array
* can be 2^31 - c (some small constant)
 * If the matrix is "fat" and "short", i.e. there is a small number of rows and
 * a large number of columns, there could be under-utilization of the hardware.
* The template-ized version of this function is similar to what is found in
* NVIDIA CUB
* @param ReductionOp Type of the functor object that implements the
* reduction operation
* @param AssignmentOp Type of the functor object that is used to modify
* the value before writing it to its final location in global memory for each
* row
*/
template <typename ReductionOp, typename AssignmentOp, typename T>
__device__ void reduce_row(
T *g_idata, ///< input data stored in device memory (of size rows*cols)
    T *g_odata,  ///< output/temporary array stored in device memory (of size
///rows*cols)
unsigned int rows, ///< rows in input and temporary/output arrays
unsigned int cols, ///< columns in input and temporary/output arrays
ReductionOp
reduction_op, ///< Reduction operation to perform (functor object)
AssignmentOp assignment_op, ///< Operation to perform before assigning this
///to its final location in global memory for
///each row
T initialValue) { ///< initial value for the reduction variable
// extern __shared__ T sdata[];
extern __shared__ __align__(sizeof(T)) unsigned char my_sdata[];
T *sdata = reinterpret_cast<T *>(my_sdata);
// one block per row
if (blockIdx.x >= rows) {
return;
}
unsigned int block = blockIdx.x;
unsigned int tid = threadIdx.x;
unsigned int i = tid;
unsigned int block_offset = block * cols;
T v = initialValue;
while (i < cols) {
v = reduction_op(v, g_idata[block_offset + i]);
i += blockDim.x;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024) {
if (tid < 512) {
sdata[tid] = v = reduction_op(v, sdata[tid + 512]);
}
__syncthreads();
}
if (blockDim.x >= 512) {
if (tid < 256) {
sdata[tid] = v = reduction_op(v, sdata[tid + 256]);
}
__syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128) {
sdata[tid] = v = reduction_op(v, sdata[tid + 128]);
}
__syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64) {
sdata[tid] = v = reduction_op(v, sdata[tid + 64]);
}
__syncthreads();
}
if (tid < 32) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T *smem = sdata;
if (blockDim.x >= 64) {
smem[tid] = v = reduction_op(v, smem[tid + 32]);
}
if (blockDim.x >= 32) {
smem[tid] = v = reduction_op(v, smem[tid + 16]);
}
if (blockDim.x >= 16) {
smem[tid] = v = reduction_op(v, smem[tid + 8]);
}
if (blockDim.x >= 8) {
smem[tid] = v = reduction_op(v, smem[tid + 4]);
}
if (blockDim.x >= 4) {
smem[tid] = v = reduction_op(v, smem[tid + 2]);
}
if (blockDim.x >= 2) {
smem[tid] = v = reduction_op(v, smem[tid + 1]);
}
}
// write result for this block to global mem, modify it with assignment op
if (tid == 0) g_odata[block] = assignment_op(sdata[0]);
}
/**
* Does a column wise reduction.
* The intuition is that there are as many global threads as there are columns
* Each global thread is responsible for a single element in the output vector
 * This of course leads to an under-utilization of the GPU resources.
 * For cases where the number of columns is small, there can be unused SMs.
*
* The template-ized version of this function is similar to what is found in
* NVIDIA CUB
* @param ReductionOp Type of the functor object that implements the
* reduction operation
* @param AssignmentOp Type of the functor object that is used to modify
* the value before writing it to its final location in global memory for each
* column
*/
template <typename ReductionOp, typename AssignmentOp, typename T>
__device__ void reduce_col(
T *g_idata, ///< input data stored in device memory (of size rows*cols)
    T *g_odata,  ///< output/temporary array stored in device memory (of size
///rows*cols)
unsigned int rows, ///< rows in input and temporary/output arrays
unsigned int cols, ///< columns in input and temporary/output arrays
ReductionOp
reduction_op, ///< Reduction operation to perform (functor object)
AssignmentOp assignment_op, ///< Operation to perform before assigning this
///to its final location in global memory for
///each column
T initialValue) ///< initial value for the reduction variable
{
unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x;
if (global_tid >= cols) {
return;
}
unsigned int i = global_tid;
unsigned int grid_size = cols;
T val = initialValue;
while (i < rows * cols) {
val = reduction_op(val, g_idata[i]);
i += grid_size;
}
g_odata[global_tid] = assignment_op(val);
}
/**
* Functor op for assignment op. This is a dummy/identity op.
*/
template <typename T>
struct IdentityOp {
__device__ __forceinline__ T operator()(T a) const { return a; }
};
/**
* Functor op for summation operation
*/
template <typename T>
struct SumOp {
__device__ __forceinline__ T operator()(T a, T b) const { return a + b; }
};
/**
* Do a summation over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
* @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
template <typename T>
__device__ void reduce_sum(T *g_idata, T *g_odata, unsigned int n) {
SumOp<T> op;
reduce<SumOp<T>, T>(g_idata, g_odata, n, op, (T)0.0);
}
extern "C" __global__ void reduce_sum_d(double *g_idata, double *g_odata,
unsigned int n) {
reduce_sum(g_idata, g_odata, n);
}
extern "C" __global__ void reduce_sum_f(float *g_idata, float *g_odata,
unsigned int n) {
reduce_sum(g_idata, g_odata, n);
}
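/**
 * Illustrative two-pass launch for reduce_sum_d (a sketch with assumed
 * block/grid sizes; SystemML derives them from n, and blockDim must be a
 * power of two for the staged shared-memory reduction above):
 *
 *   int threads = 256;
 *   int blocks  = min(64, (int)((n + threads * 2 - 1) / (threads * 2)));
 *   size_t shmem = threads * sizeof(double);
 *   // pass 1: each block writes one partial sum into d_tmp
 *   hipLaunchKernelGGL(reduce_sum_d, dim3(blocks), dim3(threads), shmem, 0,
 *                      d_in, d_tmp, n);
 *   // pass 2: reduce the `blocks` partials down to d_tmp[0]
 *   hipLaunchKernelGGL(reduce_sum_d, dim3(1), dim3(threads), shmem, 0,
 *                      d_tmp, d_tmp, blocks);
 */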
/**
* Do a summation over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_row_sum(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
SumOp<T> op;
IdentityOp<T> aop;
reduce_row<SumOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
0.0);
}
extern "C" __global__ void reduce_row_sum_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_sum(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_row_sum_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_sum(g_idata, g_odata, rows, cols);
}
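// Illustrative launch for reduce_row_sum_d (a sketch; the 256-thread block is
// an assumption, but it must be a power of two for the shared-memory
// reduction, and one block is launched per row as described above):
//   int threads = 256;
//   size_t shmem = threads * sizeof(double);
//   hipLaunchKernelGGL(reduce_row_sum_d, dim3(rows), dim3(threads), shmem, 0,
//                      d_in, d_rowSums, rows, cols);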
/**
* Do a summation over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_col_sum(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
SumOp<T> op;
IdentityOp<T> aop;
reduce_col<SumOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
(T)0.0);
}
extern "C" __global__ void reduce_col_sum_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_sum(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_col_sum_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_sum(g_idata, g_odata, rows, cols);
}
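// Illustrative launch for reduce_col_sum_d (a sketch; one thread per output
// column, so no shared memory is needed):
//   int threads = 256;
//   hipLaunchKernelGGL(reduce_col_sum_d, dim3((cols + threads - 1) / threads),
//                      dim3(threads), 0, 0, d_in, d_colSums, rows, cols);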
/**
* Functor op for max operation
*/
template <typename T>
struct MaxOp {
__device__ __forceinline__ T operator()(T a, T b) const { return fmax(a, b); }
};
template<>
struct MaxOp<float> {
__device__ __forceinline__ float operator()(float a, float b) const { return fmaxf(a, b); }
};
/**
* Do a max over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
template <typename T>
__device__ void reduce_max(T *g_idata, T *g_odata, unsigned int n) {
MaxOp<T> op;
reduce<MaxOp<T>, T>(g_idata, g_odata, n, op, -T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_max_d(double *g_idata, double *g_odata,
unsigned int n) {
reduce_max(g_idata, g_odata, n);
}
extern "C" __global__ void reduce_max_f(float *g_idata, float *g_odata,
unsigned int n) {
reduce_max(g_idata, g_odata, n);
}
/**
* Do a max over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_row_max(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
MaxOp<T> op;
IdentityOp<T> aop;
reduce_row<MaxOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
-T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_row_max_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_max(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_row_max_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_max(g_idata, g_odata, rows, cols);
}
/**
* Do a max over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_col_max(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
MaxOp<T> op;
IdentityOp<T> aop;
reduce_col<MaxOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
(T)-T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_col_max_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_max(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_col_max_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_max(g_idata, g_odata, rows, cols);
}
/**
* Functor op for min operation
*/
template <typename T>
struct MinOp {
__device__ __forceinline__ T operator()(T a, T b) const { return fmin(a, b); }
};
/**
* Do a min over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
template <typename T>
__device__ void reduce_min(T *g_idata, T *g_odata, unsigned int n) {
MinOp<T> op;
reduce<MinOp<T>, T>(g_idata, g_odata, n, op, T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_min_d(double *g_idata, double *g_odata,
unsigned int n) {
reduce_min(g_idata, g_odata, n);
}
extern "C" __global__ void reduce_min_f(float *g_idata, float *g_odata,
unsigned int n) {
reduce_min(g_idata, g_odata, n);
}
/**
* Do a min over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_row_min(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
MinOp<T> op;
IdentityOp<T> aop;
reduce_row<MinOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_row_min_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_min(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_row_min_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_min(g_idata, g_odata, rows, cols);
}
/**
* Do a min over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_col_min(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
MinOp<T> op;
IdentityOp<T> aop;
reduce_col<MinOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_col_min_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_min(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_col_min_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_min(g_idata, g_odata, rows, cols);
}
/**
* Functor op for product operation
*/
template <typename T>
struct ProductOp {
__device__ __forceinline__ T operator()(T a, T b) const { return a * b; }
};
/**
* Do a product over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
template <typename T>
__device__ void reduce_prod(T *g_idata, T *g_odata, unsigned int n) {
ProductOp<T> op;
reduce<ProductOp<T>, T>(g_idata, g_odata, n, op, (T)1.0);
}
extern "C" __global__ void reduce_prod_d(double *g_idata, double *g_odata,
unsigned int n) {
reduce_prod(g_idata, g_odata, n);
}
extern "C" __global__ void reduce_prod_f(float *g_idata, float *g_odata,
unsigned int n) {
reduce_prod(g_idata, g_odata, n);
}
/**
* Functor op for mean operation
*/
template <typename T>
struct MeanOp {
const long
_size; ///< Number of elements by which to divide to calculate mean
__device__ __forceinline__ MeanOp(long size) : _size(size) {}
__device__ __forceinline__ T operator()(T total) const {
return total / _size;
}
};
/**
* Do a mean over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_row_mean(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
SumOp<T> op;
MeanOp<T> aop(cols);
reduce_row<SumOp<T>, MeanOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
(T)0.0);
}
extern "C" __global__ void reduce_row_mean_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_mean(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_row_mean_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_mean(g_idata, g_odata, rows, cols);
}
/**
* Do a mean over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_col_mean(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
SumOp<T> op;
MeanOp<T> aop(rows);
reduce_col<SumOp<T>, MeanOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
0.0);
}
extern "C" __global__ void reduce_col_mean_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_mean(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_col_mean_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_mean(g_idata, g_odata, rows, cols);
}
/**
* Do an exp over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_exp(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = exp(A[index]);
}
}
extern "C" __global__ void matrix_exp_d(double *A, double *C,
unsigned int size) {
matrix_exp(A, C, size);
}
extern "C" __global__ void matrix_exp_f(float *A, float *C, unsigned int size) {
matrix_exp(A, C, size);
}
/**
 * Do a sqrt over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_sqrt(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = sqrt(A[index]);
}
}
extern "C" __global__ void matrix_sqrt_d(double *A, double *C,
unsigned int size) {
matrix_sqrt(A, C, size);
}
extern "C" __global__ void matrix_sqrt_f(float *A, float *C, unsigned int size) {
matrix_sqrt(A, C, size);
}
/**
 * Do a round over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_round(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = (T)llround(A[index]);
}
}
extern "C" __global__ void matrix_round_d(double *A, double *C,
unsigned int size) {
matrix_round(A, C, size);
}
extern "C" __global__ void matrix_round_f(float *A, float *C,
unsigned int size) {
matrix_round(A, C, size);
}
/**
* Do an abs over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_abs(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = (T)fabs(A[index]);
}
}
extern "C" __global__ void matrix_abs_d(double *A, double *C,
unsigned int size) {
matrix_abs(A, C, size);
}
extern "C" __global__ void matrix_abs_f(float *A, float *C, unsigned int size) {
matrix_abs(A, C, size);
}
/**
 * Do a log over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_log(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = log(A[index]);
}
}
extern "C" __global__ void matrix_log_d(double *A, double *C,
unsigned int size) {
matrix_log(A, C, size);
}
extern "C" __global__ void matrix_log_f(float *A, float *C, unsigned int size) {
matrix_log(A, C, size);
}
/**
 * Do a floor over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_floor(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = floor(A[index]);
}
}
extern "C" __global__ void matrix_floor_d(double *A, double *C,
unsigned int size) {
matrix_floor(A, C, size);
}
extern "C" __global__ void matrix_floor_f(float *A, float *C,
unsigned int size) {
matrix_floor(A, C, size);
}
/**
 * Do a ceil over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_ceil(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = ceil(A[index]);
}
}
extern "C" __global__ void matrix_ceil_d(double *A, double *C,
unsigned int size) {
matrix_ceil(A, C, size);
}
extern "C" __global__ void matrix_ceil_f(float *A, float *C, unsigned int size) {
matrix_ceil(A, C, size);
}
/**
 * Do a sin over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_sin(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = sin(A[index]);
}
}
extern "C" __global__ void matrix_sin_d(double *A, double *C,
unsigned int size) {
matrix_sin(A, C, size);
}
extern "C" __global__ void matrix_sin_f(float *A, float *C, unsigned int size) {
matrix_sin(A, C, size);
}
/**
 * Do a sinh over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_sinh(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = sinh(A[index]);
}
}
extern "C" __global__ void matrix_sinh_d(double *A, double *C,
unsigned int size) {
matrix_sinh(A, C, size);
}
extern "C" __global__ void matrix_sinh_f(float *A, float *C, unsigned int size) {
matrix_sinh(A, C, size);
}
/**
 * Do a cos over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_cos(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = cos(A[index]);
}
}
extern "C" __global__ void matrix_cos_d(double *A, double *C,
unsigned int size) {
matrix_cos(A, C, size);
}
extern "C" __global__ void matrix_cos_f(float *A, float *C, unsigned int size) {
matrix_cos(A, C, size);
}
/**
 * Do a cosh over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_cosh(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = cosh(A[index]);
}
}
extern "C" __global__ void matrix_cosh_d(double *A, double *C,
unsigned int size) {
matrix_cosh(A, C, size);
}
extern "C" __global__ void matrix_cosh_f(float *A, float *C, unsigned int size) {
matrix_cosh(A, C, size);
}
/**
 * Do a tan over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_tan(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = tan(A[index]);
}
}
extern "C" __global__ void matrix_tan_d(double *A, double *C,
unsigned int size) {
matrix_tan(A, C, size);
}
extern "C" __global__ void matrix_tan_f(float *A, float *C, unsigned int size) {
matrix_tan(A, C, size);
}
/**
 * Do a tanh over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_tanh(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = tanh(A[index]);
}
}
extern "C" __global__ void matrix_tanh_d(double *A, double *C,
unsigned int size) {
matrix_tanh(A, C, size);
}
extern "C" __global__ void matrix_tanh_f(float *A, float *C, unsigned int size) {
matrix_tanh(A, C, size);
}
/**
* Do an asin over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_asin(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = asin(A[index]);
}
}
extern "C" __global__ void matrix_asin_d(double *A, double *C,
unsigned int size) {
matrix_asin(A, C, size);
}
extern "C" __global__ void matrix_asin_f(float *A, float *C, unsigned int size) {
matrix_asin(A, C, size);
}
/**
* Do an acos over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_acos(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = acos(A[index]);
}
}
extern "C" __global__ void matrix_acos_d(double *A, double *C,
unsigned int size) {
matrix_acos(A, C, size);
}
extern "C" __global__ void matrix_acos_f(float *A, float *C, unsigned int size) {
matrix_acos(A, C, size);
}
/**
* Do an atan over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_atan(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = atan(A[index]);
}
}
extern "C" __global__ void matrix_atan_d(double *A, double *C,
unsigned int size) {
matrix_atan(A, C, size);
}
extern "C" __global__ void matrix_atan_f(float *A, float *C, unsigned int size) {
matrix_atan(A, C, size);
}
/**
 * Do a sign over all the elements of a matrix
 * Assign -1, 0 or 1 depending on the element being negative, 0 or positive
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_sign(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
if (A[index] == 0.0) {
C[index] = 0.0;
} else {
C[index] = copysign(1.0, A[index]);
}
}
}
extern "C" __global__ void matrix_sign_d(double *A, double *C,
unsigned int size) {
matrix_sign(A, C, size);
}
extern "C" __global__ void matrix_sign_f(float *A, float *C, unsigned int size) {
matrix_sign(A, C, size);
} | b8657d49b2b5a0125d68fec5480351b0293019c7.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
#include <cfloat>
#include <cmath>
extern "C" __global__ void double2float_f(double *A, float *ret, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
// TODO: Use __double2float_rd or __double2float_rn or __double2float_ru or __double2float_rz after
ret[tid] = (float)A[tid];
}
}
extern "C" __global__ void float2double_f(float *A, double *ret, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < N) {
ret[tid] = (double)A[tid];
}
}
/**
* Performs a slice operation where the input matrix is sparse and the output
* matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input
* matrix.
* Parallelization: rows of output matrix.
*
 * @param inVal input val pointer
 * @param inRowPtr input row pointer
 * @param colInd input col index pointer
 * @param ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
template <typename T>
__device__ void slice_sparse_dense_row(T *inVal, int *inRowPtr, int *colInd,
T *ret, int rl, int ru, int cl, int cu,
int retClen) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex = index + rl;
if (rowIndex <= ru) {
/*
* TODO: Alternative approach: use dynamic parallelism. We are skipping this
*for now to avoid
* the complexity of two-step separate compilation and linking process.
*
* extern "C"
* __global__ void slice_sparse_dense_row_helper(double* inVal, int*
*inRowPtr, int* colInd, double* ret,
* int rl, int ru, int cl, int cu, int retClen, int start, int end, int
*index) {
* int i = blockIdx.x * blockDim.x + threadIdx.x + start;
* // Only slice if the index falls into the given range
* if(i < end && cl <= colInd[i] && colInd[i] <= cu) {
* ret[ index*retClen + (colInd[i] - cl) ] = inVal[i];
* }
* }
*
* int size = inRowPtr[rowIndex+1] - inRowPtr[rowIndex];
* double numThreads = (double)min(size, MAX_NUM_THREADS_CHILD_KERNEL);
* slice_sparse_dense_row_helper<<< ceil(numThreads/
*MAX_NUM_THREADS_CHILD_KERNEL), MAX_NUM_THREADS_CHILD_KERNEL>>>(inVal, inRowPtr,
*colInd, ret,
* rl, ru, cl, cu, retClen, inRowPtr[rowIndex],
*inRowPtr[rowIndex+1], index);
*
* Two-step compilation and linking process in JCudaKernels's constructor:
* cuLinkAddFile(linkState, CUjitInputType.CU_JIT_INPUT_LIBRARY,
*"/usr/local/cuda/lib64/libcudadevrt.a", jitOptions);
*/
// Iterate over elements of the row 'rowIndex'.
for (int i = inRowPtr[rowIndex]; i < inRowPtr[rowIndex + 1]; i++) {
// Only slice if the index falls into the given range
if (cl <= colInd[i] && colInd[i] <= cu) {
ret[index * retClen + (colInd[i] - cl)] = inVal[i];
}
}
}
}
extern "C" __global__ void slice_sparse_dense_row_d(double *inVal, int *inRowPtr,
int *colInd, double *ret,
int rl, int ru, int cl,
int cu, int retClen) {
slice_sparse_dense_row(inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
}
extern "C" __global__ void slice_sparse_dense_row_f(float *inVal, int *inRowPtr,
int *colInd, float *ret,
int rl, int ru, int cl,
int cu, int retClen) {
slice_sparse_dense_row(inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
}
/**
* Performs a slice operation where the input matrix is sparse and the output
* matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input
* matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
 * @param inVal input val pointer
 * @param inRowPtr input row pointer
 * @param colInd input col index pointer
 * @param ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
template <typename T>
__device__ void slice_sparse_dense_nnz(T *inVal, int *inRowPtr, int *colInd,
T *ret, int rl, int ru, int cl, int cu,
int retClen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = tid + inRowPtr[rl];
// Only slice if the index falls into the given range
if (i < inRowPtr[ru + 1] && cl <= colInd[i] && colInd[i] <= cu) {
// Find the row index for corresponding non-zero value 'i'.
int rowIndex = rl;
while (inRowPtr[rowIndex + 1] <= i) {
rowIndex++;
}
ret[(rowIndex - rl) * retClen + (colInd[i] - cl)] = inVal[i];
}
}
extern "C" __global__ void slice_sparse_dense_nnz_d(double *inVal, int *inRowPtr,
int *colInd, double *ret,
int rl, int ru, int cl,
int cu, int retClen) {
slice_sparse_dense_nnz(inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
}
extern "C" __global__ void slice_sparse_dense_nnz_f(float *inVal, int *inRowPtr,
int *colInd, float *ret,
int rl, int ru, int cl,
int cu, int retClen) {
slice_sparse_dense_nnz(inVal, inRowPtr, colInd, ret, rl, ru, cl, cu, retClen);
}
/**
* Performs a slice operation where the input matrix is dense and the output
* matrix is dense.
*
 * @param in dense input pointer
 * @param ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
template <typename T>
__device__ void slice_dense_dense(T *in, T *ret, int rl, int ru, int cl, int cu,
int inClen, int retRlen, int retClen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / retClen;
int iy = tid % retClen;
if (ix < retRlen && iy < retClen) {
int inIndex = (ix + rl) * inClen + cl + iy;
ret[tid] = in[inIndex];
}
}
extern "C" __global__ void slice_dense_dense_d(double *in, double *ret, int rl,
int ru, int cl, int cu,
int inClen, int retRlen,
int retClen) {
slice_dense_dense(in, ret, rl, ru, cl, cu, inClen, retRlen, retClen);
}
extern "C" __global__ void slice_dense_dense_f(float *in, float *ret, int rl,
int ru, int cl, int cu,
int inClen, int retRlen,
int retClen) {
slice_dense_dense(in, ret, rl, ru, cl, cu, inClen, retRlen, retClen);
}
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
template <typename T>
__device__ void copy_u2l_dense(T *ret, int dim, int N) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / dim;
int iy = tid % dim;
int id_dest = iy * dim + ix;
if (iy > ix && id_dest < N) {
// TODO: Potential to reduce the number of threads by half
int id_src = tid;
ret[id_dest] = ret[id_src];
}
}
extern "C" __global__ void copy_u2l_dense_d(double *ret, int dim, int N) {
copy_u2l_dense(ret, dim, N);
}
extern "C" __global__ void copy_u2l_dense_f(float *ret, int dim, int N) {
copy_u2l_dense(ret, dim, N);
}
// Use this method in templates to fetch the maximum value for a given datatype
template <typename T>
__forceinline__ __device__ T T_MAX(T x) {
return (T)DBL_MAX;
}
template <>
__forceinline__ __device__ float T_MAX(float x) {
return FLT_MAX;
}
template <>
__forceinline__ __device__ double T_MAX(double x) {
return DBL_MAX;
}
// op = {0=plus, 1=minus, 2=multiply, 3=divide, 4=power,
// 5=less, 6=lessequal, 7=greater, 8=greaterequal, 9=equal, 10=notequal,
// 11=min, 12=max, 13=and, 14=or, 15=minus1multiply, 16=minusnz,
// 17=modulus, 18=integer division}
template <typename T>
__forceinline__ __device__ T binaryOp(T x, T y, int op) {
switch (op) {
case 0:
return x + y;
case 1:
return x - y;
case 2:
return x * y;
case 3:
return x / y;
case 4:
return pow(x, y);
case 5:
return (x < y) == 0 ? 0.0 : 1.0;
case 6:
return (x <= y) == 0 ? 0.0 : 1.0;
case 7:
return (x > y) == 0 ? 0.0 : 1.0;
case 8:
return (x >= y) == 0 ? 0.0 : 1.0;
case 9:
return (x == y) == 0 ? 0.0 : 1.0;
case 10:
return (x != y) == 0 ? 0.0 : 1.0;
case 11:
return min(x, y);
case 12:
return max(x, y);
case 13:
return ((int)llrint(x) & (int)llrint(y)) == 0 ? 0.0 : 1.0;
case 14:
return ((int)llrint(x) | (int)llrint(y)) == 0 ? 0.0 : 1.0;
case 15:
return 1 - x * y;
case 16:
return (x != 0.0 ? x - y : 0.0);
case 17: {
if (y == 0.0 || y == -0.0) {
return nan("");
}
T v = x / y;
// Check for v being NaN (v != v) or if it is infinity
if (isnan(v) || isinf(v)) {
return v;
} else {
v = floor(v);
}
return x - v * y;
}
case 18: {
T v = x / y;
if (isnan(v) || isinf(v)) {
return v;
} else {
return floor(v);
}
}
default:
return T_MAX(x);
}
}
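/*
 * Worked examples for the less obvious op codes (values follow directly from the
 * code above): op 17 (modulus) computes x - floor(x / y) * y, so
 * binaryOp(5.0, 3.0, 17) = 2 and binaryOp(-5.0, 3.0, 17) = 1, i.e. the result takes
 * the sign of the divisor, and y == 0 yields NaN. op 18 (integer division) is
 * floor(x / y), e.g. binaryOp(-5.0, 3.0, 18) = -2. op 16 (minusnz) returns x - y
 * only where x is non-zero, so binaryOp(0.0, 7.0, 16) = 0.
 */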
/**
* Performs forward pass for relu: ret = max(A, 0)
*
* @param A input array allocated on the GPU
* @param ret output array allocated on the GPU
* @param rlen the number of rows
* @param clen the number of columns
*/
template <typename T>
__device__ void relu(T *A, T *ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
ret[tid] = max(0.0, A[tid]);
}
}
extern "C" __global__ void relu_d(double *A, double *ret, int rlen, int clen) {
relu(A, ret, rlen, clen);
}
extern "C" __global__ void relu_f(float *A, float *ret, int rlen, int clen) {
relu(A, ret, rlen, clen);
}
/**
* This method computes the backpropagation errors for previous layer of relu operation
*
* @param X input activation array allocated on the GPU
* @param dout errors from previous layer
* @param ret output array allocated on the GPU
* @param rlen the number of rows
* @param clen the number of columns
*/
template <typename T>
__device__ void relu_backward(T *X, T *dout, T *ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
ret[tid] = X[tid] > 0 ? dout[tid] : 0;
}
}
extern "C" __global__ void relu_backward_d(double *X, double *dout, double *ret,
int rlen, int clen) {
relu_backward(X, dout, ret, rlen, clen);
}
extern "C" __global__ void relu_backward_f(float *X, float *dout, float *ret,
int rlen, int clen) {
relu_backward(X, dout, ret, rlen, clen);
}
/**
* Performs inplace addition: ret += input
*
* @param input rhs input array allocated on the GPU
* @param ret the input and output array allocated on the GPU
* @param rlen the number of rows
* @param clen the number of columns
*/
template <typename T>
__device__ void inplace_add(T *input, T *ret, int rlen, int clen) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
ret[tid] += input[tid];
}
}
extern "C" __global__ void inplace_add_d(double *input, double *ret, int rlen,
int clen) {
inplace_add(input, ret, rlen, clen);
}
extern "C" __global__ void inplace_add_f(float *input, float *ret, int rlen,
int clen) {
inplace_add(input, ret, rlen, clen);
}
// Performs the operation corresponding to the DML script:
// ones = matrix(1, rows=1, cols=Hout*Wout)
// output = input + matrix(bias %*% ones, rows=1, cols=F*Hout*Wout)
// This operation is often followed by conv2d and hence we have introduced
// bias_add(input, bias) built-in function
template <typename T>
__device__ void bias_add(T *input, T *bias, T *ret, int rlen, int clen,
int PQ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
int biasIndex = iy / PQ;
ret[tid] = input[tid] + bias[biasIndex];
}
}
extern "C" __global__ void bias_add_d(double *input, double *bias, double *ret,
int rlen, int clen, int PQ) {
bias_add(input, bias, ret, rlen, clen, PQ);
}
extern "C" __global__ void bias_add_f(float *input, float *bias, float *ret,
int rlen, int clen, int PQ) {
bias_add(input, bias, ret, rlen, clen, PQ);
}
// Performs the operation "ret <- A + alpha*B", where B is a vector
template <typename T>
__device__ void daxpy_matrix_vector(T *A, T *B, double alpha, T *ret, int rlenA,
int clenA, int rlenB, int clenB) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clenA;
int iy = tid % clenA;
if (ix < rlenA && iy < clenA) {
int index = ix * clenA + iy;
if (rlenB == 1) {
ret[index] = A[index] + alpha * B[iy];
} else {
ret[index] = A[index] + alpha * B[ix];
}
}
}
extern "C" __global__ void daxpy_matrix_vector_d(double *A, double *B,
double alpha, double *ret,
int rlenA, int clenA, int rlenB,
int clenB) {
daxpy_matrix_vector(A, B, alpha, ret, rlenA, clenA, rlenB, clenB);
}
extern "C" __global__ void daxpy_matrix_vector_f(float *A, float *B,
double alpha, float *ret,
int rlenA, int clenA, int rlenB,
int clenB) {
daxpy_matrix_vector(A, B, alpha, ret, rlenA, clenA, rlenB, clenB);
}
// Performs similar operation as bias_add except elementwise multiplication
// instead of add
template <typename T>
__device__ void bias_multiply(T *input, T *bias, T *ret, int rlen, int clen,
int PQ) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / clen;
int iy = tid % clen;
if (ix < rlen && iy < clen) {
int biasIndex = iy / PQ;
ret[tid] = input[tid] * bias[biasIndex];
}
}
extern "C" __global__ void bias_multiply_d(double *input, double *bias,
double *ret, int rlen, int clen,
int PQ) {
bias_multiply(input, bias, ret, rlen, clen, PQ);
}
extern "C" __global__ void bias_multiply_f(float *input, float *bias, float *ret,
int rlen, int clen, int PQ) {
bias_multiply(input, bias, ret, rlen, clen, PQ);
}
/**
* Performs a binary cellwise arithmetic operation on 2 matrices.
* Either both matrices are of equal size or one of them is a vector or both
* are.
* @param A first input matrix allocated on GPU
* @param B second input matrix allocated on GPU
* @param C output allocated on GPU
* @param maxRlen maximum of the row lengths of A and B
* @param maxClen maximum of the column lengths of A and B
* @param vectorAStatus if A is a row vector, column vector or neither
* @param vectorBStatus if B is a row vector, column vector or neither
* @param op the numeric code of the arithmetic operation to
* perform
*
*/
template <typename T>
__device__ void matrix_matrix_cellwise_op(T *A, T *B, T *C, int maxRlen,
int maxClen, int vectorAStatus,
int vectorBStatus, int op) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
if (ix < maxRlen && iy < maxClen) {
int outIndex = ix * maxClen + iy;
int aIndex = outIndex;
int bIndex = outIndex;
if (vectorAStatus == 1)
aIndex = ix; // clen == 1
else if (vectorAStatus == 2)
aIndex = iy; // rlen == 1
if (vectorBStatus == 1)
bIndex = ix; // clen == 1
else if (vectorBStatus == 2)
bIndex = iy; // rlen == 1
C[outIndex] = binaryOp(A[aIndex], B[bIndex], op);
// printf("C[%d] = A[%d](%f) B[%d](%f) (%d %d)\n", outIndex, aIndex,
// A[aIndex], bIndex, B[bIndex], (ix+1), (iy+1));
__syncthreads();
}
}
extern "C" __global__ void matrix_matrix_cellwise_op_d(
double *A, double *B, double *C, int maxRlen, int maxClen,
int vectorAStatus, int vectorBStatus, int op) {
matrix_matrix_cellwise_op(A, B, C, maxRlen, maxClen, vectorAStatus,
vectorBStatus, op);
}
extern "C" __global__ void matrix_matrix_cellwise_op_f(
float *A, float *B, float *C, int maxRlen, int maxClen, int vectorAStatus,
int vectorBStatus, int op) {
matrix_matrix_cellwise_op(A, B, C, maxRlen, maxClen, vectorAStatus,
vectorBStatus, op);
}
/**
* Performs an arithmetic operation between a matrix and a scalar.
* C = s op A or C = A op s (where A is the matrix, s is the scalar and op is
* the operation)
* @param A input matrix allocated on GPU
* @param scalar scalar input
* @param C output matrix allocated on GPU
* @param size number of elements in matrix A
* @param op number code of the arithmetic operation to perform
* @param isLeftScalar whether the scalar is on the left side
*/
template <typename T>
__device__ void matrix_scalar_op(T *A, T scalar, T *C, int size, int op,
int isLeftScalar) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
if (isLeftScalar) {
C[index] = binaryOp(scalar, A[index], op);
} else {
C[index] = binaryOp(A[index], scalar, op);
}
}
__syncthreads();
}
extern "C" __global__ void matrix_scalar_op_d(double *A, double scalar,
double *C, int size, int op,
int isLeftScalar) {
matrix_scalar_op(A, scalar, C, size, op, isLeftScalar);
}
extern "C" __global__ void matrix_scalar_op_f(float *A, double scalar, float *C,
int size, int op,
int isLeftScalar) {
matrix_scalar_op(A, (float)scalar, C, size, op, isLeftScalar);
}
/**
* Sets all elements (fills) of a double array of given length with a given
* scalar value
* @param A array to be filled
* @param scalar value to fill array with
* @param lenA length of array A
*/
template <typename T>
__device__ void fill(T *A, T scalar, int lenA) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < lenA) {
A[index] = scalar;
}
}
extern "C" __global__ void fill_d(double *A, double scalar, int lenA) {
fill(A, scalar, lenA);
}
extern "C" __global__ void fill_f(float *A, double scalar, int lenA) {
fill(A, (float)scalar, lenA);
}
/**
* Appends Matrix B to the right side of Matrix A into a new matrix C
* | 1 2 3 4 | | 8 8 8 | | 1 2 3 4 8 8 8 |
* cbind ( | 9 8 7 6 | , | 7 7 7 | ) = | 9 8 7 6 7 7 7 |
* | 4 3 2 1 | | 9 9 9 | | 4 3 2 1 9 9 9 |
* @param A input matrix A allocated on the GPU
* @param B input matrix B allocated on the GPU
* @param C input matrix C allocated on the GPU
* @param rowsA rows in A
* @param colsA columns in A
* @param rowsB rows in B
* @param colsB columns in B
*/
template <typename T>
__device__ void cbind(T *A, T *B, T *C, int rowsA, int colsA, int rowsB,
int colsB) {
int maxClen = max(colsA, colsB);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
int colsC = colsA + colsB;
int rowsC = rowsA;
// Copy an element of A into C into the appropriate location
if (ix < rowsA && iy < colsA) {
T elemA = A[ix * colsA + iy];
C[ix * colsC + iy] = elemA;
}
// Copy an element of B into C into the appropriate location
if (ix < rowsB && iy < colsB) {
T elemB = B[ix * colsB + iy];
C[ix * colsC + (iy + colsA)] = elemB;
}
}
extern "C" __global__ void cbind_d(double *A, double *B, double *C, int rowsA,
int colsA, int rowsB, int colsB) {
cbind(A, B, C, rowsA, colsA, rowsB, colsB);
}
extern "C" __global__ void cbind_f(float *A, float *B, float *C, int rowsA,
int colsA, int rowsB, int colsB) {
cbind(A, B, C, rowsA, colsA, rowsB, colsB);
}
/**
* Appends Matrix B to the bottom of Matrix A into a new matrix C
* | 2 3 4 | | 8 8 8 | | 2 3 4 |
* rbind ( | 8 7 6 | , | 7 7 7 | ) = | 8 7 6 |
* | 3 2 1 | | 3 2 1 |
| 8 8 8 |
| 7 7 7 |
* @param A input matrix A allocated on the GPU
* @param B input matrix B allocated on the GPU
* @param C input matrix C allocated on the GPU
* @param rowsA rows in A
* @param colsA columns in A
* @param rowsB rows in B
* @param colsB columns in B
*/
template <typename T>
__device__ void rbind(T *A, T *B, T *C, int rowsA, int colsA, int rowsB,
int colsB) {
int maxClen = max(colsA, colsB);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int ix = tid / maxClen;
int iy = tid % maxClen;
int rowsC = rowsA + rowsB;
int colsC = colsA;
// Copy an element of A into C into the appropriate location
if (ix < rowsA && iy < colsA) {
T elemA = A[ix * colsA + iy];
C[ix * colsC + iy] = elemA;
}
// Copy an element of B into C into the appropriate location
if (ix < rowsB && iy < colsB) {
T elemB = B[ix * colsB + iy];
C[(ix + rowsA) * colsC + iy] = elemB;
}
}
extern "C" __global__ void rbind_d(double *A, double *B, double *C, int rowsA,
int colsA, int rowsB, int colsB) {
rbind(A, B, C, rowsA, colsA, rowsB, colsB);
}
extern "C" __global__ void rbind_f(float *A, float *B, float *C, int rowsA,
int colsA, int rowsB, int colsB) {
rbind(A, B, C, rowsA, colsA, rowsB, colsB);
}
/**
* Does a reduce operation over all elements of the array.
* This method has been adapted from the Reduction sample in the NVIDIA CUDA
* Samples (v8.0)
* and the Reduction example available through jcuda.org
* When invoked initially, all blocks partly compute the reduction operation
* over the entire array
 * and writes it to the output/temporary array. A second invocation needs to
* happen to get the
* reduced value.
* The number of threads, blocks and amount of shared memory is calculated in a
* specific way.
* Please refer to the NVIDIA CUDA Sample or the SystemML code that invokes this
* method to see
 * how it's done.
* The template-ized version of this function is similar to what is found in
* NVIDIA CUB
*
* @param ReductionOp Type of the functor object that implements the
* reduction operation
*/
template <typename ReductionOp, typename T>
__device__ void reduce(
T *g_idata, ///< input data stored in device memory (of size n)
T *g_odata, ///< output/temporary array stored in device memory (of size n)
unsigned int n, ///< size of the input and temporary/output arrays
ReductionOp
reduction_op, ///< Reduction operation to perform (functor object)
T initialValue) ///< initial value for the reduction variable
{
// extern __shared__ T sdata[];
extern __shared__ __align__(sizeof(T)) unsigned char my_sdata[];
T *sdata = reinterpret_cast<T *>(my_sdata);
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
unsigned int gridSize = blockDim.x * 2 * gridDim.x;
T v = initialValue;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n) {
v = reduction_op(v, g_idata[i]);
// ensure we don't read out of bounds
if (i + blockDim.x < n) v = reduction_op(v, g_idata[i + blockDim.x]);
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024) {
if (tid < 512) {
sdata[tid] = v = reduction_op(v, sdata[tid + 512]);
}
__syncthreads();
}
if (blockDim.x >= 512) {
if (tid < 256) {
sdata[tid] = v = reduction_op(v, sdata[tid + 256]);
}
__syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128) {
sdata[tid] = v = reduction_op(v, sdata[tid + 128]);
}
__syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64) {
sdata[tid] = v = reduction_op(v, sdata[tid + 64]);
}
__syncthreads();
}
if (tid < 32) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T *smem = sdata;
if (blockDim.x >= 64) {
smem[tid] = v = reduction_op(v, smem[tid + 32]);
}
if (blockDim.x >= 32) {
smem[tid] = v = reduction_op(v, smem[tid + 16]);
}
if (blockDim.x >= 16) {
smem[tid] = v = reduction_op(v, smem[tid + 8]);
}
if (blockDim.x >= 8) {
smem[tid] = v = reduction_op(v, smem[tid + 4]);
}
if (blockDim.x >= 4) {
smem[tid] = v = reduction_op(v, smem[tid + 2]);
}
if (blockDim.x >= 2) {
smem[tid] = v = reduction_op(v, smem[tid + 1]);
}
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
/**
* Does a reduce (sum) over each row of the array.
* This kernel must be launched with as many blocks as there are rows.
* The intuition for this kernel is that each block does a reduction over a
* single row.
 * The maximum number of blocks that can be launched (as of compute capability 3.0)
* is 2^31 - 1
* This works out fine for SystemML, since the maximum elements in a Java array
* can be 2^31 - c (some small constant)
* If the matrix is "fat" and "short", i.e. there are small number of rows and a
* large number of columns,
* there could be under-utilization of the hardware.
* The template-ized version of this function is similar to what is found in
* NVIDIA CUB
* @param ReductionOp Type of the functor object that implements the
* reduction operation
* @param AssignmentOp Type of the functor object that is used to modify
* the value before writing it to its final location in global memory for each
* row
*/
template <typename ReductionOp, typename AssignmentOp, typename T>
__device__ void reduce_row(
T *g_idata, ///< input data stored in device memory (of size rows*cols)
    T *g_odata, ///< output/temporary array stored in device memory (of size
///rows*cols)
unsigned int rows, ///< rows in input and temporary/output arrays
unsigned int cols, ///< columns in input and temporary/output arrays
ReductionOp
reduction_op, ///< Reduction operation to perform (functor object)
AssignmentOp assignment_op, ///< Operation to perform before assigning this
///to its final location in global memory for
///each row
T initialValue) { ///< initial value for the reduction variable
// extern __shared__ T sdata[];
extern __shared__ __align__(sizeof(T)) unsigned char my_sdata[];
T *sdata = reinterpret_cast<T *>(my_sdata);
// one block per row
if (blockIdx.x >= rows) {
return;
}
unsigned int block = blockIdx.x;
unsigned int tid = threadIdx.x;
unsigned int i = tid;
unsigned int block_offset = block * cols;
T v = initialValue;
while (i < cols) {
v = reduction_op(v, g_idata[block_offset + i]);
i += blockDim.x;
}
// each thread puts its local sum into shared memory
sdata[tid] = v;
__syncthreads();
// do reduction in shared mem
if (blockDim.x >= 1024) {
if (tid < 512) {
sdata[tid] = v = reduction_op(v, sdata[tid + 512]);
}
__syncthreads();
}
if (blockDim.x >= 512) {
if (tid < 256) {
sdata[tid] = v = reduction_op(v, sdata[tid + 256]);
}
__syncthreads();
}
if (blockDim.x >= 256) {
if (tid < 128) {
sdata[tid] = v = reduction_op(v, sdata[tid + 128]);
}
__syncthreads();
}
if (blockDim.x >= 128) {
if (tid < 64) {
sdata[tid] = v = reduction_op(v, sdata[tid + 64]);
}
__syncthreads();
}
if (tid < 32) {
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T *smem = sdata;
if (blockDim.x >= 64) {
smem[tid] = v = reduction_op(v, smem[tid + 32]);
}
if (blockDim.x >= 32) {
smem[tid] = v = reduction_op(v, smem[tid + 16]);
}
if (blockDim.x >= 16) {
smem[tid] = v = reduction_op(v, smem[tid + 8]);
}
if (blockDim.x >= 8) {
smem[tid] = v = reduction_op(v, smem[tid + 4]);
}
if (blockDim.x >= 4) {
smem[tid] = v = reduction_op(v, smem[tid + 2]);
}
if (blockDim.x >= 2) {
smem[tid] = v = reduction_op(v, smem[tid + 1]);
}
}
// write result for this block to global mem, modify it with assignment op
if (tid == 0) g_odata[block] = assignment_op(sdata[0]);
}
/**
* Does a column wise reduction.
* The intuition is that there are as many global threads as there are columns
* Each global thread is responsible for a single element in the output vector
 * This of course leads to an under-utilization of the GPU resources.
 * For cases where the number of columns is small, there can be unused SMs
*
* The template-ized version of this function is similar to what is found in
* NVIDIA CUB
* @param ReductionOp Type of the functor object that implements the
* reduction operation
* @param AssignmentOp Type of the functor object that is used to modify
* the value before writing it to its final location in global memory for each
* column
*/
template <typename ReductionOp, typename AssignmentOp, typename T>
__device__ void reduce_col(
T *g_idata, ///< input data stored in device memory (of size rows*cols)
    T *g_odata, ///< output/temporary array stored in device memory (of size
///rows*cols)
unsigned int rows, ///< rows in input and temporary/output arrays
unsigned int cols, ///< columns in input and temporary/output arrays
ReductionOp
reduction_op, ///< Reduction operation to perform (functor object)
AssignmentOp assignment_op, ///< Operation to perform before assigning this
///to its final location in global memory for
///each column
T initialValue) ///< initial value for the reduction variable
{
unsigned int global_tid = blockIdx.x * blockDim.x + threadIdx.x;
if (global_tid >= cols) {
return;
}
unsigned int i = global_tid;
unsigned int grid_size = cols;
T val = initialValue;
while (i < rows * cols) {
val = reduction_op(val, g_idata[i]);
i += grid_size;
}
g_odata[global_tid] = assignment_op(val);
}
/**
* Functor op for assignment op. This is a dummy/identity op.
*/
template <typename T>
struct IdentityOp {
__device__ __forceinline__ T operator()(T a) const { return a; }
};
/**
* Functor op for summation operation
*/
template <typename T>
struct SumOp {
__device__ __forceinline__ T operator()(T a, T b) const { return a + b; }
};
/**
* Do a summation over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
* @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
template <typename T>
__device__ void reduce_sum(T *g_idata, T *g_odata, unsigned int n) {
SumOp<T> op;
reduce<SumOp<T>, T>(g_idata, g_odata, n, op, (T)0.0);
}
extern "C" __global__ void reduce_sum_d(double *g_idata, double *g_odata,
unsigned int n) {
reduce_sum(g_idata, g_odata, n);
}
extern "C" __global__ void reduce_sum_f(float *g_idata, float *g_odata,
unsigned int n) {
reduce_sum(g_idata, g_odata, n);
}
/**
* Do a summation over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_row_sum(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
SumOp<T> op;
IdentityOp<T> aop;
reduce_row<SumOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
0.0);
}
extern "C" __global__ void reduce_row_sum_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_sum(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_row_sum_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_sum(g_idata, g_odata, rows, cols);
}
/**
* Do a summation over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_col_sum(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
SumOp<T> op;
IdentityOp<T> aop;
reduce_col<SumOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
(T)0.0);
}
extern "C" __global__ void reduce_col_sum_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_sum(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_col_sum_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_sum(g_idata, g_odata, rows, cols);
}
/**
* Functor op for max operation
*/
template <typename T>
struct MaxOp {
__device__ __forceinline__ T operator()(T a, T b) const { return fmax(a, b); }
};
template<>
struct MaxOp<float> {
__device__ __forceinline__ float operator()(float a, float b) const { return fmaxf(a, b); }
};
/**
* Do a max over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
template <typename T>
__device__ void reduce_max(T *g_idata, T *g_odata, unsigned int n) {
MaxOp<T> op;
reduce<MaxOp<T>, T>(g_idata, g_odata, n, op, -T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_max_d(double *g_idata, double *g_odata,
unsigned int n) {
reduce_max(g_idata, g_odata, n);
}
extern "C" __global__ void reduce_max_f(float *g_idata, float *g_odata,
unsigned int n) {
reduce_max(g_idata, g_odata, n);
}
/**
* Do a max over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_row_max(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
MaxOp<T> op;
IdentityOp<T> aop;
reduce_row<MaxOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
-T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_row_max_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_max(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_row_max_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_max(g_idata, g_odata, rows, cols);
}
/**
* Do a max over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_col_max(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
MaxOp<T> op;
IdentityOp<T> aop;
reduce_col<MaxOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
(T)-T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_col_max_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_max(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_col_max_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_max(g_idata, g_odata, rows, cols);
}
/**
* Functor op for min operation
*/
template <typename T>
struct MinOp {
__device__ __forceinline__ T operator()(T a, T b) const { return fmin(a, b); }
};
/**
* Do a min over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
template <typename T>
__device__ void reduce_min(T *g_idata, T *g_odata, unsigned int n) {
MinOp<T> op;
reduce<MinOp<T>, T>(g_idata, g_odata, n, op, T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_min_d(double *g_idata, double *g_odata,
unsigned int n) {
reduce_min(g_idata, g_odata, n);
}
extern "C" __global__ void reduce_min_f(float *g_idata, float *g_odata,
unsigned int n) {
reduce_min(g_idata, g_odata, n);
}
/**
* Do a min over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_row_min(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
MinOp<T> op;
IdentityOp<T> aop;
reduce_row<MinOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_row_min_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_min(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_row_min_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_min(g_idata, g_odata, rows, cols);
}
/**
* Do a min over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_col_min(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
MinOp<T> op;
IdentityOp<T> aop;
reduce_col<MinOp<T>, IdentityOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
T_MAX(g_idata[0]));
}
extern "C" __global__ void reduce_col_min_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_min(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_col_min_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_min(g_idata, g_odata, rows, cols);
}
/**
* Functor op for product operation
*/
template <typename T>
struct ProductOp {
__device__ __forceinline__ T operator()(T a, T b) const { return a * b; }
};
/**
* Do a product over all elements of an array/matrix
* @param g_idata input data stored in device memory (of size n)
 * @param g_odata output/temporary array stored in device memory (of size n)
* @param n size of the input and temporary/output arrays
*/
template <typename T>
__device__ void reduce_prod(T *g_idata, T *g_odata, unsigned int n) {
ProductOp<T> op;
reduce<ProductOp<T>, T>(g_idata, g_odata, n, op, (T)1.0);
}
extern "C" __global__ void reduce_prod_d(double *g_idata, double *g_odata,
unsigned int n) {
reduce_prod(g_idata, g_odata, n);
}
extern "C" __global__ void reduce_prod_f(float *g_idata, float *g_odata,
unsigned int n) {
reduce_prod(g_idata, g_odata, n);
}
/**
* Functor op for mean operation
*/
template <typename T>
struct MeanOp {
const long
_size; ///< Number of elements by which to divide to calculate mean
__device__ __forceinline__ MeanOp(long size) : _size(size) {}
__device__ __forceinline__ T operator()(T total) const {
return total / _size;
}
};
/**
* Do a mean over all rows of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size rows)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_row_mean(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
SumOp<T> op;
MeanOp<T> aop(cols);
reduce_row<SumOp<T>, MeanOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
(T)0.0);
}
extern "C" __global__ void reduce_row_mean_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_mean(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_row_mean_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_row_mean(g_idata, g_odata, rows, cols);
}
/**
* Do a mean over all columns of a matrix
* @param g_idata input matrix stored in device memory (of size rows * cols)
* @param g_odata output vector stored in device memory (of size cols)
* @param rows number of rows in input matrix
* @param cols number of columns in input matrix
*/
template <typename T>
__device__ void reduce_col_mean(T *g_idata, T *g_odata, unsigned int rows,
unsigned int cols) {
SumOp<T> op;
MeanOp<T> aop(rows);
reduce_col<SumOp<T>, MeanOp<T>, T>(g_idata, g_odata, rows, cols, op, aop,
0.0);
}
extern "C" __global__ void reduce_col_mean_d(double *g_idata, double *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_mean(g_idata, g_odata, rows, cols);
}
extern "C" __global__ void reduce_col_mean_f(float *g_idata, float *g_odata,
unsigned int rows,
unsigned int cols) {
reduce_col_mean(g_idata, g_odata, rows, cols);
}
/**
* Do an exp over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_exp(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = exp(A[index]);
}
}
extern "C" __global__ void matrix_exp_d(double *A, double *C,
unsigned int size) {
matrix_exp(A, C, size);
}
extern "C" __global__ void matrix_exp_f(float *A, float *C, unsigned int size) {
matrix_exp(A, C, size);
}
/**
 * Do a sqrt over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_sqrt(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = sqrt(A[index]);
}
}
extern "C" __global__ void matrix_sqrt_d(double *A, double *C,
unsigned int size) {
matrix_sqrt(A, C, size);
}
extern "C" __global__ void matrix_sqrt_f(float *A, float *C, unsigned int size) {
matrix_sqrt(A, C, size);
}
/**
 * Do a round over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_round(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = (T)llround(A[index]);
}
}
extern "C" __global__ void matrix_round_d(double *A, double *C,
unsigned int size) {
matrix_round(A, C, size);
}
extern "C" __global__ void matrix_round_f(float *A, float *C,
unsigned int size) {
matrix_round(A, C, size);
}
/**
* Do an abs over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_abs(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = (T)fabs(A[index]);
}
}
extern "C" __global__ void matrix_abs_d(double *A, double *C,
unsigned int size) {
matrix_abs(A, C, size);
}
extern "C" __global__ void matrix_abs_f(float *A, float *C, unsigned int size) {
matrix_abs(A, C, size);
}
/**
 * Do a log over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_log(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = log(A[index]);
}
}
extern "C" __global__ void matrix_log_d(double *A, double *C,
unsigned int size) {
matrix_log(A, C, size);
}
extern "C" __global__ void matrix_log_f(float *A, float *C, unsigned int size) {
matrix_log(A, C, size);
}
/**
 * Do a floor over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_floor(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = floor(A[index]);
}
}
extern "C" __global__ void matrix_floor_d(double *A, double *C,
unsigned int size) {
matrix_floor(A, C, size);
}
extern "C" __global__ void matrix_floor_f(float *A, float *C,
unsigned int size) {
matrix_floor(A, C, size);
}
/**
 * Do a ceil over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_ceil(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = ceil(A[index]);
}
}
extern "C" __global__ void matrix_ceil_d(double *A, double *C,
unsigned int size) {
matrix_ceil(A, C, size);
}
extern "C" __global__ void matrix_ceil_f(float *A, float *C, unsigned int size) {
matrix_ceil(A, C, size);
}
/**
 * Do a sin over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_sin(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = sin(A[index]);
}
}
extern "C" __global__ void matrix_sin_d(double *A, double *C,
unsigned int size) {
matrix_sin(A, C, size);
}
extern "C" __global__ void matrix_sin_f(float *A, float *C, unsigned int size) {
matrix_sin(A, C, size);
}
/**
 * Do a sinh over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_sinh(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = sinh(A[index]);
}
}
extern "C" __global__ void matrix_sinh_d(double *A, double *C,
unsigned int size) {
matrix_sinh(A, C, size);
}
extern "C" __global__ void matrix_sinh_f(float *A, float *C, unsigned int size) {
matrix_sinh(A, C, size);
}
/**
 * Do a cos over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_cos(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = cos(A[index]);
}
}
extern "C" __global__ void matrix_cos_d(double *A, double *C,
unsigned int size) {
matrix_cos(A, C, size);
}
extern "C" __global__ void matrix_cos_f(float *A, float *C, unsigned int size) {
matrix_cos(A, C, size);
}
/**
 * Do a cosh over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_cosh(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = cosh(A[index]);
}
}
extern "C" __global__ void matrix_cosh_d(double *A, double *C,
unsigned int size) {
matrix_cosh(A, C, size);
}
extern "C" __global__ void matrix_cosh_f(float *A, float *C, unsigned int size) {
matrix_cosh(A, C, size);
}
/**
 * Do a tan over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_tan(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = tan(A[index]);
}
}
extern "C" __global__ void matrix_tan_d(double *A, double *C,
unsigned int size) {
matrix_tan(A, C, size);
}
extern "C" __global__ void matrix_tan_f(float *A, float *C, unsigned int size) {
matrix_tan(A, C, size);
}
/**
 * Do a tanh over all the elements of a matrix
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_tanh(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = tanh(A[index]);
}
}
extern "C" __global__ void matrix_tanh_d(double *A, double *C,
unsigned int size) {
matrix_tanh(A, C, size);
}
extern "C" __global__ void matrix_tanh_f(float *A, float *C, unsigned int size) {
matrix_tanh(A, C, size);
}
/**
* Do an asin over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_asin(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = asin(A[index]);
}
}
extern "C" __global__ void matrix_asin_d(double *A, double *C,
unsigned int size) {
matrix_asin(A, C, size);
}
extern "C" __global__ void matrix_asin_f(float *A, float *C, unsigned int size) {
matrix_asin(A, C, size);
}
/**
* Do an acos over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_acos(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = acos(A[index]);
}
}
extern "C" __global__ void matrix_acos_d(double *A, double *C,
unsigned int size) {
matrix_acos(A, C, size);
}
extern "C" __global__ void matrix_acos_f(float *A, float *C, unsigned int size) {
matrix_acos(A, C, size);
}
/**
* Do an atan over all the elements of a matrix
* @param A the input matrix (of length = size)
* @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_atan(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
C[index] = atan(A[index]);
}
}
extern "C" __global__ void matrix_atan_d(double *A, double *C,
unsigned int size) {
matrix_atan(A, C, size);
}
extern "C" __global__ void matrix_atan_f(float *A, float *C, unsigned int size) {
matrix_atan(A, C, size);
}
/**
 * Do a sign over all the elements of a matrix
 * Assign -1, 0 or 1 depending on the element being negative, 0 or positive
 * @param A the input matrix (of length = size)
 * @param C the pre-allocated output matrix (of length = size)
 * @param size the length of the input and output matrices
*/
template <typename T>
__device__ void matrix_sign(T *A, T *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
if (A[index] == 0.0) {
C[index] = 0.0;
} else {
C[index] = copysign(1.0, A[index]);
}
}
}
extern "C" __global__ void matrix_sign_d(double *A, double *C,
unsigned int size) {
matrix_sign(A, C, size);
}
extern "C" __global__ void matrix_sign_f(float *A, float *C, unsigned int size) {
matrix_sign(A, C, size);
} |
b6fe78af11219e804619621c25d5138084ff9260.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#define ALIVE 1
#define DEAD 0
# define CUDA_SAFE_CALL( call) { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
int termCheck(int *prev, int *next, int N, int M, int gen);
__global__ void kernel_update(int* t,int* t1,int N,int M);
__global__ void initdat(int *t, int *t1, int N, int M, time_t clock);
int main(int argc, char *argv[]) {
int N, /* rows of the grid */
M, /* columns of the grid */
      Gens,         /* number of generations */
      perGens;      /* check termination every perGens generations; if perGens is zero, no termination check */
int *grid, *grid1;
int *gpu_grid, *gpu_grid1, *gpu_temp;
if ( argc != 5) {
printf("Error! Missing mandatory argument.\n");
return 1;
}
N = atoi(argv[1]); /* Getting rows amount */
M = atoi(argv[2]); /* Getting columns amount */
Gens = atoi(argv[3]); /* Getting Gens */
perGens = atoi(argv[4]);
if (Gens <= 0 || N < 0 || M < 0 || perGens < 0) {
printf("Please give positive values for rows/cols and Generations\n");
return 1;
}
int blockSize = 512;
int numBlocks = (N*M + blockSize - 1) / blockSize;
grid = (int*)malloc(sizeof(int)*N*M);
grid1 = (int*)malloc(sizeof(int)*N*M);
CUDA_SAFE_CALL(hipMalloc(&gpu_grid, N*M*sizeof(int)));
CUDA_SAFE_CALL(hipMalloc(&gpu_grid1, N*M*sizeof(int)));
/* Initialize random data */
hipLaunchKernelGGL(( initdat), dim3(numBlocks),dim3(blockSize), 0, 0, gpu_grid, gpu_grid1, N, M, time(NULL));
CUDA_SAFE_CALL(hipDeviceSynchronize());
for (int k = 1; k <= Gens; k++) {
hipLaunchKernelGGL(( kernel_update), dim3(numBlocks),dim3(blockSize), 0, 0, gpu_grid,gpu_grid1,N,M);
CUDA_SAFE_CALL(hipDeviceSynchronize());
if ( perGens ) {
CUDA_SAFE_CALL(hipMemcpy(grid, gpu_grid, N*M*sizeof(int), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMemcpy(grid1, gpu_grid1, N*M*sizeof(int), hipMemcpyDeviceToHost));
if ( k % perGens == 0) {
if (termCheck(grid, grid1, N, M, k)) {
hipFree(gpu_grid1);
hipFree(gpu_grid);
free(grid);
free(grid1);
return 0;
}
}
}
gpu_temp = gpu_grid;
gpu_grid = gpu_grid1;
gpu_grid1 = gpu_temp;
}
printf("Reached requested generations %d\n",Gens );
hipFree(gpu_grid1);
hipFree(gpu_grid);
free(grid);
free(grid1);
return 0;
}
int termCheck(int *prev, int *next, int N, int M, int gen){
int allDiff = 0;
int sum = 0;
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
if (prev[i*M+j] != next[i*M+j]) {
allDiff = 1;
}
sum += next[i*M+j];
}
}
if (!sum) {
printf("All cells are dead at generation %d\n", gen);
return 1;
}
else if (!allDiff) {
printf("Generation %d is the same with generation %d\n", gen, gen-1);
return 1;
}
return 0;
}
__global__ void kernel_update(int* t,int* t1,int N,int M){
int x = blockIdx.x * blockDim.x + threadIdx.x;
/*update starts*/
if (0 <= x && x < N*M ){ //if not out of bounds then..
int i,j,neighbours;
i = x / M;
j = x % M;
if (i+1 > N-1) {
if (j-1 < 0) {
        /* I am the bottom-left corner */
neighbours = t[(i-1)*M+M-1] + t[(i-1)*M+j] + t[(i-1)*M+j+1] + t[i*M+j+1] + t[0*M+j+1] + t[0*M+j] + t[0*M+M-1] + t[i*M+M-1];
}
else if (j+1 > M-1) {
        /* I am the bottom-right corner */
neighbours = t[(i-1)*M+j-1] + t[(i-1)*M+j] + t[(i-1)*M+0] + t[i*M+0] + t[0*M+0] + t[0*M+j] + t[0*M+j-1] + t[i*M+j-1];
}
else{
        /* I am just a bottom-edge cell */
neighbours = t[(i-1)*M+j-1] + t[(i-1)*M+j] + t[(i-1)*M+j+1] + t[i*M+j+1] + t[0*M+j+1] + t[0*M+j] + t[0*M+j-1] + t[i*M+j-1];
}
}
else if (i-1 < 0) {
if (j-1 < 0) {
        /* I am the top-left corner */
neighbours = t[(N-1)*M+M-1] + t[(N-1)*M+j] + t[(N-1)*M+j+1] + t[i*M+j+1] + t[(i+1)*M+j+1] + t[(i+1)*M+j] + t[(i+1)*M+M-1] + t[i*M+M-1];
}
else if (j+1 > M-1) {
        /* I am the top-right corner */
neighbours = t[(N-1)*M+j-1] + t[(N-1)*M+j] + t[(N-1)*M+0] + t[i*M+0] + t[(i+1)*M+0] + t[(i+1)*M+j] + t[(i+1)*M+j-1] + t[i*M+j-1];
}
else{
        /* I am just a top-edge cell */
neighbours = t[(N-1)*M+j-1] + t[(N-1)*M+j] + t[(N-1)*M+j+1] + t[i*M+j+1] + t[(i+1)*M+j+1] + t[(i+1)*M+j] + t[(i+1)*M+j-1] + t[i*M+j-1];
}
}
else if (j-1 < 0) {
      /* I am just a left-edge cell */
neighbours = t[(i-1)*M+M-1] + t[(i-1)*M+j] + t[(i-1)*M+j+1] + t[i*M+j+1] + t[(i+1)*M+j+1] + t[(i+1)*M+j] + t[(i+1)*M+M-1] + t[i*M+M-1];
}
else if (j+1 > M-1) {
      /* I am just a right-edge cell */
neighbours = t[(i-1)*M+j-1] + t[(i-1)*M+j] + t[(i-1)*M+0] + t[i*M+0] + t[(i+1)*M+0] + t[(i+1)*M+j] + t[(i+1)*M+j-1] + t[i*M+j-1];
}
else{
      /* my neighbours do not fall outside the grid */
neighbours = t[(i-1)*M+j-1] + t[(i-1)*M+j] + t[(i-1)*M+j+1] + t[i*M+j+1] + t[(i+1)*M+j+1] + t[(i+1)*M+j] + t[(i+1)*M+j-1] + t[i*M+j-1];
}
    /* game rules are applied here */
if (t[x] == ALIVE) {
if (neighbours <= 1 || neighbours >= 4) {
t1[x] = DEAD;
}
else{
t1[x] = ALIVE;
}
}
else if (t[x] == DEAD && neighbours == 3) {
t1[x] = ALIVE;
}
else{
t1[x] = DEAD;
}
}
}
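/*
 * Note on the indexing above: the grid is treated as a torus, so neighbours wrap
 * around both edges. For example, with N = 4 rows and M = 5 columns the cell at
 * (0, 0) counts (3, 4), (3, 0), (3, 1), (0, 1), (1, 1), (1, 0), (1, 4) and (0, 4)
 * as its eight neighbours, which is the "top_left" branch above.
 */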
__global__ void initdat(int *t, int *t1, int N, int M, time_t clock){
int x = blockIdx.x * blockDim.x + threadIdx.x;
hiprandState_t state;
hiprand_init(clock,x,0,&state);
if (0 <= x && x < N*M ){
t[x] = (hiprand(&state) % 4) ? DEAD : ALIVE;
t1[x] = DEAD;
}
}
| b6fe78af11219e804619621c25d5138084ff9260.cu | #include <stdio.h>
#include <time.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand.h>
#include <curand_kernel.h>
#define ALIVE 1
#define DEAD 0
# define CUDA_SAFE_CALL( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
int termCheck(int *prev, int *next, int N, int M, int gen);
__global__ void kernel_update(int* t,int* t1,int N,int M);
__global__ void initdat(int *t, int *t1, int N, int M, time_t clock);
int main(int argc, char *argv[]) {
int N, /* rows of the grid */
M, /* columns of the grid */
      Gens,         /* number of generations */
      perGens;      /* check termination every perGens generations; if perGens is zero, no termination check */
int *grid, *grid1;
int *gpu_grid, *gpu_grid1, *gpu_temp;
if ( argc != 5) {
printf("Error! Missing mandatory argument.\n");
return 1;
}
N = atoi(argv[1]); /* Getting rows amount */
M = atoi(argv[2]); /* Getting columns amount */
Gens = atoi(argv[3]); /* Getting Gens */
perGens = atoi(argv[4]);
if (Gens <= 0 || N < 0 || M < 0 || perGens < 0) {
printf("Please give positive values for rows/cols and Generations\n");
return 1;
}
int blockSize = 512;
int numBlocks = (N*M + blockSize - 1) / blockSize;
grid = (int*)malloc(sizeof(int)*N*M);
grid1 = (int*)malloc(sizeof(int)*N*M);
CUDA_SAFE_CALL(cudaMalloc(&gpu_grid, N*M*sizeof(int)));
CUDA_SAFE_CALL(cudaMalloc(&gpu_grid1, N*M*sizeof(int)));
/* Initialize random data */
initdat<<<numBlocks,blockSize>>>(gpu_grid, gpu_grid1, N, M, time(NULL));
CUDA_SAFE_CALL(cudaDeviceSynchronize());
for (int k = 1; k <= Gens; k++) {
kernel_update<<<numBlocks,blockSize>>>(gpu_grid,gpu_grid1,N,M);
CUDA_SAFE_CALL(cudaDeviceSynchronize());
if ( perGens ) {
CUDA_SAFE_CALL(cudaMemcpy(grid, gpu_grid, N*M*sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMemcpy(grid1, gpu_grid1, N*M*sizeof(int), cudaMemcpyDeviceToHost));
if ( k % perGens == 0) {
if (termCheck(grid, grid1, N, M, k)) {
cudaFree(gpu_grid1);
cudaFree(gpu_grid);
free(grid);
free(grid1);
return 0;
}
}
}
gpu_temp = gpu_grid;
gpu_grid = gpu_grid1;
gpu_grid1 = gpu_temp;
}
printf("Reached requested generations %d\n",Gens );
cudaFree(gpu_grid1);
cudaFree(gpu_grid);
free(grid);
free(grid1);
return 0;
}
int termCheck(int *prev, int *next, int N, int M, int gen){
int allDiff = 0;
int sum = 0;
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
if (prev[i*M+j] != next[i*M+j]) {
allDiff = 1;
}
sum += next[i*M+j];
}
}
if (!sum) {
printf("All cells are dead at generation %d\n", gen);
return 1;
}
else if (!allDiff) {
printf("Generation %d is the same with generation %d\n", gen, gen-1);
return 1;
}
return 0;
}
__global__ void kernel_update(int* t,int* t1,int N,int M){
int x = blockIdx.x * blockDim.x + threadIdx.x;
/*update starts*/
if (0 <= x && x < N*M ){ //if not out of bounds then..
int i,j,neighbours;
i = x / M;
j = x % M;
if (i+1 > N-1) {
if (j-1 < 0) {
            /* I am the bottom-left corner */
neighbours = t[(i-1)*M+M-1] + t[(i-1)*M+j] + t[(i-1)*M+j+1] + t[i*M+j+1] + t[0*M+j+1] + t[0*M+j] + t[0*M+M-1] + t[i*M+M-1];
}
else if (j+1 > M-1) {
            /* I am the bottom-right corner */
neighbours = t[(i-1)*M+j-1] + t[(i-1)*M+j] + t[(i-1)*M+0] + t[i*M+0] + t[0*M+0] + t[0*M+j] + t[0*M+j-1] + t[i*M+j-1];
}
else{
            /* I am a plain bottom-row cell */
neighbours = t[(i-1)*M+j-1] + t[(i-1)*M+j] + t[(i-1)*M+j+1] + t[i*M+j+1] + t[0*M+j+1] + t[0*M+j] + t[0*M+j-1] + t[i*M+j-1];
}
}
else if (i-1 < 0) {
if (j-1 < 0) {
            /* I am the top-left corner */
neighbours = t[(N-1)*M+M-1] + t[(N-1)*M+j] + t[(N-1)*M+j+1] + t[i*M+j+1] + t[(i+1)*M+j+1] + t[(i+1)*M+j] + t[(i+1)*M+M-1] + t[i*M+M-1];
}
else if (j+1 > M-1) {
            /* I am the top-right corner */
neighbours = t[(N-1)*M+j-1] + t[(N-1)*M+j] + t[(N-1)*M+0] + t[i*M+0] + t[(i+1)*M+0] + t[(i+1)*M+j] + t[(i+1)*M+j-1] + t[i*M+j-1];
}
else{
            /* I am a plain top-row cell */
neighbours = t[(N-1)*M+j-1] + t[(N-1)*M+j] + t[(N-1)*M+j+1] + t[i*M+j+1] + t[(i+1)*M+j+1] + t[(i+1)*M+j] + t[(i+1)*M+j-1] + t[i*M+j-1];
}
}
else if (j-1 < 0) {
        /* I am a plain left-column cell */
neighbours = t[(i-1)*M+M-1] + t[(i-1)*M+j] + t[(i-1)*M+j+1] + t[i*M+j+1] + t[(i+1)*M+j+1] + t[(i+1)*M+j] + t[(i+1)*M+M-1] + t[i*M+M-1];
}
else if (j+1 > M-1) {
        /* I am a plain right-column cell */
neighbours = t[(i-1)*M+j-1] + t[(i-1)*M+j] + t[(i-1)*M+0] + t[i*M+0] + t[(i+1)*M+0] + t[(i+1)*M+j] + t[(i+1)*M+j-1] + t[i*M+j-1];
}
else{
        /* none of my neighbours fall outside the grid */
neighbours = t[(i-1)*M+j-1] + t[(i-1)*M+j] + t[(i-1)*M+j+1] + t[i*M+j+1] + t[(i+1)*M+j+1] + t[(i+1)*M+j] + t[(i+1)*M+j-1] + t[i*M+j-1];
}
        /* game rules applied here */
if (t[x] == ALIVE) {
if (neighbours <= 1 || neighbours >= 4) {
t1[x] = DEAD;
}
else{
t1[x] = ALIVE;
}
}
else if (t[x] == DEAD && neighbours == 3) {
t1[x] = ALIVE;
}
else{
t1[x] = DEAD;
}
}
}
__global__ void initdat(int *t, int *t1, int N, int M, time_t clock){
int x = blockIdx.x * blockDim.x + threadIdx.x;
curandState_t state;
curand_init(clock,x,0,&state);
if (0 <= x && x < N*M ){
t[x] = (curand(&state) % 4) ? DEAD : ALIVE;
t1[x] = DEAD;
}
}
|
d90f654c21d9f2142f74039bf685ef956ae24dbd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "maxpool_layer.h"
#include "hip/hip_runtime.h"
}
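/*
 * Both conflict branches below implement the same scheme: the forward kernel
 * assigns one thread per output element, scans its size x size pooling window,
 * writes the window maximum and records the index of that maximum so the
 * backward kernel can route the gradient back to the winning input element.
 */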
<<<<<<< HEAD
<<<<<<< HEAD
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + 2*pad)/stride;
int w = (in_w + 2*pad)/stride;
=======
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *input, float *output, int *indexes)
{
int h = (in_h-1)/stride + 1;
int w = (in_w-1)/stride + 1;
<<<<<<< HEAD
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
<<<<<<< HEAD
<<<<<<< HEAD
int w_offset = -pad;
int h_offset = -pad;
=======
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
<<<<<<< HEAD
<<<<<<< HEAD
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + 2*pad)/stride;
int w = (in_w + 2*pad)/stride;
=======
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h-1)/stride + 1;
int w = (in_w-1)/stride + 1;
<<<<<<< HEAD
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
<<<<<<< HEAD
<<<<<<< HEAD
int w_offset = -pad;
int h_offset = -pad;
=======
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
<<<<<<< HEAD
<<<<<<< HEAD
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
int h = layer.out_h;
int w = layer.out_w;
=======
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
int h = (layer.h-1)/layer.stride + 1;
int w = (layer.w-1)/layer.stride + 1;
<<<<<<< HEAD
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
int c = layer.c;
    size_t n = h*w*c*layer.batch;
<<<<<<< HEAD
<<<<<<< HEAD
    hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu);
=======
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(hipPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.delta_gpu, state.delta, layer.indexes_gpu);
<<<<<<< HEAD
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
check_error(hipPeekAtLastError());
}
| d90f654c21d9f2142f74039bf685ef956ae24dbd.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "maxpool_layer.h"
#include "cuda.h"
}
<<<<<<< HEAD
<<<<<<< HEAD
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
{
int h = (in_h + 2*pad)/stride;
int w = (in_w + 2*pad)/stride;
=======
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *input, float *output, int *indexes)
{
int h = (in_h-1)/stride + 1;
int w = (in_w-1)/stride + 1;
<<<<<<< HEAD
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
int c = in_c;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
<<<<<<< HEAD
<<<<<<< HEAD
int w_offset = -pad;
int h_offset = -pad;
=======
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
int out_index = j + w*(i + h*(k + c*b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for(l = 0; l < size; ++l){
for(m = 0; m < size; ++m){
int cur_h = h_offset + i*stride + l;
int cur_w = w_offset + j*stride + m;
int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
int valid = (cur_h >= 0 && cur_h < in_h &&
cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
indexes[out_index] = max_i;
}
<<<<<<< HEAD
<<<<<<< HEAD
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h + 2*pad)/stride;
int w = (in_w + 2*pad)/stride;
=======
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, float *delta, float *prev_delta, int *indexes)
{
int h = (in_h-1)/stride + 1;
int w = (in_w-1)/stride + 1;
<<<<<<< HEAD
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
int c = in_c;
int area = (size-1)/stride;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
<<<<<<< HEAD
<<<<<<< HEAD
int w_offset = -pad;
int h_offset = -pad;
=======
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
int w_offset = (-size-1)/2 + 1;
int h_offset = (-size-1)/2 + 1;
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
float d = 0;
int l, m;
for(l = -area; l < area+1; ++l){
for(m = -area; m < area+1; ++m){
int out_w = (j-w_offset)/stride + m;
int out_h = (i-h_offset)/stride + l;
int out_index = out_w + w*(out_h + h*(k + c*b));
int valid = (out_w >= 0 && out_w < w &&
out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
<<<<<<< HEAD
<<<<<<< HEAD
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
int h = layer.out_h;
int w = layer.out_w;
=======
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
int h = (layer.h-1)/layer.stride + 1;
int w = (layer.w-1)/layer.stride + 1;
<<<<<<< HEAD
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
int c = layer.c;
size_t n = h*w*c*layer.batch;
<<<<<<< HEAD
<<<<<<< HEAD
forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu);
=======
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, state.input, layer.output_gpu, layer.indexes_gpu);
check_error(cudaPeekAtLastError());
}
extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network_state state)
{
size_t n = layer.h*layer.w*layer.c*layer.batch;
backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.delta_gpu, state.delta, layer.indexes_gpu);
<<<<<<< HEAD
>>>>>>> b5b3d7367411302dd6e73c8fe583d6860a786445
=======
>>>>>>> 07267f401b3d9c82c5f695f932c9f504d2b6a592
check_error(cudaPeekAtLastError());
}
|
598ab80091bef831805dbb9186d21ce23266b0a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void suma_vectores_cubo(int *d_v1, int *d_v2, int *d_vr)
{
int id_vector = blockIdx.x * 8 + threadIdx.x;
printf("Id: %d\n", id_vector);
d_vr[id_vector] = d_v1[id_vector] + d_v2[id_vector];
}
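// Each thread adds one pair of elements; with 3 blocks of 8 threads the
// hard-coded stride of 8 in the index computation covers all 24 elements.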
int main()
{
	// Size variables
const int ARRAY_SIZE = 24;
const int ARRAY_BYTES = 24 * sizeof(int);
	// Declare the input vectors and the result vector
int h_v1[ARRAY_SIZE];
int h_v2[ARRAY_SIZE];
int h_vr[ARRAY_SIZE];
	// Fill v1 and v2:
for (int i = 0; i < ARRAY_SIZE; i++)
{
h_v1[i] = i;
h_v2[i] = i;
h_vr[i] = 0;
}
	// Declare pointers to GPU memory
int * d_v1;
int * d_v2;
int * d_vr;
	// Allocate GPU memory
hipMalloc((void**)&d_v1, ARRAY_BYTES);
hipMalloc((void**)&d_v2, ARRAY_BYTES);
hipMalloc((void**)&d_vr, ARRAY_BYTES);
	// Transfer the arrays to the GPU:
hipMemcpy(d_v1, h_v1, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_v2, h_v2, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_vr, h_vr, ARRAY_BYTES, hipMemcpyHostToDevice);
	// Launch the kernel
hipLaunchKernelGGL(( suma_vectores_cubo) , dim3(3), dim3(8) , 0, 0, d_v1, d_v2, d_vr);
	// Copy the result back to the host:
hipMemcpy(h_vr, d_vr, ARRAY_BYTES, hipMemcpyDeviceToHost);
for (int i = 0; i < ARRAY_SIZE; i++)
{
printf("V[%d]: %d\n", i, h_vr[i]);
}
return 0;
}
| 598ab80091bef831805dbb9186d21ce23266b0a1.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void suma_vectores_cubo(int *d_v1, int *d_v2, int *d_vr)
{
int id_vector = blockIdx.x * 8 + threadIdx.x;
printf("Id: %d\n", id_vector);
d_vr[id_vector] = d_v1[id_vector] + d_v2[id_vector];
}
int main()
{
	// Size variables
const int ARRAY_SIZE = 24;
const int ARRAY_BYTES = 24 * sizeof(int);
	// Declare the input vectors and the result vector
int h_v1[ARRAY_SIZE];
int h_v2[ARRAY_SIZE];
int h_vr[ARRAY_SIZE];
	// Fill v1 and v2:
for (int i = 0; i < ARRAY_SIZE; i++)
{
h_v1[i] = i;
h_v2[i] = i;
h_vr[i] = 0;
}
	// Declare pointers to GPU memory
int * d_v1;
int * d_v2;
int * d_vr;
	// Allocate GPU memory
cudaMalloc((void**)&d_v1, ARRAY_BYTES);
cudaMalloc((void**)&d_v2, ARRAY_BYTES);
cudaMalloc((void**)&d_vr, ARRAY_BYTES);
	// Transfer the arrays to the GPU:
cudaMemcpy(d_v1, h_v1, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_v2, h_v2, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_vr, h_vr, ARRAY_BYTES, cudaMemcpyHostToDevice);
	// Launch the kernel
suma_vectores_cubo <<< 3, 8 >>> (d_v1, d_v2, d_vr);
	// Copy the result back to the host:
cudaMemcpy(h_vr, d_vr, ARRAY_BYTES, cudaMemcpyDeviceToHost);
for (int i = 0; i < ARRAY_SIZE; i++)
{
printf("V[%d]: %d\n", i, h_vr[i]);
}
return 0;
}
|
c5bc70bc2c69356426f901f93c73988e800155f4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "common.h"
#include "cuda_common.cuh"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
template<unsigned int iblock_size>
__global__ void reduction_kernel_complete_template(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
int * i_data = input + blockDim.x * blockIdx.x * 8;
//unrolling blocks
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
//manual unrolling depending on block size
if (iblock_size >= 1024 && tid < 512)
i_data[tid] += i_data[tid + 512];
__syncthreads();
if (iblock_size >= 512 && tid < 256)
i_data[tid] += i_data[tid + 256];
__syncthreads();
if (iblock_size >= 256 && tid < 128)
i_data[tid] += i_data[tid + 128];
__syncthreads();
if (iblock_size >= 128 && tid < 64)
i_data[tid] += i_data[tid + 64];
__syncthreads();
//unrolling warp
if (tid < 32)
{
volatile int * vsmem = i_data;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
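/*
 * The template parameter iblock_size lets the compiler strip the block-size
 * checks at compile time, each block first folds eight input blocks into one
 * partial sum (the factor-8 unrolling above), and the last 32 partial sums are
 * reduced by a fully unrolled warp without __syncthreads(). The commented-out
 * host code below shows the switch that picks the instantiation matching the
 * launch block size.
 */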
//int main(int argc, char ** argv)
//{
// printf("Running parallel reduction with complete unrolling kernel \n");
//
// int size = 1 << 25;
// int byte_size = size * sizeof(int);
// int block_size = 128;
//
// int * h_input, *h_ref;
// h_input = (int*)malloc(byte_size);
//
// initialize(h_input, size);
//
// int cpu_result = reduction_cpu(h_input, size);
//
// dim3 block(block_size);
// dim3 grid((size / block_size) / 8);
//
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// int temp_array_byte_size = sizeof(int)* grid.x;
//
// h_ref = (int*)malloc(temp_array_byte_size);
//
// int * d_input, *d_temp;
// gpuErrchk(hipMalloc((void**)&d_input, byte_size));
// gpuErrchk(hipMalloc((void**)&d_temp, temp_array_byte_size));
//
// gpuErrchk(hipMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(hipMemcpy(d_input, h_input, byte_size,
// hipMemcpyHostToDevice));
//
// switch (block_size)
// {
// case 1024 :
// reduction_kernel_complete_template <1024> <<< grid, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_kernel_complete_template <512> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_kernel_complete_template <256> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_kernel_complete_template <128> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 64:
// reduction_kernel_complete_template <64> << < grid, block >> > (d_input, d_temp, size);
// break;
// }
//
// gpuErrchk(hipDeviceSynchronize());
// gpuErrchk(hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost));
//
// int gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
//
// compare_results(gpu_result, cpu_result);
//
// gpuErrchk(hipFree(d_input));
// gpuErrchk(hipFree(d_temp));
// free(h_input);
// free(h_ref);
//
// gpuErrchk(hipDeviceReset());
// return 0;
//} | c5bc70bc2c69356426f901f93c73988e800155f4.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "common.h"
#include "cuda_common.cuh"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
template<unsigned int iblock_size>
__global__ void reduction_kernel_complete_template(int * input,
int * temp, int size)
{
int tid = threadIdx.x;
int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
int * i_data = input + blockDim.x * blockIdx.x * 8;
//unrolling blocks
if ((index + 7 * blockDim.x) < size)
{
int a1 = input[index];
int a2 = input[index + blockDim.x];
int a3 = input[index + 2 * blockDim.x];
int a4 = input[index + 3 * blockDim.x];
int a5 = input[index + 4 * blockDim.x];
int a6 = input[index + 5 * blockDim.x];
int a7 = input[index + 6 * blockDim.x];
int a8 = input[index + 7 * blockDim.x];
input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
}
__syncthreads();
//manual unrolling depending on block size
if (iblock_size >= 1024 && tid < 512)
i_data[tid] += i_data[tid + 512];
__syncthreads();
if (iblock_size >= 512 && tid < 256)
i_data[tid] += i_data[tid + 256];
__syncthreads();
if (iblock_size >= 256 && tid < 128)
i_data[tid] += i_data[tid + 128];
__syncthreads();
if (iblock_size >= 128 && tid < 64)
i_data[tid] += i_data[tid + 64];
__syncthreads();
//unrolling warp
if (tid < 32)
{
volatile int * vsmem = i_data;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if (tid == 0)
{
temp[blockIdx.x] = i_data[0];
}
}
//int main(int argc, char ** argv)
//{
// printf("Running parallel reduction with complete unrolling kernel \n");
//
// int size = 1 << 25;
// int byte_size = size * sizeof(int);
// int block_size = 128;
//
// int * h_input, *h_ref;
// h_input = (int*)malloc(byte_size);
//
// initialize(h_input, size);
//
// int cpu_result = reduction_cpu(h_input, size);
//
// dim3 block(block_size);
// dim3 grid((size / block_size) / 8);
//
// printf("Kernel launch parameters || grid : %d, block : %d \n", grid.x, block.x);
//
// int temp_array_byte_size = sizeof(int)* grid.x;
//
// h_ref = (int*)malloc(temp_array_byte_size);
//
// int * d_input, *d_temp;
// gpuErrchk(cudaMalloc((void**)&d_input, byte_size));
// gpuErrchk(cudaMalloc((void**)&d_temp, temp_array_byte_size));
//
// gpuErrchk(cudaMemset(d_temp, 0, temp_array_byte_size));
// gpuErrchk(cudaMemcpy(d_input, h_input, byte_size,
// cudaMemcpyHostToDevice));
//
// switch (block_size)
// {
// case 1024 :
// reduction_kernel_complete_template <1024> <<< grid, block >> > (d_input, d_temp, size);
// break;
// case 512:
// reduction_kernel_complete_template <512> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 256:
// reduction_kernel_complete_template <256> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 128:
// reduction_kernel_complete_template <128> << < grid, block >> > (d_input, d_temp, size);
// break;
// case 64:
// reduction_kernel_complete_template <64> << < grid, block >> > (d_input, d_temp, size);
// break;
// }
//
// gpuErrchk(cudaDeviceSynchronize());
// gpuErrchk(cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost));
//
// int gpu_result = 0;
// for (int i = 0; i < grid.x; i++)
// {
// gpu_result += h_ref[i];
// }
//
// compare_results(gpu_result, cpu_result);
//
// gpuErrchk(cudaFree(d_input));
// gpuErrchk(cudaFree(d_temp));
// free(h_input);
// free(h_ref);
//
// gpuErrchk(cudaDeviceReset());
// return 0;
//} |
33bf71d12d91c2ec5d4c80b5f5d237b23cec12ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
__global__ void myKernel(void){
}
int main(){
 hipLaunchKernelGGL(( myKernel), dim3(1),dim3(1), 0, 0, );
printf("Hello world!\n");
return 0;
}
| 33bf71d12d91c2ec5d4c80b5f5d237b23cec12ee.cu | #include<stdio.h>
__global__ void myKernel(void){
}
int main(){
myKernel<<<1,1>>>();
printf("Hello world!\n");
return 0;
}
|
49ac5c535a9db90e506b859b75dcab2b6b9d70fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* File systematic_gpu.cuh contains the GPU implementation of the systematic resampling.
* This implementation is inspired by the paper by L. M. Murray et. al.:
* Parallel resampling in the particle filter https://arxiv.org/abs/1301.4019
*/
#ifdef __NVCC__
#include "inference/smc/resample/common.cuh"
#include "kernels_hip.cuh"
#include "utils/cuda_error_utils.cuh"
#include "systematic_gpu.cuh"
#include <hiprand/hiprand_kernel.h>
#include <thrust/scan.h>
HOST DEV void prefixSumNaive(floating_t* w, resampler_t resampler, int numParticles) {
resampler.prefixSum[0] = w[0];
for(int i = 1; i < numParticles; i++)
resampler.prefixSum[i] = resampler.prefixSum[i-1] + w[i];
}
HOST std::tuple<floating_t, floating_t> calcLogWeightSumAndESSGpu(floating_t* w, resampler_t& resampler, int numParticles, int numBlocks, int numThreadsPerBlock) {
floating_t maxLogWeight = *(thrust::max_element(thrust::device, w, w + numParticles));
resampler.maxLogWeight = maxLogWeight;
// floating_t maxLogWeight = maxNaive(w, numParticles);
hipLaunchKernelGGL(( scaleExpWeightsAndSquareWeightsKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, w, numParticles, maxLogWeight, resampler.wSquared);
hipDeviceSynchronize();
thrust::inclusive_scan(thrust::device, w, w + numParticles, resampler.prefixSum); // prefix sum
// prefixSumNaive(w, resampler, numParticles);
// At this point: w are scaled weights (not log), prefixSum[numParticles-1] is the scaled sum
floating_t ess = calcESSHelperGpu(w, resampler.prefixSum[numParticles - 1], resampler.wSquared, numParticles);
hipLaunchKernelGGL(( renormaliseKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, w, resampler.prefixSum, numParticles, maxLogWeight);
hipDeviceSynchronize();
// return resampler.prefixSum[numParticles - 1];
return std::make_tuple(resampler.prefixSum[numParticles - 1], ess);
}
HOST floating_t calcESSHelperGpu(floating_t* scaledW, floating_t scaledWeightSum, floating_t* scaledWSquared, int numParticles) {
// Kernel saving new square exp log weights
// expSquareWeightsKernel<<<numBlocks, numThreadsPerBlock>>>(w, resampler.wSquared, resampler.maxLogWeight, numParticles);
// Thrust for summing squared weights
hipDeviceSynchronize();
floating_t wSumOfSquares = (thrust::reduce(thrust::device, scaledWSquared, scaledWSquared + numParticles));
floating_t wSumSquared = scaledWeightSum * scaledWeightSum;
return wSumSquared / wSumOfSquares;
}
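/*
 * decideAncestors: turns the inclusive prefix sum of the weights plus a single
 * uniform draw u into a cumulative-offspring count per particle, then expands
 * those counts into explicit ancestor indices that the copy kernel uses to
 * duplicate or drop particles.
 */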
HOST DEV void decideAncestors(resampler_t& resampler, floating_t u, int numParticles, int numBlocks, int numThreadsPerBlock) {
hipLaunchKernelGGL(( systematicCumulativeOffspringKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, resampler.prefixSum, resampler.cumulativeOffspring, u, numParticles);
hipLaunchKernelGGL(( cumulativeOffspringToAncestorKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, resampler.cumulativeOffspring, resampler.ancestor, numParticles);
}
HOST DEV void postUniform(particles_t& particles, resampler_t& resampler, floating_t u, int numParticles, int numBlocks, int numThreadsPerBlock) {
decideAncestors(resampler, u, numParticles, numBlocks, numThreadsPerBlock);
// Copy states
hipLaunchKernelGGL(( copyStatesKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, resampler.auxParticles, particles, resampler.ancestor, numParticles, resampler.progStateSize);
hipDeviceSynchronize();
// Swap pointers
particles_t tempAux = resampler.auxParticles;
resampler.auxParticles = particles;
particles = tempAux;
}
DEV void resampleSystematicGpuNested(hiprandState_t* randState, particles_t& particles, resampler_t& resampler, int numParticles, int numBlocks) {
floating_t u = uniform(randState, 0.0f, 1.0f);
postUniform(particles, resampler, u, numParticles, numBlocks, NUM_THREADS_PER_BLOCK_NESTED);
}
void resampleSystematicGpu(particles_t& particles, resampler_t& resampler, int numParticles, int numBlocks) {
floating_t u = uniformCPU(generatorRes);
postUniform(particles, resampler, u, numParticles, numBlocks, NUM_THREADS_PER_BLOCK);
}
void normaliseWeightsGpu(floating_t* w, floating_t logWeightSum, int numParticles, int numBlocks, int numThreadsPerBlock) {
hipLaunchKernelGGL(( normaliseWeightsKernel), dim3(numBlocks), dim3(numThreadsPerBlock), 0, 0, w, logWeightSum, numParticles);
hipDeviceSynchronize();
}
#endif | 49ac5c535a9db90e506b859b75dcab2b6b9d70fb.cu |
/*
* File systematic_gpu.cuh contains the GPU implementation of the systematic resampling.
* This implementation is inspired by the paper by L. M. Murray et. al.:
* Parallel resampling in the particle filter https://arxiv.org/abs/1301.4019
*/
#ifdef __NVCC__
#include "inference/smc/resample/common.cuh"
#include "kernels.cuh"
#include "utils/cuda_error_utils.cuh"
#include "systematic_gpu.cuh"
#include <curand_kernel.h>
#include <thrust/scan.h>
HOST DEV void prefixSumNaive(floating_t* w, resampler_t resampler, int numParticles) {
resampler.prefixSum[0] = w[0];
for(int i = 1; i < numParticles; i++)
resampler.prefixSum[i] = resampler.prefixSum[i-1] + w[i];
}
HOST std::tuple<floating_t, floating_t> calcLogWeightSumAndESSGpu(floating_t* w, resampler_t& resampler, int numParticles, int numBlocks, int numThreadsPerBlock) {
floating_t maxLogWeight = *(thrust::max_element(thrust::device, w, w + numParticles));
resampler.maxLogWeight = maxLogWeight;
// floating_t maxLogWeight = maxNaive(w, numParticles);
scaleExpWeightsAndSquareWeightsKernel<<<numBlocks, numThreadsPerBlock>>>(w, numParticles, maxLogWeight, resampler.wSquared);
cudaDeviceSynchronize();
thrust::inclusive_scan(thrust::device, w, w + numParticles, resampler.prefixSum); // prefix sum
// prefixSumNaive(w, resampler, numParticles);
// At this point: w are scaled weights (not log), prefixSum[numParticles-1] is the scaled sum
floating_t ess = calcESSHelperGpu(w, resampler.prefixSum[numParticles - 1], resampler.wSquared, numParticles);
renormaliseKernel<<<numBlocks, numThreadsPerBlock>>>(w, resampler.prefixSum, numParticles, maxLogWeight);
cudaDeviceSynchronize();
// return resampler.prefixSum[numParticles - 1];
return std::make_tuple(resampler.prefixSum[numParticles - 1], ess);
}
HOST floating_t calcESSHelperGpu(floating_t* scaledW, floating_t scaledWeightSum, floating_t* scaledWSquared, int numParticles) {
// Kernel saving new square exp log weights
// expSquareWeightsKernel<<<numBlocks, numThreadsPerBlock>>>(w, resampler.wSquared, resampler.maxLogWeight, numParticles);
// Thrust for summing squared weights
cudaDeviceSynchronize();
floating_t wSumOfSquares = (thrust::reduce(thrust::device, scaledWSquared, scaledWSquared + numParticles));
floating_t wSumSquared = scaledWeightSum * scaledWeightSum;
return wSumSquared / wSumOfSquares;
}
HOST DEV void decideAncestors(resampler_t& resampler, floating_t u, int numParticles, int numBlocks, int numThreadsPerBlock) {
systematicCumulativeOffspringKernel<<<numBlocks, numThreadsPerBlock>>>(resampler.prefixSum, resampler.cumulativeOffspring, u, numParticles);
cumulativeOffspringToAncestorKernel<<<numBlocks, numThreadsPerBlock>>>(resampler.cumulativeOffspring, resampler.ancestor, numParticles);
}
HOST DEV void postUniform(particles_t& particles, resampler_t& resampler, floating_t u, int numParticles, int numBlocks, int numThreadsPerBlock) {
decideAncestors(resampler, u, numParticles, numBlocks, numThreadsPerBlock);
// Copy states
copyStatesKernel<<<numBlocks, numThreadsPerBlock>>>(resampler.auxParticles, particles, resampler.ancestor, numParticles, resampler.progStateSize);
cudaDeviceSynchronize();
// Swap pointers
particles_t tempAux = resampler.auxParticles;
resampler.auxParticles = particles;
particles = tempAux;
}
DEV void resampleSystematicGpuNested(curandState* randState, particles_t& particles, resampler_t& resampler, int numParticles, int numBlocks) {
floating_t u = uniform(randState, 0.0f, 1.0f);
postUniform(particles, resampler, u, numParticles, numBlocks, NUM_THREADS_PER_BLOCK_NESTED);
}
void resampleSystematicGpu(particles_t& particles, resampler_t& resampler, int numParticles, int numBlocks) {
floating_t u = uniformCPU(generatorRes);
postUniform(particles, resampler, u, numParticles, numBlocks, NUM_THREADS_PER_BLOCK);
}
void normaliseWeightsGpu(floating_t* w, floating_t logWeightSum, int numParticles, int numBlocks, int numThreadsPerBlock) {
normaliseWeightsKernel<<<numBlocks, numThreadsPerBlock>>>(w, logWeightSum, numParticles);
cudaDeviceSynchronize();
}
#endif |
4698fadb6fe3fb5c5f81c2ed9399defa1f02e2d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h> // CUDA Math.
#include <thrust/sort.h>
#include <thrust/functional.h>
#include "distance_measurer.h"
namespace radi {
DistanceMeasurer::DistanceMeasurer ()
{ }
DistanceMeasurer::~DistanceMeasurer ()
{
if (dev_num_triangles_)
hipFree(dev_num_triangles_);
if (dev_triangles_)
hipFree(dev_triangles_);
}
void
DistanceMeasurer::setNumTriangles (int num_triangles)
{
num_triangles_ = num_triangles;
hipMalloc ((void **)&dev_num_triangles_, sizeof(int));
hipMemcpy(dev_num_triangles_, &num_triangles_, sizeof(int), hipMemcpyHostToDevice);
}
void
DistanceMeasurer::setTriangles (const std::vector<std::vector<Eigen::Vector3f> > & triangles)
{
int total_size = triangles.size() * 9;
float * host_triangles = (float *)malloc (total_size*sizeof(float));
for (int i = 0; i < triangles.size(); ++i)
{
host_triangles[i*9+0] = triangles[i][0][0];
host_triangles[i*9+1] = triangles[i][0][1];
host_triangles[i*9+2] = triangles[i][0][2];
host_triangles[i*9+3] = triangles[i][1][0];
host_triangles[i*9+4] = triangles[i][1][1];
host_triangles[i*9+5] = triangles[i][1][2];
host_triangles[i*9+6] = triangles[i][2][0];
host_triangles[i*9+7] = triangles[i][2][1];
host_triangles[i*9+8] = triangles[i][2][2];
}
hipMalloc (&dev_triangles_, total_size*sizeof(float));
hipMemcpy (dev_triangles_, host_triangles, total_size*sizeof(float), hipMemcpyHostToDevice);
free (host_triangles);
}
float
DistanceMeasurer::calShortestDistance (const float * point)
{
float * dev_point;
hipMalloc ((void **)&dev_point, 3*sizeof(float));
hipMemcpy(dev_point, point, 3*sizeof(float), hipMemcpyHostToDevice);
float * dev_distances;
hipMalloc ((void **)&dev_distances, num_triangles_*sizeof(float));
hipLaunchKernelGGL(( distPointTriangle), dim3((num_triangles_+255)/256), dim3(256), 0, 0, dev_point, dev_triangles_, dev_num_triangles_, dev_distances);
float * distances = (float *) malloc (num_triangles_*sizeof(float));
hipMemcpy(distances, dev_distances, num_triangles_*sizeof(float), hipMemcpyDeviceToHost);
// std::cout << "Distances: ---------------------------------------------------" << std::endl;
// for (int i = 0; i < num_triangles_; ++i)
// std::cout << distances[i] << std::endl;
thrust::stable_sort (distances, distances+num_triangles_, thrust::less_equal<float> ());
float min_distance = distances[0];
free (distances);
hipFree (dev_point);
hipFree (dev_distances);
return (min_distance);
}
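// One thread per triangle: when the query point projects inside the triangle
// the distance is the point-to-plane distance, otherwise it is the smallest of
// the distances to the three vertices and the three edges. The host then sorts
// the per-triangle results and keeps the minimum.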
// Calculate the distance from a point to a triangle mesh.
__global__ void
distPointTriangle (const float * dev_point, const float * dev_triangles, const int * dev_num_triangles, float * dev_distances)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < dev_num_triangles[0])
{
if (isPointInTriangle (dev_point, &dev_triangles[tid*9]))
{
dev_distances[tid] = distPointPlane (dev_point, &dev_triangles[tid*9]);
}
else
{
dev_distances[tid] = distPointPlane (dev_point, &dev_triangles[tid*9]);
const float * dev_triangle = &dev_triangles[tid*9];
// Calculate the distance from the point to the vertices and segments.
float dev_distance_list[6];
float dev_vertex_0[3];
dev_vertex_0[0] = dev_triangle[0];
dev_vertex_0[1] = dev_triangle[1];
dev_vertex_0[2] = dev_triangle[2];
float dev_vertex_1[3];
dev_vertex_1[0] = dev_triangle[3];
dev_vertex_1[1] = dev_triangle[4];
dev_vertex_1[2] = dev_triangle[5];
float dev_vertex_2[3];
dev_vertex_2[0] = dev_triangle[6];
dev_vertex_2[1] = dev_triangle[7];
dev_vertex_2[2] = dev_triangle[8];
dev_distance_list[0] = distPointPoint (dev_point, dev_vertex_0);
dev_distance_list[1] = distPointPoint (dev_point, dev_vertex_1);
dev_distance_list[2] = distPointPoint (dev_point, dev_vertex_2);
float dev_segment_vertices[6];
dev_segment_vertices[0] = dev_triangle[0];
dev_segment_vertices[1] = dev_triangle[1];
dev_segment_vertices[2] = dev_triangle[2];
dev_segment_vertices[3] = dev_triangle[3];
dev_segment_vertices[4] = dev_triangle[4];
dev_segment_vertices[5] = dev_triangle[5];
dev_distance_list[3] = distPointLineSegment (dev_point, dev_segment_vertices);
dev_segment_vertices[3] = dev_triangle[6];
dev_segment_vertices[4] = dev_triangle[7];
dev_segment_vertices[5] = dev_triangle[8];
dev_distance_list[4] = distPointLineSegment (dev_point, dev_segment_vertices);
dev_segment_vertices[0] = dev_triangle[3];
dev_segment_vertices[1] = dev_triangle[4];
dev_segment_vertices[2] = dev_triangle[5];
dev_distance_list[5] = distPointLineSegment (dev_point, dev_segment_vertices);
float min_distance = dev_distance_list[0];
for (int i = 0; i < 6; ++i)
if (min_distance > dev_distance_list[i])
min_distance = dev_distance_list[i];
dev_distances[tid] = min_distance;
}
}
}
// Calculate the projected point on a plane.
__device__ void
pointProjectionOnPlane (const float * dev_point, const float * dev_triangle_vertices, float * dev_point_projection)
{
float vect_ab[3];
vect_ab[0] = dev_triangle_vertices[3] - dev_triangle_vertices[0];
vect_ab[1] = dev_triangle_vertices[4] - dev_triangle_vertices[1];
vect_ab[2] = dev_triangle_vertices[5] - dev_triangle_vertices[2];
float vect_ac[3];
vect_ac[0] = dev_triangle_vertices[6] - dev_triangle_vertices[0];
vect_ac[1] = dev_triangle_vertices[7] - dev_triangle_vertices[1];
vect_ac[2] = dev_triangle_vertices[8] - dev_triangle_vertices[2];
float normal[3];
normal[0] = -vect_ab[2]*vect_ac[1] + vect_ab[1]*vect_ac[2];
normal[1] = vect_ab[2]*vect_ac[0] - vect_ab[0]*vect_ac[2];
normal[2] = -vect_ab[1]*vect_ac[0] + vect_ab[0]*vect_ac[1];
float normal_norm = norm3df (normal[0], normal[1], normal[2]);
normal[0] /= normal_norm;
normal[1] /= normal_norm;
normal[2] /= normal_norm;
float vect_ap[3];
vect_ap[0] = dev_point[0] - dev_triangle_vertices[0];
vect_ap[1] = dev_point[1] - dev_triangle_vertices[1];
vect_ap[2] = dev_point[2] - dev_triangle_vertices[2];
float dot_ap_normal = vect_ap[0]*normal[0] + vect_ap[1]*normal[1] + vect_ap[2]*normal[2];
dev_point_projection[0] = vect_ap[0] - dot_ap_normal*normal[0];
dev_point_projection[1] = vect_ap[1] - dot_ap_normal*normal[1];
dev_point_projection[2] = vect_ap[2] - dot_ap_normal*normal[2];
}
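// The test below computes the barycentric coordinates (u, v) of the point with
// respect to triangle ABC; the projection lies inside the triangle exactly
// when u >= 0, v >= 0 and u + v <= 1.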
// Detect if the projection of a point is inside the triangle.
__device__ bool
isPointInTriangle (const float * dev_point, const float * dev_triangle_vertices)
{
float vect_ab[3];
vect_ab[0] = dev_triangle_vertices[3] - dev_triangle_vertices[0];
vect_ab[1] = dev_triangle_vertices[4] - dev_triangle_vertices[1];
vect_ab[2] = dev_triangle_vertices[5] - dev_triangle_vertices[2];
float vect_ac[3];
vect_ac[0] = dev_triangle_vertices[6] - dev_triangle_vertices[0];
vect_ac[1] = dev_triangle_vertices[7] - dev_triangle_vertices[1];
vect_ac[2] = dev_triangle_vertices[8] - dev_triangle_vertices[2];
float vect_ap[3];
vect_ap[0] = dev_point[0] - dev_triangle_vertices[0];
vect_ap[1] = dev_point[1] - dev_triangle_vertices[1];
vect_ap[2] = dev_point[2] - dev_triangle_vertices[2];
float dot_ab_ab = vect_ab[0]*vect_ab[0] + vect_ab[1]*vect_ab[1] + vect_ab[2]*vect_ab[2];
float dot_ac_ac = vect_ac[0]*vect_ac[0] + vect_ac[1]*vect_ac[1] + vect_ac[2]*vect_ac[2];
float dot_ab_ac = vect_ab[0]*vect_ac[0] + vect_ab[1]*vect_ac[1] + vect_ab[2]*vect_ac[2];
float dot_ap_ab = vect_ap[0]*vect_ab[0] + vect_ap[1]*vect_ab[1] + vect_ap[2]*vect_ab[2];
float dot_ap_ac = vect_ap[0]*vect_ac[0] + vect_ap[1]*vect_ac[1] + vect_ap[2]*vect_ac[2];
float u = (dot_ac_ac*dot_ap_ab - dot_ab_ac*dot_ap_ac) / (dot_ab_ab*dot_ac_ac - dot_ab_ac*dot_ab_ac);
float v = (dot_ab_ab*dot_ap_ac - dot_ab_ac*dot_ap_ab) / (dot_ab_ab*dot_ac_ac - dot_ab_ac*dot_ab_ac);
return ((u >= 0.0) && (v >= 0.0) && (u + v <= 1.0));
}
// Calculate the distance from a point to another point.
__device__ float
distPointPoint (const float * dev_point_a, const float * dev_point_b)
{
return (norm3df (dev_point_b[0]-dev_point_a[0], dev_point_b[1]-dev_point_a[1], dev_point_b[2]-dev_point_a[2]));
}
// Calculate the distance from a point to a line segment.
__device__ float
distPointLineSegment (const float * dev_point, const float * dev_segment_vertices)
{
float vect_v[3];
vect_v[0] = dev_segment_vertices[3] - dev_segment_vertices[0];
vect_v[1] = dev_segment_vertices[4] - dev_segment_vertices[1];
vect_v[2] = dev_segment_vertices[5] - dev_segment_vertices[2];
float vect_w[3];
vect_w[0] = dev_point[0] - dev_segment_vertices[0];
vect_w[1] = dev_point[1] - dev_segment_vertices[1];
vect_w[2] = dev_point[2] - dev_segment_vertices[2];
float scalar_1 = vect_v[0]*vect_w[0] + vect_v[1]*vect_w[1] + vect_v[2]*vect_w[2];
float scalar_2 = vect_v[0]*vect_v[0] + vect_v[1]*vect_v[1] + vect_v[2]*vect_v[2];
if (scalar_1 <= 0.0)
{
// Projected point on the line is on the left of segmentVertices[0].
return (distPointPoint (dev_point, &dev_segment_vertices[0]));
}
else if (scalar_1 >= scalar_2)
{
// Projected point on the line is on the right of segmentVertices[1].
return (distPointPoint (dev_point, &dev_segment_vertices[3]));
}
else
{
// Projected point on the line is on the line segment.
float point_projection[3];
point_projection[0] = dev_segment_vertices[0] + scalar_1/scalar_2*vect_v[0];
point_projection[1] = dev_segment_vertices[1] + scalar_1/scalar_2*vect_v[1];
point_projection[2] = dev_segment_vertices[2] + scalar_1/scalar_2*vect_v[2];
return (distPointPoint (dev_point, point_projection));
}
}
// Calculate the distance from a point to a plane.
__device__ float
distPointPlane (const float * dev_point, const float * dev_triangle_vertices)
{
float vect_ab[3];
vect_ab[0] = dev_triangle_vertices[3] - dev_triangle_vertices[0];
vect_ab[1] = dev_triangle_vertices[4] - dev_triangle_vertices[1];
vect_ab[2] = dev_triangle_vertices[5] - dev_triangle_vertices[2];
float vect_ac[3];
vect_ac[0] = dev_triangle_vertices[6] - dev_triangle_vertices[0];
vect_ac[1] = dev_triangle_vertices[7] - dev_triangle_vertices[1];
vect_ac[2] = dev_triangle_vertices[8] - dev_triangle_vertices[2];
float normal[3];
normal[0] = -vect_ab[2]*vect_ac[1] + vect_ab[1]*vect_ac[2];
normal[1] = vect_ab[2]*vect_ac[0] - vect_ab[0]*vect_ac[2];
normal[2] = -vect_ab[1]*vect_ac[0] + vect_ab[0]*vect_ac[1];
float normal_norm = norm3df (normal[0], normal[1], normal[2]);
normal[0] /= normal_norm;
normal[1] /= normal_norm;
normal[2] /= normal_norm;
float vect_ap[3];
vect_ap[0] = dev_point[0] - dev_triangle_vertices[0];
vect_ap[1] = dev_point[1] - dev_triangle_vertices[1];
vect_ap[2] = dev_point[2] - dev_triangle_vertices[2];
float dot_ap_normal = vect_ap[0]*normal[0] + vect_ap[1]*normal[1] + vect_ap[2]*normal[2];
if (dot_ap_normal < 0.0)
dot_ap_normal = -dot_ap_normal;
return (fabsf (dot_ap_normal));
}
}
| 4698fadb6fe3fb5c5f81c2ed9399defa1f02e2d0.cu | #include <math.h> // CUDA Math.
#include <thrust/sort.h>
#include <thrust/functional.h>
#include "distance_measurer.h"
namespace radi {
DistanceMeasurer::DistanceMeasurer ()
{ }
DistanceMeasurer::~DistanceMeasurer ()
{
if (dev_num_triangles_)
cudaFree(dev_num_triangles_);
if (dev_triangles_)
cudaFree(dev_triangles_);
}
void
DistanceMeasurer::setNumTriangles (int num_triangles)
{
num_triangles_ = num_triangles;
cudaMalloc ((void **)&dev_num_triangles_, sizeof(int));
cudaMemcpy(dev_num_triangles_, &num_triangles_, sizeof(int), cudaMemcpyHostToDevice);
}
void
DistanceMeasurer::setTriangles (const std::vector<std::vector<Eigen::Vector3f> > & triangles)
{
int total_size = triangles.size() * 9;
float * host_triangles = (float *)malloc (total_size*sizeof(float));
for (int i = 0; i < triangles.size(); ++i)
{
host_triangles[i*9+0] = triangles[i][0][0];
host_triangles[i*9+1] = triangles[i][0][1];
host_triangles[i*9+2] = triangles[i][0][2];
host_triangles[i*9+3] = triangles[i][1][0];
host_triangles[i*9+4] = triangles[i][1][1];
host_triangles[i*9+5] = triangles[i][1][2];
host_triangles[i*9+6] = triangles[i][2][0];
host_triangles[i*9+7] = triangles[i][2][1];
host_triangles[i*9+8] = triangles[i][2][2];
}
cudaMalloc (&dev_triangles_, total_size*sizeof(float));
cudaMemcpy (dev_triangles_, host_triangles, total_size*sizeof(float), cudaMemcpyHostToDevice);
free (host_triangles);
}
float
DistanceMeasurer::calShortestDistance (const float * point)
{
float * dev_point;
cudaMalloc ((void **)&dev_point, 3*sizeof(float));
cudaMemcpy(dev_point, point, 3*sizeof(float), cudaMemcpyHostToDevice);
float * dev_distances;
cudaMalloc ((void **)&dev_distances, num_triangles_*sizeof(float));
distPointTriangle<<<(num_triangles_+255)/256, 256>>> (dev_point, dev_triangles_, dev_num_triangles_, dev_distances);
float * distances = (float *) malloc (num_triangles_*sizeof(float));
cudaMemcpy(distances, dev_distances, num_triangles_*sizeof(float), cudaMemcpyDeviceToHost);
// std::cout << "Distances: ---------------------------------------------------" << std::endl;
// for (int i = 0; i < num_triangles_; ++i)
// std::cout << distances[i] << std::endl;
thrust::stable_sort (distances, distances+num_triangles_, thrust::less_equal<float> ());
float min_distance = distances[0];
free (distances);
cudaFree (dev_point);
cudaFree (dev_distances);
return (min_distance);
}
// Calculate the distance from a point to a triangle mesh.
__global__ void
distPointTriangle (const float * dev_point, const float * dev_triangles, const int * dev_num_triangles, float * dev_distances)
{
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < dev_num_triangles[0])
{
if (isPointInTriangle (dev_point, &dev_triangles[tid*9]))
{
dev_distances[tid] = distPointPlane (dev_point, &dev_triangles[tid*9]);
}
else
{
dev_distances[tid] = distPointPlane (dev_point, &dev_triangles[tid*9]);
const float * dev_triangle = &dev_triangles[tid*9];
// Calculate the distance from the point to the vertices and segments.
float dev_distance_list[6];
float dev_vertex_0[3];
dev_vertex_0[0] = dev_triangle[0];
dev_vertex_0[1] = dev_triangle[1];
dev_vertex_0[2] = dev_triangle[2];
float dev_vertex_1[3];
dev_vertex_1[0] = dev_triangle[3];
dev_vertex_1[1] = dev_triangle[4];
dev_vertex_1[2] = dev_triangle[5];
float dev_vertex_2[3];
dev_vertex_2[0] = dev_triangle[6];
dev_vertex_2[1] = dev_triangle[7];
dev_vertex_2[2] = dev_triangle[8];
dev_distance_list[0] = distPointPoint (dev_point, dev_vertex_0);
dev_distance_list[1] = distPointPoint (dev_point, dev_vertex_1);
dev_distance_list[2] = distPointPoint (dev_point, dev_vertex_2);
float dev_segment_vertices[6];
dev_segment_vertices[0] = dev_triangle[0];
dev_segment_vertices[1] = dev_triangle[1];
dev_segment_vertices[2] = dev_triangle[2];
dev_segment_vertices[3] = dev_triangle[3];
dev_segment_vertices[4] = dev_triangle[4];
dev_segment_vertices[5] = dev_triangle[5];
dev_distance_list[3] = distPointLineSegment (dev_point, dev_segment_vertices);
dev_segment_vertices[3] = dev_triangle[6];
dev_segment_vertices[4] = dev_triangle[7];
dev_segment_vertices[5] = dev_triangle[8];
dev_distance_list[4] = distPointLineSegment (dev_point, dev_segment_vertices);
dev_segment_vertices[0] = dev_triangle[3];
dev_segment_vertices[1] = dev_triangle[4];
dev_segment_vertices[2] = dev_triangle[5];
dev_distance_list[5] = distPointLineSegment (dev_point, dev_segment_vertices);
float min_distance = dev_distance_list[0];
for (int i = 0; i < 6; ++i)
if (min_distance > dev_distance_list[i])
min_distance = dev_distance_list[i];
dev_distances[tid] = min_distance;
}
}
}
// Calculate the projected point on a plane.
__device__ void
pointProjectionOnPlane (const float * dev_point, const float * dev_triangle_vertices, float * dev_point_projection)
{
float vect_ab[3];
vect_ab[0] = dev_triangle_vertices[3] - dev_triangle_vertices[0];
vect_ab[1] = dev_triangle_vertices[4] - dev_triangle_vertices[1];
vect_ab[2] = dev_triangle_vertices[5] - dev_triangle_vertices[2];
float vect_ac[3];
vect_ac[0] = dev_triangle_vertices[6] - dev_triangle_vertices[0];
vect_ac[1] = dev_triangle_vertices[7] - dev_triangle_vertices[1];
vect_ac[2] = dev_triangle_vertices[8] - dev_triangle_vertices[2];
float normal[3];
normal[0] = -vect_ab[2]*vect_ac[1] + vect_ab[1]*vect_ac[2];
normal[1] = vect_ab[2]*vect_ac[0] - vect_ab[0]*vect_ac[2];
normal[2] = -vect_ab[1]*vect_ac[0] + vect_ab[0]*vect_ac[1];
float normal_norm = norm3df (normal[0], normal[1], normal[2]);
normal[0] /= normal_norm;
normal[1] /= normal_norm;
normal[2] /= normal_norm;
float vect_ap[3];
vect_ap[0] = dev_point[0] - dev_triangle_vertices[0];
vect_ap[1] = dev_point[1] - dev_triangle_vertices[1];
vect_ap[2] = dev_point[2] - dev_triangle_vertices[2];
float dot_ap_normal = vect_ap[0]*normal[0] + vect_ap[1]*normal[1] + vect_ap[2]*normal[2];
dev_point_projection[0] = vect_ap[0] - dot_ap_normal*normal[0];
dev_point_projection[1] = vect_ap[1] - dot_ap_normal*normal[1];
dev_point_projection[2] = vect_ap[2] - dot_ap_normal*normal[2];
}
// Detect if the projection of a point is inside the triangle.
__device__ bool
isPointInTriangle (const float * dev_point, const float * dev_triangle_vertices)
{
float vect_ab[3];
vect_ab[0] = dev_triangle_vertices[3] - dev_triangle_vertices[0];
vect_ab[1] = dev_triangle_vertices[4] - dev_triangle_vertices[1];
vect_ab[2] = dev_triangle_vertices[5] - dev_triangle_vertices[2];
float vect_ac[3];
vect_ac[0] = dev_triangle_vertices[6] - dev_triangle_vertices[0];
vect_ac[1] = dev_triangle_vertices[7] - dev_triangle_vertices[1];
vect_ac[2] = dev_triangle_vertices[8] - dev_triangle_vertices[2];
float vect_ap[3];
vect_ap[0] = dev_point[0] - dev_triangle_vertices[0];
vect_ap[1] = dev_point[1] - dev_triangle_vertices[1];
vect_ap[2] = dev_point[2] - dev_triangle_vertices[2];
float dot_ab_ab = vect_ab[0]*vect_ab[0] + vect_ab[1]*vect_ab[1] + vect_ab[2]*vect_ab[2];
float dot_ac_ac = vect_ac[0]*vect_ac[0] + vect_ac[1]*vect_ac[1] + vect_ac[2]*vect_ac[2];
float dot_ab_ac = vect_ab[0]*vect_ac[0] + vect_ab[1]*vect_ac[1] + vect_ab[2]*vect_ac[2];
float dot_ap_ab = vect_ap[0]*vect_ab[0] + vect_ap[1]*vect_ab[1] + vect_ap[2]*vect_ab[2];
float dot_ap_ac = vect_ap[0]*vect_ac[0] + vect_ap[1]*vect_ac[1] + vect_ap[2]*vect_ac[2];
float u = (dot_ac_ac*dot_ap_ab - dot_ab_ac*dot_ap_ac) / (dot_ab_ab*dot_ac_ac - dot_ab_ac*dot_ab_ac);
float v = (dot_ab_ab*dot_ap_ac - dot_ab_ac*dot_ap_ab) / (dot_ab_ab*dot_ac_ac - dot_ab_ac*dot_ab_ac);
return ((u >= 0.0) && (v >= 0.0) && (u + v <= 1.0));
}
// Calculate the distance from a point to another point.
__device__ float
distPointPoint (const float * dev_point_a, const float * dev_point_b)
{
return (norm3df (dev_point_b[0]-dev_point_a[0], dev_point_b[1]-dev_point_a[1], dev_point_b[2]-dev_point_a[2]));
}
// Calculate the distance from a point to a line segment.
__device__ float
distPointLineSegment (const float * dev_point, const float * dev_segment_vertices)
{
float vect_v[3];
vect_v[0] = dev_segment_vertices[3] - dev_segment_vertices[0];
vect_v[1] = dev_segment_vertices[4] - dev_segment_vertices[1];
vect_v[2] = dev_segment_vertices[5] - dev_segment_vertices[2];
float vect_w[3];
vect_w[0] = dev_point[0] - dev_segment_vertices[0];
vect_w[1] = dev_point[1] - dev_segment_vertices[1];
vect_w[2] = dev_point[2] - dev_segment_vertices[2];
float scalar_1 = vect_v[0]*vect_w[0] + vect_v[1]*vect_w[1] + vect_v[2]*vect_w[2];
float scalar_2 = vect_v[0]*vect_v[0] + vect_v[1]*vect_v[1] + vect_v[2]*vect_v[2];
if (scalar_1 <= 0.0)
{
// Projected point on the line is on the left of segmentVertices[0].
return (distPointPoint (dev_point, &dev_segment_vertices[0]));
}
else if (scalar_1 >= scalar_2)
{
// Projected point on the line is on the right of segmentVertices[1].
return (distPointPoint (dev_point, &dev_segment_vertices[3]));
}
else
{
// Projected point on the line is on the line segment.
float point_projection[3];
point_projection[0] = dev_segment_vertices[0] + scalar_1/scalar_2*vect_v[0];
point_projection[1] = dev_segment_vertices[1] + scalar_1/scalar_2*vect_v[1];
point_projection[2] = dev_segment_vertices[2] + scalar_1/scalar_2*vect_v[2];
return (distPointPoint (dev_point, point_projection));
}
}
// Calculate the distance from a point to a plane.
__device__ float
distPointPlane (const float * dev_point, const float * dev_triangle_vertices)
{
float vect_ab[3];
vect_ab[0] = dev_triangle_vertices[3] - dev_triangle_vertices[0];
vect_ab[1] = dev_triangle_vertices[4] - dev_triangle_vertices[1];
vect_ab[2] = dev_triangle_vertices[5] - dev_triangle_vertices[2];
float vect_ac[3];
vect_ac[0] = dev_triangle_vertices[6] - dev_triangle_vertices[0];
vect_ac[1] = dev_triangle_vertices[7] - dev_triangle_vertices[1];
vect_ac[2] = dev_triangle_vertices[8] - dev_triangle_vertices[2];
float normal[3];
normal[0] = -vect_ab[2]*vect_ac[1] + vect_ab[1]*vect_ac[2];
normal[1] = vect_ab[2]*vect_ac[0] - vect_ab[0]*vect_ac[2];
normal[2] = -vect_ab[1]*vect_ac[0] + vect_ab[0]*vect_ac[1];
float normal_norm = norm3df (normal[0], normal[1], normal[2]);
normal[0] /= normal_norm;
normal[1] /= normal_norm;
normal[2] /= normal_norm;
float vect_ap[3];
vect_ap[0] = dev_point[0] - dev_triangle_vertices[0];
vect_ap[1] = dev_point[1] - dev_triangle_vertices[1];
vect_ap[2] = dev_point[2] - dev_triangle_vertices[2];
float dot_ap_normal = vect_ap[0]*normal[0] + vect_ap[1]*normal[1] + vect_ap[2]*normal[2];
if (dot_ap_normal < 0.0)
dot_ap_normal = -dot_ap_normal;
return (fabsf (dot_ap_normal));
}
}
|
3002d138d7e49c7aba504834bcdb030f9979c159.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DEFINEGLOBALSHERE
#include "gmatrix.h"
void initialize_globals()
{
for(int i=0;i<MAX_DEVICE;i++) {
total_states[i]=0;
threads_per_block[i]=0;
dev_state_set[i]=0;
dev_cublas_set[i]=0;
};
currentDevice=0;
}
SEXP get_globals()
{
int deviceCount = 0;
int i;
hipGetDeviceCount(&deviceCount);
SEXP ret, ret_total_states,ret_threads_per_block,ret_dev_state_set,ret_dev_cublas_set,ret_currentDevice;
PROTECT(ret = allocVector(VECSXP, deviceCount));
PROTECT(ret_total_states = allocVector(INTSXP, deviceCount));
PROTECT(ret_threads_per_block = allocVector(INTSXP, deviceCount));
PROTECT(ret_dev_state_set = allocVector(INTSXP, deviceCount));
PROTECT(ret_dev_cublas_set = allocVector(INTSXP, deviceCount));
PROTECT(ret_currentDevice = allocVector(INTSXP, 1));
for(i=0;i<deviceCount;i++) {
INTEGER(ret_total_states)[i]=total_states[i];
INTEGER(ret_threads_per_block)[i]=threads_per_block[i];
INTEGER(ret_dev_state_set)[i]=dev_state_set[i];
INTEGER(ret_dev_cublas_set)[i]=dev_cublas_set[i];
}
INTEGER(ret_currentDevice)[0]=currentDevice;
SET_VECTOR_ELT(ret, 0, ret_total_states);
SET_VECTOR_ELT(ret, 1, ret_threads_per_block);
SET_VECTOR_ELT(ret, 2, ret_dev_state_set);
SET_VECTOR_ELT(ret, 3, ret_dev_cublas_set);
SET_VECTOR_ELT(ret, 4, ret_currentDevice);
/*GLOBAL int total_states[MAX_DEVICE];
GLOBAL hiprandState_t* dev_states[MAX_DEVICE];
GLOBAL int threads_per_block[MAX_DEVICE];
GLOBAL int dev_state_set[MAX_DEVICE];
GLOBAL int dev_cublas_set[MAX_DEVICE];
GLOBAL int currentDevice;*/
UNPROTECT(6);
return(ret);
}
SEXP get_device()
{
SEXP ret;
PROTECT(ret = allocVector(INTSXP, 1));
INTEGER(ret)[0]=currentDevice;
UNPROTECT(1);
return(ret);
}
void free_dev_states(int *silent)
{
hipError_t status1;
if(dev_state_set[currentDevice]==1) {
if(silent[0]==0)
Rprintf("Deleting old states on device %d.\n", currentDevice);
status1=hipFree((dev_states[currentDevice]));
if (status1 != hipSuccess ) {
error("CUDA memory free error in 'free_(dev_states[currentDevice]).' (%d) \n", (int) status1);
return;
}
}
}
void set_threads_per_block(int *tpb) {
threads_per_block[currentDevice]=tpb[0];
}
/*
void set_c(double *in_c)
{
c1=in_c[0];
c2=in_c[1];
c3=in_c[2];
}
void get_c(double *in_c)
{
in_c[0]=c1;
in_c[1]=c2;
in_c[2]=c3;
}
void set_(total_states[currentDevice])(int *in_(total_states[currentDevice]))
{
(total_states[currentDevice])=in_(total_states[currentDevice])[0];
}*/
/*
void check_started()
{
if(started==0L)
error("GPU device has not yet been selected. Please use listDevices() and setDevice() to select a divice.")
}*/
/* do some setup*/
__global__ void kernel_setup_curand(hiprandState_t *state, int seed, int n)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
/* Each thread gets same seed , a different sequence number - no offset */
if(id<n)
hiprand_init(seed, id, 0, &state[id]) ;
}
SEXP setup_curand(SEXP in_total_states, SEXP in_seed, SEXP in_silent, SEXP in_force)
{ //check_started();
int my_total_states=INTEGER(in_total_states)[0];
int force = INTEGER(in_force)[0];
int silent = INTEGER(in_silent)[0];
int seed=INTEGER(in_seed)[0];
hipError_t cudaStat;
int doit;
if(force==1)
doit=1;
else if(dev_state_set[currentDevice]==0)
doit=1;
else if(total_states[currentDevice]!=my_total_states)
doit=1;
else doit=0;
if(doit==1) {
if(dev_state_set[currentDevice]==1) {
if(silent==0)
Rprintf("Deleting old states on device %d.\n", currentDevice);
if((dev_states[currentDevice])!=NULL) {
cudaStat=hipFree((dev_states[currentDevice]));
if (cudaStat != hipSuccess ) {
error("CUDA memory free error in 'setup_curand.' (%d) \n", (int) cudaStat);
}
}
}
total_states[currentDevice]=my_total_states;
if(silent==0)
Rprintf("Creating new states on device %d.\n", currentDevice);
/* Allocate space for prng states on device */
cudaStat = hipMalloc (( void **)&(dev_states[currentDevice]), (total_states[currentDevice])*sizeof(hiprandState_t));
if (cudaStat != hipSuccess ) {
error("Allocation error from 'setup_curand.' (%d)'\n", (int) cudaStat);
}
/* Setup prng states */
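/* integer ceiling division: enough blocks so that every one of the total_states RNG states gets its own thread */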
int blocksPerGrid = ((total_states[currentDevice]) + (threads_per_block[currentDevice]) - 1) / (threads_per_block[currentDevice]);
hipLaunchKernelGGL(( kernel_setup_curand), dim3(blocksPerGrid), dim3((threads_per_block[currentDevice])), 0, 0, (dev_states[currentDevice]), seed, (total_states[currentDevice]));
cudaStat = hipDeviceSynchronize();
if (cudaStat != hipSuccess ) {
error("Kernal error from 'setup_curand.' (%d)'\n", (int) cudaStat);
}
dev_state_set[currentDevice]=1;
}
return in_total_states;
}
void startCublas(int* silent) { // must be called with .C interface
hipblasStatus_t status1;
if(dev_cublas_set[currentDevice]==0) {
if(silent[0]==0)
Rprintf("Starting cublas on device %d.\n", currentDevice);
status1 = hipblasCreate(&(handle[currentDevice]));
if (status1 != HIPBLAS_STATUS_SUCCESS) {
error("CUBLAS initialization error\n");
}
dev_cublas_set[currentDevice]=1;
}
}
void stopCublas(int* silent) {
hipblasStatus_t status1;
//check_started();
if(dev_cublas_set[currentDevice]!=0) {
if(silent[0]==0)
Rprintf("Shutting down cublas on device %d", currentDevice);
status1 = hipblasDestroy((handle[currentDevice]));
if (status1 != HIPBLAS_STATUS_SUCCESS) {
warning("CUBLAS shutdown error\n");
}
}
}
/*
void RlistDevices(int* curdevice, int *memory, int *total, int *silent) {
int deviceCount = 0;
int i;
hipDeviceProp_t deviceProp;
hipGetDeviceCount(&deviceCount);
if(deviceCount>20)
error("to many devices to list.");
for(i=0;i<deviceCount;i++) {
hipGetDeviceProperties(&deviceProp, i);
memory[i]=deviceProp.totalGlobalMem ;
if(silent[0]==0) {
if(current[0]==i)
Rprintf("%d - \"%s\" (current device)\n", i, deviceProp.name);
else
Rprintf("%d - \"%s\"\n", i, deviceProp.name);
Rprintf(" Total global memory: %d\n", deviceProp.totalGlobalMem );
Rprintf(" Multiprocessor count: %d\n", deviceProp.multiProcessorCount);
}
}
}
*/
void setDevice(int *device, int *silent) {
hipError_t status1;
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
#ifdef DEBUG
Rprintf("%d %d",
deviceCount,device[0]);
#endif
if((device[0] < 0) || (device[0] >= deviceCount))
error("The gpu id (%d) number is not valid.",device[0]);
#ifdef DEBUG
Rprintf("here");
#endif
status1 = hipSetDevice(device[0]);
if (status1 != hipSuccess) {
if(status1 == hipErrorSetOnActiveProcess)
error("Active process. Can't set device.\n");
else if(status1 == hipErrorInvalidDevice)
error("Invalid Device\n");
else
error("Unknown errors\n");
} else {
currentDevice=device[0];
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device[0]);
if(silent[0]==0)
Rprintf("Now using device %d - \"%s\"\n", device[0], deviceProp.name);
}
/*
GLOBAL __device__ int CUDA_R_Na_int;
GLOBAL __device__ double CUDA_R_Na_double;
GLOBAL __device__ float CUDA_R_Na_float;
R defines the following
void attribute_hidden InitArithmetic()
{
R_NaInt = INT_MIN;
R_NaN = 0.0/R_Zero_Hack;
R_NaReal = R_ValueOfNA();
R_PosInf = 1.0/R_Zero_Hack;
R_NegInf = -1.0/R_Zero_Hack;
}*/
float R_NaFloat = (float) R_NaReal;
hipMemcpyToSymbol(CUDA_R_Na_int, &R_NaInt, sizeof(int));
hipMemcpyToSymbol(CUDA_R_Na_float, &R_NaFloat, sizeof(float));
hipMemcpyToSymbol(CUDA_R_Na_double, &R_NaReal, sizeof(double));
}
void deviceReset() {
hipError_t cudaStat;
cudaStat=hipDeviceReset();
CUDA_ERROR;
}
void setFlagSpin() {
hipError_t cudaStat;
cudaStat= hipSetDeviceFlags(hipDeviceScheduleSpin);
CUDA_ERROR;
}
void setFlagYield() {
hipError_t cudaStat;
cudaStat= hipSetDeviceFlags(hipDeviceScheduleYield);
CUDA_ERROR;
}
void setFlagBlock() {
hipError_t cudaStat;
cudaStat= hipSetDeviceFlags(hipDeviceScheduleBlockingSync);
CUDA_ERROR;
}
/*
void d_PrintMatrix(double *d_matrix,int rows, int cols, int startRow, int stopRow) {
double *matrix = Calloc(rows*cols, double);
if (matrix == NULL ) {
Rprintf("d_PrintMatrix: Could not allocate memory.");
} else {
hipblasGetMatrix(rows, cols, sizeof(double), d_matrix, rows, matrix, rows);
PrintMatrix(matrix, rows, cols, startRow, stopRow);
Free(matrix);
}
}
void PrintMatrix(double matrix[], int rows, int cols, int startRow, int stopRow)
{
int r,c;
int row_stop= min(rows,stopRow);
Rprintf("Matrix is: %d x %d \n", rows, cols);
for(r=startRow;r<row_stop;r++) {
Rprintf("[%3d]", r);
for(c=0; c<cols;c++) {
// if( abs(matrix[c*rows + r]) > 100000)
// Rprintf("%1.10f ", matrix[c*rows + r]);
// else
Rprintf(" %e ", matrix[c*rows + r]);
}
Rprintf("\n");
}
}*/
void check_mem(int *freer, int *totr, int *silent) {
size_t free, total;
hipMemGetInfo(&free,&total);
if(silent[0]==0)
Rprintf("%d MB free out of %d MB total.\n",free/1048576,total/1048576);
freer[0]=free;
totr[0]=total;
//mem[0]=(int) free;
//mem[1]=(int) total;
}
SEXP get_device_info(SEXP property)
{
int deviceCount = 0;
int i;
hipDeviceProp_t deviceProp;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess)
{
error("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id));
}
SEXP ret;
#define LOOK(MPROP,MDPROP) \
if(strcmp(CHAR(STRING_ELT(property, 0)), #MPROP) == 0) {\
PROTECT(ret = allocVector(INTSXP, deviceCount));\
for(i=0;i<deviceCount;i++) {\
hipGetDeviceProperties(&deviceProp, i);\
INTEGER(ret)[i] = deviceProp.MDPROP ;\
}\
}
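/* LOOK(name, field) expands to an if-branch: when the requested property string matches
name, it queries every device and returns an integer vector of deviceProp.field values;
the chain below covers the scalar and array-indexed properties. */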
if(strcmp(CHAR(STRING_ELT(property, 0)), "name") == 0) {
PROTECT(ret = allocVector(STRSXP, deviceCount));
for(i=0;i<deviceCount;i++) {
hipGetDeviceProperties(&deviceProp, i);
SET_STRING_ELT(ret, i, mkChar(deviceProp.name));
}
} else LOOK(totalGlobalMem,totalGlobalMem)
else LOOK(sharedMemPerBlock,sharedMemPerBlock)
else LOOK(regsPerBlock,regsPerBlock)
else LOOK(warpSize,warpSize)
else LOOK(memPitch,memPitch)
else LOOK(maxThreadsPerBlock,maxThreadsPerBlock)
else LOOK(maxThreadsDim0,maxThreadsDim[0])
else LOOK(maxThreadsDim1,maxThreadsDim[1])
else LOOK(maxThreadsDim2,maxThreadsDim[2])
else LOOK(maxGridSize0,maxGridSize[0])
else LOOK(maxGridSize1,maxGridSize[1])
else LOOK(maxGridSize2,maxGridSize[2])
else LOOK(clockRate,clockRate)
else LOOK(totalConstMem,totalConstMem)
else LOOK(major,major)
else LOOK(minor,minor)
else LOOK(textureAlignment,textureAlignment)
else LOOK(deviceOverlap,deviceOverlap)
else LOOK(multiProcessorCount,multiProcessorCount)
else LOOK(kernelExecTimeoutEnabled,kernelExecTimeoutEnabled)
else LOOK(integrated,integrated)
else LOOK(canMapHostMemory,canMapHostMemory)
else LOOK(computeMode,computeMode)
else LOOK(maxTexture1D,maxTexture1D)
else LOOK(maxTexture2D0,maxTexture2D[0])
else LOOK(maxTexture2D1,maxTexture2D[1])
else LOOK(maxTexture3D0,maxTexture3D[0])
else LOOK(maxTexture3D1,maxTexture3D[1])
else LOOK(maxTexture3D2,maxTexture3D[2])
else LOOK(maxTexture1DLayered0,maxTexture1DLayered[0])
else LOOK(maxTexture1DLayered1,maxTexture1DLayered[1])
else LOOK(maxTexture2DLayered0,maxTexture2DLayered[0])
else LOOK(maxTexture2DLayered1,maxTexture2DLayered[1])
else LOOK(maxTexture2DLayered2,maxTexture2DLayered[2])
else LOOK(surfaceAlignment,surfaceAlignment)
else LOOK(concurrentKernels,concurrentKernels)
else LOOK(ECCEnabled,ECCEnabled)
else LOOK(pciBusID,pciBusID)
else LOOK(pciDeviceID,pciDeviceID)
else LOOK(pciDomainID,pciDomainID)
else LOOK(tccDriver,tccDriver)
else LOOK(asyncEngineCount,asyncEngineCount)
else LOOK(unifiedAddressing,unifiedAddressing)
else LOOK(memoryClockRate,memoryClockRate)
else LOOK(memoryBusWidth,memoryBusWidth)
else LOOK(l2CacheSize,l2CacheSize)
else LOOK(maxThreadsPerMultiProcessor,maxThreadsPerMultiProcessor)
else
error("Property not recognized.");
UNPROTECT(1);
return(ret);
}
//void call_gc() {
// SEXP s, t;
// PROTECT(t1 = s1 = allocList(2));
// PROTECT(t2 = s2 = allocList(3));
//
// SET_TYPEOF(s1, LANGSXP);
// SET_TYPEOF(s2, LANGSXP);
//
// SETCAR(t1, install(".Internal")); t1 = CDR(t1);
// SETCAR(t2, install("gc")); t2 = CDR(t2);
//
// SETCAR(t, ScalarInteger(digits));
// SET_TAG(t, install("digits"));
// eval(s, env);
// UNPROTECT(1);
//}
/*void
R_init_mylib(DllInfo *info)
{
Rprintf("Starting cublas...");
startCublas();
}
void
R_unload_mylib(DllInfo *info)
{
Rprintf("Stoping cublas...");
stopCublas();
}
*/
| 3002d138d7e49c7aba504834bcdb030f9979c159.cu |
#define DEFINEGLOBALSHERE
#include "gmatrix.h"
void initialize_globals()
{
for(int i=0;i<MAX_DEVICE;i++) {
total_states[i]=0;
threads_per_block[i]=0;
dev_state_set[i]=0;
dev_cublas_set[i]=0;
};
currentDevice=0;
}
SEXP get_globals()
{
int deviceCount = 0;
int i;
cudaGetDeviceCount(&deviceCount);
SEXP ret, ret_total_states,ret_threads_per_block,ret_dev_state_set,ret_dev_cublas_set,ret_currentDevice;
PROTECT(ret = allocVector(VECSXP, deviceCount));
PROTECT(ret_total_states = allocVector(INTSXP, deviceCount));
PROTECT(ret_threads_per_block = allocVector(INTSXP, deviceCount));
PROTECT(ret_dev_state_set = allocVector(INTSXP, deviceCount));
PROTECT(ret_dev_cublas_set = allocVector(INTSXP, deviceCount));
PROTECT(ret_currentDevice = allocVector(INTSXP, 1));
for(i=0;i<deviceCount;i++) {
INTEGER(ret_total_states)[i]=total_states[i];
INTEGER(ret_threads_per_block)[i]=threads_per_block[i];
INTEGER(ret_dev_state_set)[i]=dev_state_set[i];
INTEGER(ret_dev_cublas_set)[i]=dev_cublas_set[i];
}
INTEGER(ret_currentDevice)[0]=currentDevice;
SET_VECTOR_ELT(ret, 0, ret_total_states);
SET_VECTOR_ELT(ret, 1, ret_threads_per_block);
SET_VECTOR_ELT(ret, 2, ret_dev_state_set);
SET_VECTOR_ELT(ret, 3, ret_dev_cublas_set);
SET_VECTOR_ELT(ret, 4, ret_currentDevice);
/*GLOBAL int total_states[MAX_DEVICE];
GLOBAL curandState* dev_states[MAX_DEVICE];
GLOBAL int threads_per_block[MAX_DEVICE];
GLOBAL int dev_state_set[MAX_DEVICE];
GLOBAL int dev_cublas_set[MAX_DEVICE];
GLOBAL int currentDevice;*/
UNPROTECT(6);
return(ret);
}
SEXP get_device()
{
SEXP ret;
PROTECT(ret = allocVector(INTSXP, 1));
INTEGER(ret)[0]=currentDevice;
UNPROTECT(1);
return(ret);
}
void free_dev_states(int *silent)
{
cudaError_t status1;
if(dev_state_set[currentDevice]==1) {
if(silent[0]==0)
Rprintf("Deleting old states on device %d.\n", currentDevice);
status1=cudaFree((dev_states[currentDevice]));
if (status1 != cudaSuccess ) {
error("CUDA memory free error in 'free_(dev_states[currentDevice]).' (%d) \n", (int) status1);
return;
}
}
}
void set_threads_per_block(int *tpb) {
threads_per_block[currentDevice]=tpb[0];
}
/*
void set_c(double *in_c)
{
c1=in_c[0];
c2=in_c[1];
c3=in_c[2];
}
void get_c(double *in_c)
{
in_c[0]=c1;
in_c[1]=c2;
in_c[2]=c3;
}
void set_(total_states[currentDevice])(int *in_(total_states[currentDevice]))
{
(total_states[currentDevice])=in_(total_states[currentDevice])[0];
}*/
/*
void check_started()
{
if(started==0L)
error("GPU device has not yet been selected. Please use listDevices() and setDevice() to select a divice.")
}*/
/* do some setup*/
__global__ void kernel_setup_curand(curandState *state, int seed, int n)
{
int id = threadIdx.x + blockIdx.x * blockDim.x ;
/* Each thread gets same seed , a different sequence number - no offset */
if(id<n)
curand_init(seed, id, 0, &state[id]) ;
}
SEXP setup_curand(SEXP in_total_states, SEXP in_seed, SEXP in_silent, SEXP in_force)
{ //check_started();
int my_total_states=INTEGER(in_total_states)[0];
int force = INTEGER(in_force)[0];
int silent = INTEGER(in_silent)[0];
int seed=INTEGER(in_seed)[0];
cudaError_t cudaStat;
int doit;
if(force==1)
doit=1;
else if(dev_state_set[currentDevice]==0)
doit=1;
else if(total_states[currentDevice]!=my_total_states)
doit=1;
else doit=0;
if(doit==1) {
if(dev_state_set[currentDevice]==1) {
if(silent==0)
Rprintf("Deleting old states on device %d.\n", currentDevice);
if((dev_states[currentDevice])!=NULL) {
cudaStat=cudaFree((dev_states[currentDevice]));
if (cudaStat != cudaSuccess ) {
error("CUDA memory free error in 'setup_curand.' (%d) \n", (int) cudaStat);
}
}
}
total_states[currentDevice]=my_total_states;
if(silent==0)
Rprintf("Creating new states on device %d.\n", currentDevice);
/* Allocate space for prng states on device */
cudaStat = cudaMalloc (( void **)&(dev_states[currentDevice]), (total_states[currentDevice])*sizeof(curandState));
if (cudaStat != cudaSuccess ) {
error("Allocation error from 'setup_curand.' (%d)'\n", (int) cudaStat);
}
/* Setup prng states */
int blocksPerGrid = ((total_states[currentDevice]) + (threads_per_block[currentDevice]) - 1) / (threads_per_block[currentDevice]);
kernel_setup_curand<<<blocksPerGrid, (threads_per_block[currentDevice])>>>((dev_states[currentDevice]), seed, (total_states[currentDevice]));
cudaStat = cudaDeviceSynchronize();
if (cudaStat != cudaSuccess ) {
error("Kernal error from 'setup_curand.' (%d)'\n", (int) cudaStat);
}
dev_state_set[currentDevice]=1;
}
return in_total_states;
}
void startCublas(int* silent) { // must be called with .C interface
cublasStatus_t status1;
if(dev_cublas_set[currentDevice]==0) {
if(silent[0]==0)
Rprintf("Starting cublas on device %d.\n", currentDevice);
status1 = cublasCreate(&(handle[currentDevice]));
if (status1 != CUBLAS_STATUS_SUCCESS) {
error("CUBLAS initialization error\n");
}
dev_cublas_set[currentDevice]=1;
}
}
void stopCublas(int* silent) {
cublasStatus_t status1;
//check_started();
if(dev_cublas_set[currentDevice]!=0) {
if(silent[0]==0)
Rprintf("Shutting down cublas on device %d", currentDevice);
status1 = cublasDestroy((handle[currentDevice]));
if (status1 != CUBLAS_STATUS_SUCCESS) {
warning("CUBLAS shutdown error\n");
}
}
}
/*
void RlistDevices(int* curdevice, int *memory, int *total, int *silent) {
int deviceCount = 0;
int i;
cudaDeviceProp deviceProp;
cudaGetDeviceCount(&deviceCount);
if(deviceCount>20)
error("to many devices to list.");
for(i=0;i<deviceCount;i++) {
cudaGetDeviceProperties(&deviceProp, i);
memory[i]=deviceProp.totalGlobalMem ;
if(silent[0]==0) {
if(current[0]==i)
Rprintf("%d - \"%s\" (current device)\n", i, deviceProp.name);
else
Rprintf("%d - \"%s\"\n", i, deviceProp.name);
Rprintf(" Total global memory: %d\n", deviceProp.totalGlobalMem );
Rprintf(" Multiprocessor count: %d\n", deviceProp.multiProcessorCount);
}
}
}
*/
void setDevice(int *device, int *silent) {
cudaError_t status1;
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
#ifdef DEBUG
Rprintf("%d %d",
deviceCount,device[0]);
#endif
if((device[0] < 0) || (device[0] >= deviceCount))
error("The gpu id (%d) number is not valid.",device[0]);
#ifdef DEBUG
Rprintf("here");
#endif
status1 = cudaSetDevice(device[0]);
if (status1 != cudaSuccess) {
if(status1 == cudaErrorSetOnActiveProcess)
error("Active process. Can't set device.\n");
else if(status1 == cudaErrorInvalidDevice)
error("Invalid Device\n");
else
error("Unknown errors\n");
} else {
currentDevice=device[0];
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device[0]);
if(silent[0]==0)
Rprintf("Now using device %d - \"%s\"\n", device[0], deviceProp.name);
}
/*
GLOBAL __device__ int CUDA_R_Na_int;
GLOBAL __device__ double CUDA_R_Na_double;
GLOBAL __device__ float CUDA_R_Na_float;
R defines the following
void attribute_hidden InitArithmetic()
{
R_NaInt = INT_MIN;
R_NaN = 0.0/R_Zero_Hack;
R_NaReal = R_ValueOfNA();
R_PosInf = 1.0/R_Zero_Hack;
R_NegInf = -1.0/R_Zero_Hack;
}*/
float R_NaFloat = (float) R_NaReal;
cudaMemcpyToSymbol(CUDA_R_Na_int, &R_NaInt, sizeof(int));
cudaMemcpyToSymbol(CUDA_R_Na_float, &R_NaFloat, sizeof(float));
cudaMemcpyToSymbol(CUDA_R_Na_double, &R_NaReal, sizeof(double));
}
void deviceReset() {
cudaError_t cudaStat;
cudaStat=cudaDeviceReset();
CUDA_ERROR;
}
void setFlagSpin() {
cudaError_t cudaStat;
cudaStat= cudaSetDeviceFlags(cudaDeviceScheduleSpin);
CUDA_ERROR;
}
void setFlagYield() {
cudaError_t cudaStat;
cudaStat= cudaSetDeviceFlags(cudaDeviceScheduleYield);
CUDA_ERROR;
}
void setFlagBlock() {
cudaError_t cudaStat;
cudaStat= cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync);
CUDA_ERROR;
}
/*
void d_PrintMatrix(double *d_matrix,int rows, int cols, int startRow, int stopRow) {
double *matrix = Calloc(rows*cols, double);
if (matrix == NULL ) {
Rprintf("d_PrintMatrix: Could not allocate memory.");
} else {
cublasGetMatrix(rows, cols, sizeof(double), d_matrix, rows, matrix, rows);
PrintMatrix(matrix, rows, cols, startRow, stopRow);
Free(matrix);
}
}
void PrintMatrix(double matrix[], int rows, int cols, int startRow, int stopRow)
{
int r,c;
int row_stop= min(rows,stopRow);
Rprintf("Matrix is: %d x %d \n", rows, cols);
for(r=startRow;r<row_stop;r++) {
Rprintf("[%3d]", r);
for(c=0; c<cols;c++) {
// if( abs(matrix[c*rows + r]) > 100000)
// Rprintf("%1.10f ", matrix[c*rows + r]);
// else
Rprintf(" %e ", matrix[c*rows + r]);
}
Rprintf("\n");
}
}*/
void check_mem(int *freer, int *totr, int *silent) {
size_t free, total;
cudaMemGetInfo(&free,&total);
if(silent[0]==0)
Rprintf("%d MB free out of %d MB total.\n",free/1048576,total/1048576);
freer[0]=free;
totr[0]=total;
//mem[0]=(int) free;
//mem[1]=(int) total;
}
SEXP get_device_info(SEXP property)
{
int deviceCount = 0;
int i;
cudaDeviceProp deviceProp;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess)
{
error("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
}
SEXP ret;
#define LOOK(MPROP,MDPROP) \
if(strcmp(CHAR(STRING_ELT(property, 0)), #MPROP) == 0) {\
PROTECT(ret = allocVector(INTSXP, deviceCount));\
for(i=0;i<deviceCount;i++) {\
cudaGetDeviceProperties(&deviceProp, i);\
INTEGER(ret)[i] = deviceProp.MDPROP ;\
}\
}
if(strcmp(CHAR(STRING_ELT(property, 0)), "name") == 0) {
PROTECT(ret = allocVector(STRSXP, deviceCount));
for(i=0;i<deviceCount;i++) {
cudaGetDeviceProperties(&deviceProp, i);
SET_STRING_ELT(ret, i, mkChar(deviceProp.name));
}
} else LOOK(totalGlobalMem,totalGlobalMem)
else LOOK(sharedMemPerBlock,sharedMemPerBlock)
else LOOK(regsPerBlock,regsPerBlock)
else LOOK(warpSize,warpSize)
else LOOK(memPitch,memPitch)
else LOOK(maxThreadsPerBlock,maxThreadsPerBlock)
else LOOK(maxThreadsDim0,maxThreadsDim[0])
else LOOK(maxThreadsDim1,maxThreadsDim[1])
else LOOK(maxThreadsDim2,maxThreadsDim[2])
else LOOK(maxGridSize0,maxGridSize[0])
else LOOK(maxGridSize1,maxGridSize[1])
else LOOK(maxGridSize2,maxGridSize[2])
else LOOK(clockRate,clockRate)
else LOOK(totalConstMem,totalConstMem)
else LOOK(major,major)
else LOOK(minor,minor)
else LOOK(textureAlignment,textureAlignment)
else LOOK(deviceOverlap,deviceOverlap)
else LOOK(multiProcessorCount,multiProcessorCount)
else LOOK(kernelExecTimeoutEnabled,kernelExecTimeoutEnabled)
else LOOK(integrated,integrated)
else LOOK(canMapHostMemory,canMapHostMemory)
else LOOK(computeMode,computeMode)
else LOOK(maxTexture1D,maxTexture1D)
else LOOK(maxTexture2D0,maxTexture2D[0])
else LOOK(maxTexture2D1,maxTexture2D[1])
else LOOK(maxTexture3D0,maxTexture3D[0])
else LOOK(maxTexture3D1,maxTexture3D[1])
else LOOK(maxTexture3D2,maxTexture3D[2])
else LOOK(maxTexture1DLayered0,maxTexture1DLayered[0])
else LOOK(maxTexture1DLayered1,maxTexture1DLayered[1])
else LOOK(maxTexture2DLayered0,maxTexture2DLayered[0])
else LOOK(maxTexture2DLayered1,maxTexture2DLayered[1])
else LOOK(maxTexture2DLayered2,maxTexture2DLayered[2])
else LOOK(surfaceAlignment,surfaceAlignment)
else LOOK(concurrentKernels,concurrentKernels)
else LOOK(ECCEnabled,ECCEnabled)
else LOOK(pciBusID,pciBusID)
else LOOK(pciDeviceID,pciDeviceID)
else LOOK(pciDomainID,pciDomainID)
else LOOK(tccDriver,tccDriver)
else LOOK(asyncEngineCount,asyncEngineCount)
else LOOK(unifiedAddressing,unifiedAddressing)
else LOOK(memoryClockRate,memoryClockRate)
else LOOK(memoryBusWidth,memoryBusWidth)
else LOOK(l2CacheSize,l2CacheSize)
else LOOK(maxThreadsPerMultiProcessor,maxThreadsPerMultiProcessor)
else
error("Property not recognized.");
UNPROTECT(1);
return(ret);
}
//void call_gc() {
// SEXP s, t;
// PROTECT(t1 = s1 = allocList(2));
// PROTECT(t2 = s2 = allocList(3));
//
// SET_TYPEOF(s1, LANGSXP);
// SET_TYPEOF(s2, LANGSXP);
//
// SETCAR(t1, install(".Internal")); t1 = CDR(t1);
// SETCAR(t2, install("gc")); t2 = CDR(t2);
//
// SETCAR(t, ScalarInteger(digits));
// SET_TAG(t, install("digits"));
// eval(s, env);
// UNPROTECT(1);
//}
/*void
R_init_mylib(DllInfo *info)
{
Rprintf("Starting cublas...");
startCublas();
}
void
R_unload_mylib(DllInfo *info)
{
Rprintf("Stoping cublas...");
stopCublas();
}
*/
|
8b05b64760a64759c9925cabcde0c916f571c351.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "cpu_bitmap.h"
#include "util.h"
#include "math.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
__device__ float hit( float ox, float oy, float *n ) {
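// Rays are cast parallel to the z axis from pixel (ox, oy): if the ray passes within
// radius of the sphere centre in x/y it hits; dz is the depth of the intersection and
// n (dz / radius) is the z component of the surface normal, used later for shading.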
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__global__ void kernel( Sphere *s, unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
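// keep the sphere with the largest t (closest to the viewer along z), so nearer spheres occlude farther ones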
for(int i=0; i<SPHERES; i++) {
float n;
// t -> distance to screen
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
Sphere *s;
// allocate memory on the GPU for the output bitmap
CUDA_CHECK_RETURN( hipMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
// allocate memory for the Sphere dataset
CUDA_CHECK_RETURN( hipMalloc( (void**)&s,
sizeof(Sphere) * SPHERES ) );
// allocate temp memory, initialize it, copy to
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
CUDA_CHECK_RETURN( hipMemcpy( s, temp_s,
sizeof(Sphere) * SPHERES,
hipMemcpyHostToDevice ) );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
hipEvent_t beginEvent;
hipEvent_t endEvent;
hipEventCreate(&beginEvent);
hipEventCreate(&endEvent);
hipEventRecord(beginEvent, 0);
hipLaunchKernelGGL(( kernel), dim3(grids),dim3(threads), 0, 0, s, dev_bitmap );
hipEventRecord(endEvent, 0);
hipEventSynchronize(endEvent);
float gpu_processing = 0;
hipEventElapsedTime(&gpu_processing, beginEvent, endEvent);
hipEventDestroy(beginEvent);
hipEventDestroy(endEvent);
printf("Tempo gasto [GPU]: %lf (ms) \n", gpu_processing);
// copy our bitmap back from the GPU for display
CUDA_CHECK_RETURN( hipMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
hipMemcpyDeviceToHost ) );
CUDA_CHECK_RETURN( hipFree( dev_bitmap ) );
CUDA_CHECK_RETURN( hipFree( s ) );
// display
bitmap.display_and_exit();
}
| 8b05b64760a64759c9925cabcde0c916f571c351.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
#include "cpu_bitmap.h"
#include "util.h"
#include "math.h"
#define DIM 1024
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
struct Sphere {
float r,b,g;
float radius;
float x,y,z;
__device__ float hit( float ox, float oy, float *n ) {
float dx = ox - x;
float dy = oy - y;
if (dx*dx + dy*dy < radius*radius) {
float dz = sqrtf( radius*radius - dx*dx - dy*dy );
*n = dz / sqrtf( radius * radius );
return dz + z;
}
return -INF;
}
};
#define SPHERES 20
__global__ void kernel( Sphere *s, unsigned char *ptr ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM/2);
float oy = (y - DIM/2);
float r=0, g=0, b=0;
float maxz = -INF;
for(int i=0; i<SPHERES; i++) {
float n;
// t -> distance to screen
float t = s[i].hit( ox, oy, &n );
if (t > maxz) {
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
// globals needed by the update routine
struct DataBlock {
unsigned char *dev_bitmap;
Sphere *s;
};
int main( void ) {
DataBlock data;
CPUBitmap bitmap( DIM, DIM, &data );
unsigned char *dev_bitmap;
Sphere *s;
// allocate memory on the GPU for the output bitmap
CUDA_CHECK_RETURN( cudaMalloc( (void**)&dev_bitmap,
bitmap.image_size() ) );
// allocate memory for the Sphere dataset
CUDA_CHECK_RETURN( cudaMalloc( (void**)&s,
sizeof(Sphere) * SPHERES ) );
// allocate temp memory, initialize it, copy to
// memory on the GPU, then free our temp memory
Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
for (int i=0; i<SPHERES; i++) {
temp_s[i].r = rnd( 1.0f );
temp_s[i].g = rnd( 1.0f );
temp_s[i].b = rnd( 1.0f );
temp_s[i].x = rnd( 1000.0f ) - 500;
temp_s[i].y = rnd( 1000.0f ) - 500;
temp_s[i].z = rnd( 1000.0f ) - 500;
temp_s[i].radius = rnd( 100.0f ) + 20;
}
CUDA_CHECK_RETURN( cudaMemcpy( s, temp_s,
sizeof(Sphere) * SPHERES,
cudaMemcpyHostToDevice ) );
free( temp_s );
// generate a bitmap from our sphere data
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
cudaEventCreate(&beginEvent);
cudaEventCreate(&endEvent);
cudaEventRecord(beginEvent, 0);
kernel<<<grids,threads>>>( s, dev_bitmap );
cudaEventRecord(endEvent, 0);
cudaEventSynchronize(endEvent);
float gpu_processing = 0;
cudaEventElapsedTime(&gpu_processing, beginEvent, endEvent);
cudaEventDestroy(beginEvent);
cudaEventDestroy(endEvent);
printf("Tempo gasto [GPU]: %lf (ms) \n", gpu_processing);
// copy our bitmap back from the GPU for display
CUDA_CHECK_RETURN( cudaMemcpy( bitmap.get_ptr(), dev_bitmap,
bitmap.image_size(),
cudaMemcpyDeviceToHost ) );
CUDA_CHECK_RETURN( cudaFree( dev_bitmap ) );
CUDA_CHECK_RETURN( cudaFree( s ) );
// display
bitmap.display_and_exit();
}
|
74a4904f11b413516934f90c705f36fb93927105.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <stdio.h>
#include <ctime>
#include <clocale>
hipError_t powWithCuda(unsigned int *c, const int *a, unsigned int size);
// GPU entry point (kernel)
__global__ void powKernel(unsigned int *c, const int *a, const int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) // guard: the grid is rounded up to whole blocks, so some threads fall past the end of the array
c[i] = a[i] * a[i];
}
// Application entry point
int main()
{
setlocale(LC_CTYPE, "rus");
const int arraySize = 8000;
int a[arraySize] = { 0 };
unsigned int c[arraySize] = { 0 };
for (int i = 0; i < arraySize; i++)
{
a[i] = i + 1;
}
srand(time(0));
// Square the array elements in parallel.
hipError_t cudaStatus = powWithCuda(c, a, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "powWithCuda failed!");
return 1;
}
clock_t device_time = clock();
printf(" \n\n");
printf(" 10 5 %d : \n\n", arraySize);
printf("{1,2,3,4,5,6,7,8,9,10} = \n{%d,%d,%d,%d,%d,%d,%d,%d,%d,%d}\n\n",
c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7], c[8], c[9]);
printf("{%d,%d,%d,%d,%d} = \n{%d,%d,%d,%d,%d}\n",
a[arraySize - 5], a[arraySize - 4], a[arraySize - 3], a[arraySize - 2], a[arraySize - 1],
c[arraySize - 5], c[arraySize - 4], c[arraySize - 3], c[arraySize - 2], c[arraySize - 1]);
printf("\n*******************************************\n\n");
// Computation on the host
srand(time(0));
for (int i = 0; i < arraySize; i++)
{
c[i] = a[i] * a[i];
}
clock_t host_time = clock();
printf(" : %d \n", device_time * 1000);
printf(" : %d \n", host_time * 1000);
printf(" : %d \n", (host_time - device_time) * 1000);
printf("\n*******************************************\n\n");
printf(" : \n\n");
int deviceCount;
hipGetDeviceCount(&deviceCount);
for (int device = 0; device < deviceCount; device++) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printf(" : %d\n", device);
printf(" : %s\n", deviceProp.name);
printf(" : %d\n", deviceProp.totalGlobalMem);
printf(" shared- : %d\n", deviceProp.sharedMemPerBlock);
printf(" : %d\n", deviceProp.regsPerBlock);
printf(" warp'a: %d\n", deviceProp.warpSize);
printf(" : %d\n", deviceProp.memPitch);
printf(" : %d\n", deviceProp.maxThreadsPerBlock);
printf(" : x = %d, y = %d, z = %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" : x = %d, y = %d, z = %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" : %d\n", deviceProp.clockRate);
printf(" : %d\n", deviceProp.totalConstMem);
printf(" : %d.%d\n", deviceProp.major, deviceProp.minor);
printf(" : %d\n", deviceProp.textureAlignment);
printf(" : %d\n\n", deviceProp.multiProcessorCount);
}
printf("\n*******************************************\n\n");
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to square the array elements in parallel.
hipError_t powWithCuda(unsigned int *c, const int *a, unsigned int size)
{
int *dev_a = 0; // dev - resides on the GPU
unsigned int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0); // select card "0"; there may be several
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(unsigned int)); // allocate device memory for the variable
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); // allocate device memory for the variable
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); // copy the variable's values from the host to the GPU
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
dim3 block(512, 1);
dim3 grid((size / 512 + 1), 1);
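// size / 512 + 1 rounds the grid up to whole blocks (and adds an extra block when size
// is a multiple of 512), so the kernel guards against threads beyond the last element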
powKernel << <grid, block >> > (dev_c, dev_a, size); // launch the kernel (size - array length)
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "mulKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching mulKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(unsigned int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
return cudaStatus;
}
| 74a4904f11b413516934f90c705f36fb93927105.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdlib>
#include <stdio.h>
#include <ctime>
#include <clocale>
cudaError_t powWithCuda(unsigned int *c, const int *a, unsigned int size);
// GPU entry point (kernel)
__global__ void powKernel(unsigned int *c, const int *a, const int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) // guard: the grid is rounded up to whole blocks, so some threads fall past the end of the array
c[i] = a[i] * a[i];
}
// Application entry point
int main()
{
setlocale(LC_CTYPE, "rus");
const int arraySize = 8000;
int a[arraySize] = { 0 };
unsigned int c[arraySize] = { 0 };
for (int i = 0; i < arraySize; i++)
{
a[i] = i + 1;
}
srand(time(0));
// Square the array elements in parallel.
cudaError_t cudaStatus = powWithCuda(c, a, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "powWithCuda failed!");
return 1;
}
clock_t device_time = clock();
printf("Задача задать массив чисел и вычислить их квадраты \n\n");
printf("Вывод первых 10 и последних 5 результатов возведения массива чисел в квадрат состоящего из %d элементов: \n\n", arraySize);
printf("{1,2,3,4,5,6,7,8,9,10} = \n{%d,%d,%d,%d,%d,%d,%d,%d,%d,%d}\n\n",
c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7], c[8], c[9]);
printf("{%d,%d,%d,%d,%d} = \n{%d,%d,%d,%d,%d}\n",
a[arraySize - 5], a[arraySize - 4], a[arraySize - 3], a[arraySize - 2], a[arraySize - 1],
c[arraySize - 5], c[arraySize - 4], c[arraySize - 3], c[arraySize - 2], c[arraySize - 1]);
printf("\n*******************************************\n\n");
// Computation on the host
srand(time(0));
for (int i = 0; i < arraySize; i++)
{
c[i] = a[i] * a[i];
}
clock_t host_time = clock();
printf("Время работы на устройстве составило: %d мкс \n", device_time * 1000);
printf("Время работы на хосте составило: %d мкс \n", host_time * 1000);
printf("Выигрыш времени на устройстве составил: %d мкс \n", (host_time - device_time) * 1000);
printf("\n*******************************************\n\n");
printf("Основные данные по устройству: \n\n");
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for (int device = 0; device < deviceCount; device++) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printf("Номер устройства: %d\n", device);
printf("Имя устройства: %s\n", deviceProp.name);
printf("Объем глобальной памяти: %d\n", deviceProp.totalGlobalMem);
printf("Объем shared-памяти в блоке : %d\n", deviceProp.sharedMemPerBlock);
printf("Объем регистровой памяти: %d\n", deviceProp.regsPerBlock);
printf("Размер warp'a: %d\n", deviceProp.warpSize);
printf("Размер шага памяти: %d\n", deviceProp.memPitch);
printf("Макс количество потоков в блоке: %d\n", deviceProp.maxThreadsPerBlock);
printf("Максимальная размерность потока: x = %d, y = %d, z = %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf("Максимальный размер сетки: x = %d, y = %d, z = %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf("Тактовая частота: %d\n", deviceProp.clockRate);
printf("Общий объем константной памяти: %d\n", deviceProp.totalConstMem);
printf("Вычислительная мощность: %d.%d\n", deviceProp.major, deviceProp.minor);
printf("Величина текстурного выравнивания : %d\n", deviceProp.textureAlignment);
printf("Количество процессоров: %d\n\n", deviceProp.multiProcessorCount);
}
printf("\n*******************************************\n\n");
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to square the array elements in parallel.
cudaError_t powWithCuda(unsigned int *c, const int *a, unsigned int size)
{
int *dev_a = 0; // dev - resides on the GPU
unsigned int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0); // select card "0"; there may be several
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(unsigned int)); // allocate device memory for the variable
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); // allocate device memory for the variable
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); // copy the variable's values from the host to the GPU
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
dim3 block(512, 1);
dim3 grid((size / 512 + 1), 1);
powKernel << <grid, block >> > (dev_c, dev_a, size); // launch the kernel (size - array length)
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "mulKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching mulKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
return cudaStatus;
}
|
d9d70b01d0db33f1687d56b31f2e60f1edcd1e2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void mykernel(void)
{
} | d9d70b01d0db33f1687d56b31f2e60f1edcd1e2b.cu | #include "includes.h"
__global__ void mykernel(void)
{
} |
3d01c04158fd9f8eb6df53e61f1aa94150481e17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include "THHDeviceTensor.cuh"
//#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
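// grid-stride loop: each thread starts at its global index and strides by the total
// number of launched threads until all nthreads elements have been covered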
template <typename Dtype>
__global__ void compute_flow_kernel(
const int nthreads,
THCDeviceTensor<Dtype, 2> boxes,
THCDeviceTensor<Dtype, 4> output/*,
const int height,
const int width*/) {
int N = boxes.getSize(0);
int H = output.getSize(1);
int W = output.getSize(2);
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index % N;
const int h = (index / N) % H;
const int w = (index / (N * H)) % W;
// int x0 = ScalarConvert<Dtype, int>::to(boxes[n][0]);
// int y0 = ScalarConvert<Dtype, int>::to(boxes[n][1]);
// int x1 = ScalarConvert<Dtype, int>::to(boxes[n][2]);
// int y1 = ScalarConvert<Dtype, int>::to(boxes[n][3]);
int x0 = int(boxes[n][0]);
int y0 = int(boxes[n][1]);
int x1 = int(boxes[n][2]);
int y1 = int(boxes[n][3]);
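// boxes holds (x0, y0, x1, y1) corners; pixels outside the box get the sentinel (-2, -2),
// which presumably marks them as out of range for the downstream grid sampler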
if ((w < x0) || (h < y0) || (w >= x1) || (h >= y1)) {
output[n][h][w][0] = Dtype(-2);
output[n][h][w][1] = Dtype(-2);
continue;
}
int TO_REMOVE = 1;
int box_width = x1 - x0 + TO_REMOVE;
int box_height = y1 - y0 + TO_REMOVE;
int xx0 = max(x0, 0);
int yy0 = max(y0, 0);
//int xx1 = ::min(x1, width);
//int yy1 = ::min(y1, height);
int lx = w - xx0;
int ly = h - yy0;
Dtype x = 2.f / (Dtype(box_width - 1)) * lx - 1.f;
Dtype y = 2.f / (Dtype(box_height - 1)) * ly - 1.f;
// get the corresponding input x, y co-ordinates from grid
output[n][h][w][0] = x;
output[n][h][w][1] = y;
}
}
at::Tensor compute_flow_cuda(const at::Tensor& boxes,
const int height,
const int width) {
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto num_rois = boxes.size(0);
at::Tensor output = at::empty({num_rois, height, width, 2}, boxes.options());
auto output_size = num_rois * height * width;
dim3 grid(::min(THCCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
//AT_DISPATCH_FLOATING_TYPES(boxes.type(), "compute_flow", [&] {
/*
using scalar_t = float;
using THCTensor = THCudaTensor;
THCDeviceTensor<scalar_t, 2> devBoxes = toDeviceTensor<scalar_t, 2>(state, (THCTensor *) boxes.unsafeGetTH(false));
THCDeviceTensor<scalar_t, 4> devOutput = toDeviceTensor<scalar_t, 4>(state, (THCTensor *) output.unsafeGetTH(false));
compute_flow_kernel<scalar_t><<<grid, block, 0, stream>>>(
output_size,
devBoxes,
devOutput);
//});
*/
THCudaCheck(hipGetLastError());
return output;
}
| 3d01c04158fd9f8eb6df53e61f1aa94150481e17.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include "THCDeviceTensor.cuh"
//#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename Dtype>
__global__ void compute_flow_kernel(
const int nthreads,
THCDeviceTensor<Dtype, 2> boxes,
THCDeviceTensor<Dtype, 4> output/*,
const int height,
const int width*/) {
int N = boxes.getSize(0);
int H = output.getSize(1);
int W = output.getSize(2);
CUDA_1D_KERNEL_LOOP(index, nthreads) {
const int n = index % N;
const int h = (index / N) % H;
const int w = (index / (N * H)) % W;
// int x0 = ScalarConvert<Dtype, int>::to(boxes[n][0]);
// int y0 = ScalarConvert<Dtype, int>::to(boxes[n][1]);
// int x1 = ScalarConvert<Dtype, int>::to(boxes[n][2]);
// int y1 = ScalarConvert<Dtype, int>::to(boxes[n][3]);
int x0 = int(boxes[n][0]);
int y0 = int(boxes[n][1]);
int x1 = int(boxes[n][2]);
int y1 = int(boxes[n][3]);
if ((w < x0) || (h < y0) || (w >= x1) || (h >= y1)) {
output[n][h][w][0] = Dtype(-2);
output[n][h][w][1] = Dtype(-2);
continue;
}
int TO_REMOVE = 1;
int box_width = x1 - x0 + TO_REMOVE;
int box_height = y1 - y0 + TO_REMOVE;
int xx0 = max(x0, 0);
int yy0 = max(y0, 0);
//int xx1 = std::min(x1, width);
//int yy1 = std::min(y1, height);
int lx = w - xx0;
int ly = h - yy0;
Dtype x = 2.f / (Dtype(box_width - 1)) * lx - 1.f;
Dtype y = 2.f / (Dtype(box_height - 1)) * ly - 1.f;
// get the corresponding input x, y co-ordinates from grid
output[n][h][w][0] = x;
output[n][h][w][1] = y;
}
}
at::Tensor compute_flow_cuda(const at::Tensor& boxes,
const int height,
const int width) {
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto num_rois = boxes.size(0);
at::Tensor output = at::empty({num_rois, height, width, 2}, boxes.options());
auto output_size = num_rois * height * width;
dim3 grid(std::min(THCCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
//AT_DISPATCH_FLOATING_TYPES(boxes.type(), "compute_flow", [&] {
/*
using scalar_t = float;
using THCTensor = THCudaTensor;
THCDeviceTensor<scalar_t, 2> devBoxes = toDeviceTensor<scalar_t, 2>(state, (THCTensor *) boxes.unsafeGetTH(false));
THCDeviceTensor<scalar_t, 4> devOutput = toDeviceTensor<scalar_t, 4>(state, (THCTensor *) output.unsafeGetTH(false));
compute_flow_kernel<scalar_t><<<grid, block, 0, stream>>>(
output_size,
devBoxes,
devOutput);
//});
*/
THCudaCheck(cudaGetLastError());
return output;
}
|
1d3c181ac481f76f536bd560a367636f5a80cd93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lusol.h"
#include "hipsparse.h"
/*-----------------------------------------------*/
void cuda_init(int argc, char **argv) {
int deviceCount, dev;
hipGetDeviceCount(&deviceCount);
printf("=========================================\n");
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %.2f GB\n",
deviceProp.totalGlobalMem/1e9);
}
dev = 0;
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("\nRunning on Device %d: \"%s\"\n", dev, deviceProp.name);
printf("=========================================\n");
}
/*---------------------------------------------------*/
void cuda_check_err() {
hipError_t cudaerr = hipGetLastError() ;
if (cudaerr != hipSuccess)
printf("error: %s\n",hipGetErrorString(cudaerr));
}
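/*-----------------------------------------------------------------------
Triangular solves with the legacy csrsv API: one analysis pass per triangle
builds level-scheduling information, then the timed loop repeatedly solves
L y = b followed by U x = y, using the lower/upper part of the CSR matrix
selected by each descriptor's fill mode. */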
void luSolv_cusparse1(struct csr_t *csr, REAL *b, REAL *x,
int REPEAT, bool print) {
int n = csr->n;
int nnz = csr->nnz;
int *d_ia, *d_ja;
REAL *d_a, *d_b, *d_x, *d_y;
double t1, t2, ta;
REAL done = 1.0;
/*------------------- allocate Device Memory */
hipMalloc((void **)&d_ia, (n+1)*sizeof(int));
hipMalloc((void **)&d_ja, nnz*sizeof(int));
hipMalloc((void **)&d_a, nnz*sizeof(REAL));
hipMalloc((void **)&d_b, n*sizeof(REAL));
hipMalloc((void **)&d_x, n*sizeof(REAL));
hipMalloc((void **)&d_y, n*sizeof(REAL));
/*------------------- Memcpy */
hipMemcpy(d_ia, csr->ia, (n+1)*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_ja, csr->ja, nnz*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_a, csr->a, nnz*sizeof(REAL),
hipMemcpyHostToDevice);
hipMemcpy(d_b, b, n*sizeof(REAL),
hipMemcpyHostToDevice);
hipsparseStatus_t status;
hipsparseHandle_t handle=0;
hipsparseMatDescr_t descr_L=0, descr_U=0;
/* initialize cusparse library */
status= hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
exit(1);
}
/* create and setup matrix descriptor for L */
status= hipsparseCreateMatDescr(&descr_L);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization L failed\n");
exit(1);
}
t1 = wall_timer();
cusparseSolveAnalysisInfo_t info_L = 0;
cusparseCreateSolveAnalysisInfo(&info_L);
hipsparseSetMatType(descr_L,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatFillMode(descr_L, HIPSPARSE_FILL_MODE_LOWER);
hipsparseSetMatIndexBase(descr_L, HIPSPARSE_INDEX_BASE_ONE);
hipsparseSetMatDiagType(descr_L, HIPSPARSE_DIAG_TYPE_NON_UNIT);
#if DOUBLEPRECISION
status = cusparseDcsrsv_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L);
#else
status = cusparseScsrsv_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L);
#endif
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_analysis L failed\n");
exit(1);
}
/* create and setup matrix descriptor for U */
status= hipsparseCreateMatDescr(&descr_U);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization U failed\n");
exit(1);
}
cusparseSolveAnalysisInfo_t info_U = 0;
cusparseCreateSolveAnalysisInfo(&info_U);
hipsparseSetMatType(descr_U, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatFillMode(descr_U, HIPSPARSE_FILL_MODE_UPPER);
hipsparseSetMatIndexBase(descr_U, HIPSPARSE_INDEX_BASE_ONE);
hipsparseSetMatDiagType(descr_U, HIPSPARSE_DIAG_TYPE_NON_UNIT);
#if DOUBLEPRECISION
status = cusparseDcsrsv_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U);
#else
status = cusparseScsrsv_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U);
#endif
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_analysis L failed\n");
exit(1);
}
//Barrier for GPU calls
hipDeviceSynchronize();
ta = wall_timer() - t1;
t1 = wall_timer();
for (int j=0; j<REPEAT; j++) {
#if DOUBLEPRECISION
// L-solve
status = cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, &done,
descr_L, d_a, d_ia, d_ja, info_L, d_b, d_y);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
// U-solve
status = cusparseDcsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, &done,
descr_U, d_a, d_ia, d_ja, info_U, d_y, d_x);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
#else
// L-solve
status = cusparseScsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, &done,
descr_L, d_a, d_ia, d_ja, info_L, d_b, d_y);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
// U-solve
status = cusparseScsrsv_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, &done,
descr_U, d_a, d_ia, d_ja, info_U, d_y, d_x);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
#endif
}
//Barrier for GPU calls
hipDeviceSynchronize();
t2 = wall_timer() - t1;
if (print) {
printf("[GPU] CUSPARSE csrsv\n");
printf(" time(s)=%f, Gflops=%5.2f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2);
printf(" analysis time %f (%f) ", ta, ta/t2*REPEAT);
}
/*-------- copy x to host mem */
hipMemcpy(x, d_x, n*sizeof(REAL),
hipMemcpyDeviceToHost);
hipFree(d_ia);
hipFree(d_ja);
hipFree(d_a);
hipFree(d_b);
hipFree(d_x);
hipFree(d_y);
/* destroy matrix descriptor */
status = hipsparseDestroyMatDescr(descr_L);
descr_L = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor destruction failed\n");
exit(1);
}
status = hipsparseDestroyMatDescr(descr_U);
descr_U = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor destruction failed\n");
exit(1);
}
status = cusparseDestroySolveAnalysisInfo(info_L);
info_L = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("analysis info destruction failed\n");
exit(1);
}
status = cusparseDestroySolveAnalysisInfo(info_U);
info_U = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("analysis info destruction failed\n");
exit(1);
}
/* destroy handle */
status = hipsparseDestroy(handle);
handle = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library release of resources failed\n");
exit(1);
}
}
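/*-----------------------------------------------------------------------
Same L/U solves as above, but with the csrsv2 API: buffer sizes are queried
explicitly, analysis and solve share the caller-allocated pBuffer, and the
USE_LEVEL policy enables level scheduling. */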
void luSolv_cusparse2(struct csr_t *csr, REAL *b, REAL *x,
int REPEAT, bool print) {
int n = csr->n;
int nnz = csr->nnz;
int *d_ia, *d_ja;
REAL *d_a, *d_b, *d_x, *d_y;
double t1, t2, ta;
REAL done = 1.0;
/*------------------- allocate Device Memory */
hipMalloc((void **)&d_ia, (n+1)*sizeof(int));
hipMalloc((void **)&d_ja, nnz*sizeof(int));
hipMalloc((void **)&d_a, nnz*sizeof(REAL));
hipMalloc((void **)&d_b, n*sizeof(REAL));
hipMalloc((void **)&d_x, n*sizeof(REAL));
hipMalloc((void **)&d_y, n*sizeof(REAL));
/*------------------- Memcpy */
hipMemcpy(d_ia, csr->ia, (n+1)*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_ja, csr->ja, nnz*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_a, csr->a, nnz*sizeof(REAL),
hipMemcpyHostToDevice);
hipMemcpy(d_b, b, n*sizeof(REAL),
hipMemcpyHostToDevice);
hipsparseStatus_t status;
hipsparseHandle_t handle=0;
hipsparseMatDescr_t descr_L=0, descr_U=0;
/* initialize cusparse library */
status= hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
exit(1);
}
/* create and setup matrix descriptor for L */
status= hipsparseCreateMatDescr(&descr_L);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization L failed\n");
exit(1);
}
t1 = wall_timer();
csrsv2Info_t info_L = 0;
hipsparseCreateCsrsv2Info(&info_L);
hipsparseSetMatType(descr_L,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatFillMode(descr_L, HIPSPARSE_FILL_MODE_LOWER);
hipsparseSetMatIndexBase(descr_L, HIPSPARSE_INDEX_BASE_ONE);
hipsparseSetMatDiagType(descr_L, HIPSPARSE_DIAG_TYPE_NON_UNIT);
int pBufferSize_L;
void *pBuffer_L = 0;
#if DOUBLEPRECISION
hipsparseDcsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L, &pBufferSize_L);
#else
hipsparseScsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L, &pBufferSize_L);
#endif
// pBuffer returned by hipMalloc is automatically aligned to 128 bytes.
hipMalloc((void**)&pBuffer_L, pBufferSize_L);
#if DOUBLEPRECISION
status = hipsparseDcsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L,
HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_L);
#else
status = hipsparseScsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L,
HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_L);
#endif
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_analysis L failed\n");
exit(1);
}
/* create and setup matrix descriptor for U */
status= hipsparseCreateMatDescr(&descr_U);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization U failed\n");
exit(1);
}
csrsv2Info_t info_U = 0;
hipsparseCreateCsrsv2Info(&info_U);
hipsparseSetMatType(descr_U, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatFillMode(descr_U, HIPSPARSE_FILL_MODE_UPPER);
hipsparseSetMatIndexBase(descr_U, HIPSPARSE_INDEX_BASE_ONE);
hipsparseSetMatDiagType(descr_U, HIPSPARSE_DIAG_TYPE_NON_UNIT);
int pBufferSize_U;
void *pBuffer_U = 0;
#if DOUBLEPRECISION
hipsparseDcsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U, &pBufferSize_U);
#else
hipsparseScsrsv2_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U, &pBufferSize_U);
#endif
// pBuffer returned by hipMalloc is automatically aligned to 128 bytes.
hipMalloc((void**)&pBuffer_U, pBufferSize_U);
#if DOUBLEPRECISION
status = hipsparseDcsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U,
HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_U);
#else
status = hipsparseScsrsv2_analysis(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U,
HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_U);
#endif
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_analysis U failed\n");
exit(1);
}
//Barrier for GPU calls
hipDeviceSynchronize();
ta = wall_timer() - t1;
t1 = wall_timer();
for (int j=0; j<REPEAT; j++) {
#if DOUBLEPRECISION
// L-solve
status = hipsparseDcsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, &done,
descr_L, d_a, d_ia, d_ja, info_L, d_b, d_y,
HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_L);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
// U-solve
status = hipsparseDcsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, &done,
descr_U, d_a, d_ia, d_ja, info_U, d_y, d_x,
HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_U);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
#else
// L-solve
status = hipsparseScsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, &done,
descr_L, d_a, d_ia, d_ja, info_L, d_b, d_y,
HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_L);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
// U-solve
status = hipsparseScsrsv2_solve(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, &done,
descr_U, d_a, d_ia, d_ja, info_U, d_y, d_x,
HIPSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_U);
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
#endif
}
//Barrier for GPU calls
hipDeviceSynchronize();
t2 = wall_timer() - t1;
if (print) {
printf("[GPU] CUSPARSE csrsv2\n");
printf(" time(s)=%f, Gflops=%5.2f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2);
printf(" analysis time %f (%f) ", ta, ta/t2*REPEAT);
}
/*-------- copy x to host mem */
hipMemcpy(x, d_x, n*sizeof(REAL),
hipMemcpyDeviceToHost);
hipFree(d_ia);
hipFree(d_ja);
hipFree(d_a);
hipFree(d_b);
hipFree(d_x);
hipFree(d_y);
hipFree(pBuffer_L);
hipFree(pBuffer_U);
/* destroy matrix descriptor */
status = hipsparseDestroyMatDescr(descr_L);
descr_L = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor destruction failed\n");
exit(1);
}
status = hipsparseDestroyMatDescr(descr_U);
descr_U = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor destruction failed\n");
exit(1);
}
status = hipsparseDestroyCsrsv2Info(info_L);
info_L = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("analysis info destruction failed\n");
exit(1);
}
status = hipsparseDestroyCsrsv2Info(info_U);
info_U = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("analysis info destruction failed\n");
exit(1);
}
/* destroy handle */
status = hipsparseDestroy(handle);
handle = 0;
if (status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library release of resources failed\n");
exit(1);
}
}
#if 0
__global__
void LU_SOL_SF2_INIT(int n, int *ia, int *da, int *dpl, int *dpu,
int *hl, int *hu, int *d_tl, int tl,
int *d_tu, int tu, int *lockL, int *lockU) {
int gid = blockIdx.x * BLOCKDIM + threadIdx.x;
if (gid < n) {
int t = da[gid];
dpl[gid] = t - ia[gid];
dpu[gid] = ia[gid+1] - t - 1;
}
if (gid == 0) {
*hl = 0;
*hu = 0;
*d_tl = tl;
*d_tu = tu;
*lockL = 0;
*lockU = 0;
}
}
/* lock: 0 is unlocked and 1 is locked */
__global__
void L_SOL_SF2(int n, REAL *b, REAL *x, REAL *a, int *ja, int *ia, int *da,
int *jb, int *ib, int *db, int *dp, int *jlev,
volatile int *head, volatile int *tail, volatile int *lock) {
// num of warps in grid
//int nw = gridDim.x * BLOCKDIM / WARP;
// global warp id
int wid = (blockIdx.x * BLOCKDIM + threadIdx.x) / WARP;
// first warp in this block
//int fid = (blockIdx.x * BLOCKDIM) / WARP;
// thread lane in each warp
int lane = threadIdx.x & (WARP-1);
  // shared memory for partial results in the reduction
volatile __shared__ REAL r[BLOCKDIM + 16];
// local warp id
int wlane = threadIdx.x / WARP;
volatile __shared__ int buffer[BLOCKDIM/WARP];
  // make dp volatile so the compiler does not use a cached value
//volatile int *vdp = dp;
if (wid >= n) {
return;
}
int i, p1, q1, p2, q2, hd, tl;
REAL dinv, bi, sum;
if (lane == 0) {
while (1) {
// try to lock
while (atomicCAS((int*)lock, 0, 1) != 0);
// locked, read the head and tail
hd = *head; tl = *tail;
if (hd <= tl) {
/* there is a row for me to work on, increase the head */
(*head) ++;
__threadfence();
}
        // release the lock
atomicExch((int*)lock, 0);
if (hd >= n) {
buffer[wlane] = -1;
break;
} else if (hd <= tl) {
int tt = jlev[hd] - 1;
buffer[wlane] = tt;
break;
}
}
}
  // this warp now has a row to work on
i = buffer[wlane];
//if (lane == 0) {
// printf("hd %d tl %d row i = %d\n", hd, tl, i);
//}
if (i < 0) {
return;
}
p1 = ia[i];
q1 = da[i];
p2 = db[i] + 1;
q2 = ib[i+1];
sum = 0.0;
if (lane == 0) {
dinv = 1.0 / a[q1-1];
bi = b[i];
}
for (int k=p1+lane; k<q1; k+=WARP) {
sum += a[k-1]*x[ja[k-1]-1];
}
// parallel reduction
r[threadIdx.x] = sum;
r[threadIdx.x] = sum = sum + r[threadIdx.x+16];
r[threadIdx.x] = sum = sum + r[threadIdx.x+8];
r[threadIdx.x] = sum = sum + r[threadIdx.x+4];
r[threadIdx.x] = sum = sum + r[threadIdx.x+2];
r[threadIdx.x] = sum = sum + r[threadIdx.x+1];
// save the result
if (lane == 0) {
x[i] = dinv * (bi - r[threadIdx.x]);
__threadfence();
       /* remove i from the other rows' dependency counts */
for (int k=p2; k<q2; k++) {
int s1 = jb[k-1]-1;
int *p = dp + s1;
int old = atomicSub(p, 1);
if (old == 1) {
while (atomicCAS((int*)lock, 0, 1) != 0);
(*tail)++;
jlev[*tail] = s1 + 1;
__threadfence();
atomicExch((int*)lock, 0);
}
}
}
}
//--------------------------------------------------------
void luSolvSF2(int n, int nnz, struct csr_t *csr,
struct syncfree_t *syncf, REAL *x, REAL *b)
{
int j, *d_ia, *d_ja, *d_da, *d_ib, *d_jb, *d_db, *d_dpl,
*d_dpu, *d_jlevL, *d_jlevU, *lockL, *lockU, *headL, *headU,
*tailL, *tailU;
REAL *d_a, *d_b, *d_x;
double t1, t2;
/*------------------- allocate Device Memory */
hipMalloc((void **)&d_ia, (n+1)*sizeof(int));
hipMalloc((void **)&d_ja, nnz*sizeof(int));
hipMalloc((void **)&d_da, n*sizeof(int));
hipMalloc((void **)&d_a, nnz*sizeof(REAL));
hipMalloc((void **)&d_b, n*sizeof(REAL));
hipMalloc((void **)&d_x, n*sizeof(REAL));
hipMalloc((void **)&d_ib, (n+1)*sizeof(int));
hipMalloc((void **)&d_jb, nnz*sizeof(int));
hipMalloc((void **)&d_db, n*sizeof(int));
hipMalloc((void **)&d_dpl, n*sizeof(int));
hipMalloc((void **)&d_dpu, n*sizeof(int));
hipMalloc((void **)&d_jlevL, n*sizeof(int));
hipMalloc((void **)&d_jlevU, n*sizeof(int));
hipMalloc((void **)&lockL, sizeof(int));
hipMalloc((void **)&lockU, sizeof(int));
hipMalloc((void **)&headL, sizeof(int));
hipMalloc((void **)&headU, sizeof(int));
hipMalloc((void **)&tailL, sizeof(int));
hipMalloc((void **)&tailU, sizeof(int));
/*------------------- Memcpy */
hipMemcpy(d_ia, csr->ia, (n+1)*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_ja, csr->ja, nnz*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_da, csr->di, n*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_a, csr->a, nnz*sizeof(REAL),
hipMemcpyHostToDevice);
hipMemcpy(d_b, b, n*sizeof(REAL),
hipMemcpyHostToDevice);
hipMemcpy(d_ib, syncf->AT.ia, (n+1)*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_jb, syncf->AT.ja, nnz*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_db, syncf->AT.di, n*sizeof(int),
hipMemcpyHostToDevice);
//hipMemcpy(d_dpl, syncf->dpL, n*sizeof(int),
//hipMemcpyHostToDevice);
//hipMemcpy(d_dpu, syncf->dpU, n*sizeof(int),
//hipMemcpyHostToDevice);
hipMemcpy(d_jlevL, syncf->lev.jlevL, n*sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(d_jlevU, syncf->lev.jlevU, n*sizeof(int),
hipMemcpyHostToDevice);
int *tmp1 = (int*) malloc(n*sizeof(int));
int *tmp2 = (int*) malloc(n*sizeof(int));
// number of free rows (level == 0)
int tl = syncf->lev.ilevL[1] - syncf->lev.ilevL[0];
int tu = syncf->lev.ilevU[1] - syncf->lev.ilevU[0];
t1 = wall_timer();
for (j=0; j<REPEAT; j++) {
int bDim = BLOCKDIM;
/*-------- init dependent counter of L and U */
int gDim0 = (n + BLOCKDIM - 1) / BLOCKDIM;
hipLaunchKernelGGL(( LU_SOL_SF2_INIT), dim3(gDim0), dim3(bDim), 0, 0, n, d_ia, d_da, d_dpl, d_dpu, headL, headU,
tailL, tl-1, tailU, tu-1, lockL, lockU);
/*-------- num of warps per block */
int nwb = BLOCKDIM / WARP;
int gDim = (n + nwb-1) / nwb;
// L-solve
hipLaunchKernelGGL(( L_SOL_SF2), dim3(gDim), dim3(bDim), 0, 0, n, d_b, d_x, d_a, d_ja, d_ia, d_da,
d_jb, d_ib, d_db, d_dpl, d_jlevL, headL, tailL, lockL);
// U-solve
//U_SOL_SF1<<<gDim, bDim>>>(n, d_x, d_x, d_a, d_ja, d_ia, d_da,
// d_jb, d_ib, d_db, d_dpu, d_jlevU);
break;
}
//Barrier for GPU calls
hipDeviceSynchronize();
t2 = wall_timer() - t1;
hipMemcpy(tmp1, d_dpl, n*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(tmp2, d_dpu, n*sizeof(int), hipMemcpyDeviceToHost);
for (int i=0; i<n; i++) {
//if (tmp1[i] != syncf->dpL[i]) printf("i=%d: %d %d\n", i, tmp1[i], syncf->dpL[i]);
//if (tmp2[i] != syncf->dpU[i]) printf("i=%d: %d %d\n", i, tmp2[i], syncf->dpU[i]);
if (tmp1[i]) printf("i=%d: %d\n", i, tmp1[i]);
//if (tmp2[i]) printf("i=%d: %d\n", i, tmp2[i]);
}
printf("[GPU] SyncFree v-1\n");
printf(" time(s)=%f, Gflops=%5.2f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2);
/*-------- copy x to host mem */
hipMemcpy(x, d_x, n*sizeof(REAL),
hipMemcpyDeviceToHost);
hipFree(d_ia);
hipFree(d_ja);
hipFree(d_da);
hipFree(d_a);
hipFree(d_b);
hipFree(d_ib);
hipFree(d_jb);
hipFree(d_db);
hipFree(d_x);
hipFree(d_dpl);
hipFree(d_dpu);
free(tmp1);
free(tmp2);
}
#endif
| 1d3c181ac481f76f536bd560a367636f5a80cd93.cu | #include "lusol.h"
#include "cusparse.h"
/*-----------------------------------------------*/
void cuda_init(int argc, char **argv) {
int deviceCount, dev;
cudaGetDeviceCount(&deviceCount);
printf("=========================================\n");
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
printf("There is no device supporting CUDA.\n");
else if (deviceCount == 1)
printf("There is 1 device supporting CUDA\n");
else
printf("There are %d devices supporting CUDA\n", deviceCount);
}
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Major revision number: %d\n",
deviceProp.major);
printf(" Minor revision number: %d\n",
deviceProp.minor);
printf(" Total amount of global memory: %.2f GB\n",
deviceProp.totalGlobalMem/1e9);
}
dev = 0;
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("\nRunning on Device %d: \"%s\"\n", dev, deviceProp.name);
printf("=========================================\n");
}
/*---------------------------------------------------*/
void cuda_check_err() {
cudaError_t cudaerr = cudaGetLastError() ;
if (cudaerr != cudaSuccess)
printf("error: %s\n",cudaGetErrorString(cudaerr));
}
void luSolv_cusparse1(struct csr_t *csr, REAL *b, REAL *x,
int REPEAT, bool print) {
int n = csr->n;
int nnz = csr->nnz;
int *d_ia, *d_ja;
REAL *d_a, *d_b, *d_x, *d_y;
double t1, t2, ta;
REAL done = 1.0;
/*------------------- allocate Device Memory */
cudaMalloc((void **)&d_ia, (n+1)*sizeof(int));
cudaMalloc((void **)&d_ja, nnz*sizeof(int));
cudaMalloc((void **)&d_a, nnz*sizeof(REAL));
cudaMalloc((void **)&d_b, n*sizeof(REAL));
cudaMalloc((void **)&d_x, n*sizeof(REAL));
cudaMalloc((void **)&d_y, n*sizeof(REAL));
/*------------------- Memcpy */
cudaMemcpy(d_ia, csr->ia, (n+1)*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_ja, csr->ja, nnz*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_a, csr->a, nnz*sizeof(REAL),
cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, n*sizeof(REAL),
cudaMemcpyHostToDevice);
cusparseStatus_t status;
cusparseHandle_t handle=0;
cusparseMatDescr_t descr_L=0, descr_U=0;
/* initialize cusparse library */
status= cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
exit(1);
}
/* create and setup matrix descriptor for L */
status= cusparseCreateMatDescr(&descr_L);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization L failed\n");
exit(1);
}
t1 = wall_timer();
cusparseSolveAnalysisInfo_t info_L = 0;
cusparseCreateSolveAnalysisInfo(&info_L);
cusparseSetMatType(descr_L,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatFillMode(descr_L, CUSPARSE_FILL_MODE_LOWER);
cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ONE);
cusparseSetMatDiagType(descr_L, CUSPARSE_DIAG_TYPE_NON_UNIT);
#if DOUBLEPRECISION
status = cusparseDcsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L);
#else
status = cusparseScsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L);
#endif
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_analysis L failed\n");
exit(1);
}
/* create and setup matrix descriptor for U */
status= cusparseCreateMatDescr(&descr_U);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization U failed\n");
exit(1);
}
cusparseSolveAnalysisInfo_t info_U = 0;
cusparseCreateSolveAnalysisInfo(&info_U);
cusparseSetMatType(descr_U, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatFillMode(descr_U, CUSPARSE_FILL_MODE_UPPER);
cusparseSetMatIndexBase(descr_U, CUSPARSE_INDEX_BASE_ONE);
cusparseSetMatDiagType(descr_U, CUSPARSE_DIAG_TYPE_NON_UNIT);
#if DOUBLEPRECISION
status = cusparseDcsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U);
#else
status = cusparseScsrsv_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U);
#endif
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_analysis L failed\n");
exit(1);
}
//Barrier for GPU calls
cudaThreadSynchronize();
ta = wall_timer() - t1;
t1 = wall_timer();
for (int j=0; j<REPEAT; j++) {
#if DOUBLEPRECISION
// L-solve
status = cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, &done,
descr_L, d_a, d_ia, d_ja, info_L, d_b, d_y);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
// U-solve
status = cusparseDcsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, &done,
descr_U, d_a, d_ia, d_ja, info_U, d_y, d_x);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
#else
// L-solve
status = cusparseScsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, &done,
descr_L, d_a, d_ia, d_ja, info_L, d_b, d_y);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
// U-solve
status = cusparseScsrsv_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, &done,
descr_U, d_a, d_ia, d_ja, info_U, d_y, d_x);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
#endif
}
//Barrier for GPU calls
cudaThreadSynchronize();
t2 = wall_timer() - t1;
if (print) {
printf("[GPU] CUSPARSE csrsv\n");
printf(" time(s)=%f, Gflops=%5.2f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2);
printf(" analysis time %f (%f) ", ta, ta/t2*REPEAT);
}
/*-------- copy x to host mem */
cudaMemcpy(x, d_x, n*sizeof(REAL),
cudaMemcpyDeviceToHost);
cudaFree(d_ia);
cudaFree(d_ja);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_x);
cudaFree(d_y);
/* destroy matrix descriptor */
status = cusparseDestroyMatDescr(descr_L);
descr_L = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor destruction failed\n");
exit(1);
}
status = cusparseDestroyMatDescr(descr_U);
descr_U = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor destruction failed\n");
exit(1);
}
status = cusparseDestroySolveAnalysisInfo(info_L);
info_L = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("analysis info destruction failed\n");
exit(1);
}
status = cusparseDestroySolveAnalysisInfo(info_U);
info_U = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("analysis info destruction failed\n");
exit(1);
}
/* destroy handle */
status = cusparseDestroy(handle);
handle = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library release of resources failed\n");
exit(1);
}
}
void luSolv_cusparse2(struct csr_t *csr, REAL *b, REAL *x,
int REPEAT, bool print) {
int n = csr->n;
int nnz = csr->nnz;
int *d_ia, *d_ja;
REAL *d_a, *d_b, *d_x, *d_y;
double t1, t2, ta;
REAL done = 1.0;
/*------------------- allocate Device Memory */
cudaMalloc((void **)&d_ia, (n+1)*sizeof(int));
cudaMalloc((void **)&d_ja, nnz*sizeof(int));
cudaMalloc((void **)&d_a, nnz*sizeof(REAL));
cudaMalloc((void **)&d_b, n*sizeof(REAL));
cudaMalloc((void **)&d_x, n*sizeof(REAL));
cudaMalloc((void **)&d_y, n*sizeof(REAL));
/*------------------- Memcpy */
cudaMemcpy(d_ia, csr->ia, (n+1)*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_ja, csr->ja, nnz*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_a, csr->a, nnz*sizeof(REAL),
cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, n*sizeof(REAL),
cudaMemcpyHostToDevice);
cusparseStatus_t status;
cusparseHandle_t handle=0;
cusparseMatDescr_t descr_L=0, descr_U=0;
/* initialize cusparse library */
status= cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
exit(1);
}
/* create and setup matrix descriptor for L */
status= cusparseCreateMatDescr(&descr_L);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization L failed\n");
exit(1);
}
t1 = wall_timer();
csrsv2Info_t info_L = 0;
cusparseCreateCsrsv2Info(&info_L);
cusparseSetMatType(descr_L,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatFillMode(descr_L, CUSPARSE_FILL_MODE_LOWER);
cusparseSetMatIndexBase(descr_L, CUSPARSE_INDEX_BASE_ONE);
cusparseSetMatDiagType(descr_L, CUSPARSE_DIAG_TYPE_NON_UNIT);
int pBufferSize_L;
void *pBuffer_L = 0;
#if DOUBLEPRECISION
cusparseDcsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L, &pBufferSize_L);
#else
cusparseScsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L, &pBufferSize_L);
#endif
// pBuffer returned by cudaMalloc is automatically aligned to 128 bytes.
cudaMalloc((void**)&pBuffer_L, pBufferSize_L);
#if DOUBLEPRECISION
status = cusparseDcsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L,
CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_L);
#else
status = cusparseScsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_L, d_a, d_ia, d_ja, info_L,
CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_L);
#endif
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_analysis L failed\n");
exit(1);
}
/* create and setup matrix descriptor for U */
status= cusparseCreateMatDescr(&descr_U);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization U failed\n");
exit(1);
}
csrsv2Info_t info_U = 0;
cusparseCreateCsrsv2Info(&info_U);
cusparseSetMatType(descr_U, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatFillMode(descr_U, CUSPARSE_FILL_MODE_UPPER);
cusparseSetMatIndexBase(descr_U, CUSPARSE_INDEX_BASE_ONE);
cusparseSetMatDiagType(descr_U, CUSPARSE_DIAG_TYPE_NON_UNIT);
int pBufferSize_U;
void *pBuffer_U = 0;
#if DOUBLEPRECISION
cusparseDcsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U, &pBufferSize_U);
#else
cusparseScsrsv2_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U, &pBufferSize_U);
#endif
// pBuffer returned by cudaMalloc is automatically aligned to 128 bytes.
cudaMalloc((void**)&pBuffer_U, pBufferSize_U);
#if DOUBLEPRECISION
status = cusparseDcsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U,
CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_U);
#else
status = cusparseScsrsv2_analysis(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz,
descr_U, d_a, d_ia, d_ja, info_U,
CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_U);
#endif
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_analysis U failed\n");
exit(1);
}
//Barrier for GPU calls
cudaThreadSynchronize();
ta = wall_timer() - t1;
t1 = wall_timer();
for (int j=0; j<REPEAT; j++) {
#if DOUBLEPRECISION
// L-solve
status = cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, &done,
descr_L, d_a, d_ia, d_ja, info_L, d_b, d_y,
CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_L);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
// U-solve
status = cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, &done,
descr_U, d_a, d_ia, d_ja, info_U, d_y, d_x,
CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_U);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
#else
// L-solve
status = cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, &done,
descr_L, d_a, d_ia, d_ja, info_L, d_b, d_y,
CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_L);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
// U-solve
status = cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, &done,
descr_U, d_a, d_ia, d_ja, info_U, d_y, d_x,
CUSPARSE_SOLVE_POLICY_USE_LEVEL, pBuffer_U);
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("cusparse?csrsv_solve L failed\n");
exit(1);
}
#endif
}
//Barrier for GPU calls
cudaThreadSynchronize();
t2 = wall_timer() - t1;
if (print) {
printf("[GPU] CUSPARSE csrsv2\n");
printf(" time(s)=%f, Gflops=%5.2f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2);
printf(" analysis time %f (%f) ", ta, ta/t2*REPEAT);
}
/*-------- copy x to host mem */
cudaMemcpy(x, d_x, n*sizeof(REAL),
cudaMemcpyDeviceToHost);
cudaFree(d_ia);
cudaFree(d_ja);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(pBuffer_L);
cudaFree(pBuffer_U);
/* destroy matrix descriptor */
status = cusparseDestroyMatDescr(descr_L);
descr_L = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor destruction failed\n");
exit(1);
}
status = cusparseDestroyMatDescr(descr_U);
descr_U = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor destruction failed\n");
exit(1);
}
status = cusparseDestroyCsrsv2Info(info_L);
info_L = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("analysis info destruction failed\n");
exit(1);
}
status = cusparseDestroyCsrsv2Info(info_U);
info_U = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("analysis info destruction failed\n");
exit(1);
}
/* destroy handle */
status = cusparseDestroy(handle);
handle = 0;
if (status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library release of resources failed\n");
exit(1);
}
}
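/*
 * Hedged usage sketch (editor's addition, not part of the original solver): shows how a
 * caller might wire a tiny one-based CSR matrix into luSolv_cusparse2(). The csr_t field
 * names (n, nnz, ia, ja, a) follow the accesses above; the matrix values, the REPEAT
 * count of 1, and the sketch's name are illustrative assumptions.
 */
static void luSolv_cusparse2_sketch(void)
{
  /* 2x2 matrix [2 0; 1 3] in CSR with one-based indices (CUSPARSE_INDEX_BASE_ONE),
     standing in for a combined L/U factor as the solver expects */
  static int ia[3] = {1, 2, 4};
  static int ja[3] = {1, 1, 2};
  static REAL a[3] = {2.0, 1.0, 3.0};
  struct csr_t csr;
  csr.n = 2;
  csr.nnz = 3;
  csr.ia = ia;
  csr.ja = ja;
  csr.a = a;
  REAL b[2] = {2.0, 5.0}, x[2] = {0.0, 0.0};
  luSolv_cusparse2(&csr, b, x, 1 /* REPEAT */, true /* print */);
}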
#if 0
__global__
void LU_SOL_SF2_INIT(int n, int *ia, int *da, int *dpl, int *dpu,
int *hl, int *hu, int *d_tl, int tl,
int *d_tu, int tu, int *lockL, int *lockU) {
int gid = blockIdx.x * BLOCKDIM + threadIdx.x;
if (gid < n) {
int t = da[gid];
dpl[gid] = t - ia[gid];
dpu[gid] = ia[gid+1] - t - 1;
}
if (gid == 0) {
*hl = 0;
*hu = 0;
*d_tl = tl;
*d_tu = tu;
*lockL = 0;
*lockU = 0;
}
}
/* lock: 0 is unlocked and 1 is locked */
__global__
void L_SOL_SF2(int n, REAL *b, REAL *x, REAL *a, int *ja, int *ia, int *da,
int *jb, int *ib, int *db, int *dp, int *jlev,
volatile int *head, volatile int *tail, volatile int *lock) {
// num of warps in grid
//int nw = gridDim.x * BLOCKDIM / WARP;
// global warp id
int wid = (blockIdx.x * BLOCKDIM + threadIdx.x) / WARP;
// first warp in this block
//int fid = (blockIdx.x * BLOCKDIM) / WARP;
// thread lane in each warp
int lane = threadIdx.x & (WARP-1);
  // shared memory for partial results in the reduction
volatile __shared__ REAL r[BLOCKDIM + 16];
// local warp id
int wlane = threadIdx.x / WARP;
volatile __shared__ int buffer[BLOCKDIM/WARP];
  // make dp volatile so the compiler does not use a cached value
//volatile int *vdp = dp;
if (wid >= n) {
return;
}
int i, p1, q1, p2, q2, hd, tl;
REAL dinv, bi, sum;
if (lane == 0) {
while (1) {
// try to lock
while (atomicCAS((int*)lock, 0, 1) != 0);
// locked, read the head and tail
hd = *head; tl = *tail;
if (hd <= tl) {
/* there is a row for me to work on, increase the head */
(*head) ++;
__threadfence();
}
        // release the lock
atomicExch((int*)lock, 0);
if (hd >= n) {
buffer[wlane] = -1;
break;
} else if (hd <= tl) {
int tt = jlev[hd] - 1;
buffer[wlane] = tt;
break;
}
}
}
  // this warp now has a row to work on
i = buffer[wlane];
//if (lane == 0) {
// printf("hd %d tl %d row i = %d\n", hd, tl, i);
//}
if (i < 0) {
return;
}
p1 = ia[i];
q1 = da[i];
p2 = db[i] + 1;
q2 = ib[i+1];
sum = 0.0;
if (lane == 0) {
dinv = 1.0 / a[q1-1];
bi = b[i];
}
for (int k=p1+lane; k<q1; k+=WARP) {
sum += a[k-1]*x[ja[k-1]-1];
}
// parallel reduction
r[threadIdx.x] = sum;
r[threadIdx.x] = sum = sum + r[threadIdx.x+16];
r[threadIdx.x] = sum = sum + r[threadIdx.x+8];
r[threadIdx.x] = sum = sum + r[threadIdx.x+4];
r[threadIdx.x] = sum = sum + r[threadIdx.x+2];
r[threadIdx.x] = sum = sum + r[threadIdx.x+1];
// save the result
if (lane == 0) {
x[i] = dinv * (bi - r[threadIdx.x]);
__threadfence();
       /* remove i from the other rows' dependency counts */
for (int k=p2; k<q2; k++) {
int s1 = jb[k-1]-1;
int *p = dp + s1;
int old = atomicSub(p, 1);
if (old == 1) {
while (atomicCAS((int*)lock, 0, 1) != 0);
(*tail)++;
jlev[*tail] = s1 + 1;
__threadfence();
atomicExch((int*)lock, 0);
}
}
}
}
//--------------------------------------------------------
void luSolvSF2(int n, int nnz, struct csr_t *csr,
struct syncfree_t *syncf, REAL *x, REAL *b)
{
int j, *d_ia, *d_ja, *d_da, *d_ib, *d_jb, *d_db, *d_dpl,
*d_dpu, *d_jlevL, *d_jlevU, *lockL, *lockU, *headL, *headU,
*tailL, *tailU;
REAL *d_a, *d_b, *d_x;
double t1, t2;
/*------------------- allocate Device Memory */
cudaMalloc((void **)&d_ia, (n+1)*sizeof(int));
cudaMalloc((void **)&d_ja, nnz*sizeof(int));
cudaMalloc((void **)&d_da, n*sizeof(int));
cudaMalloc((void **)&d_a, nnz*sizeof(REAL));
cudaMalloc((void **)&d_b, n*sizeof(REAL));
cudaMalloc((void **)&d_x, n*sizeof(REAL));
cudaMalloc((void **)&d_ib, (n+1)*sizeof(int));
cudaMalloc((void **)&d_jb, nnz*sizeof(int));
cudaMalloc((void **)&d_db, n*sizeof(int));
cudaMalloc((void **)&d_dpl, n*sizeof(int));
cudaMalloc((void **)&d_dpu, n*sizeof(int));
cudaMalloc((void **)&d_jlevL, n*sizeof(int));
cudaMalloc((void **)&d_jlevU, n*sizeof(int));
cudaMalloc((void **)&lockL, sizeof(int));
cudaMalloc((void **)&lockU, sizeof(int));
cudaMalloc((void **)&headL, sizeof(int));
cudaMalloc((void **)&headU, sizeof(int));
cudaMalloc((void **)&tailL, sizeof(int));
cudaMalloc((void **)&tailU, sizeof(int));
/*------------------- Memcpy */
cudaMemcpy(d_ia, csr->ia, (n+1)*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_ja, csr->ja, nnz*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_da, csr->di, n*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_a, csr->a, nnz*sizeof(REAL),
cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, n*sizeof(REAL),
cudaMemcpyHostToDevice);
cudaMemcpy(d_ib, syncf->AT.ia, (n+1)*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_jb, syncf->AT.ja, nnz*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_db, syncf->AT.di, n*sizeof(int),
cudaMemcpyHostToDevice);
//cudaMemcpy(d_dpl, syncf->dpL, n*sizeof(int),
//cudaMemcpyHostToDevice);
//cudaMemcpy(d_dpu, syncf->dpU, n*sizeof(int),
//cudaMemcpyHostToDevice);
cudaMemcpy(d_jlevL, syncf->lev.jlevL, n*sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(d_jlevU, syncf->lev.jlevU, n*sizeof(int),
cudaMemcpyHostToDevice);
int *tmp1 = (int*) malloc(n*sizeof(int));
int *tmp2 = (int*) malloc(n*sizeof(int));
// number of free rows (level == 0)
int tl = syncf->lev.ilevL[1] - syncf->lev.ilevL[0];
int tu = syncf->lev.ilevU[1] - syncf->lev.ilevU[0];
t1 = wall_timer();
for (j=0; j<REPEAT; j++) {
int bDim = BLOCKDIM;
/*-------- init dependent counter of L and U */
int gDim0 = (n + BLOCKDIM - 1) / BLOCKDIM;
LU_SOL_SF2_INIT<<<gDim0, bDim>>>(n, d_ia, d_da, d_dpl, d_dpu, headL, headU,
tailL, tl-1, tailU, tu-1, lockL, lockU);
/*-------- num of warps per block */
int nwb = BLOCKDIM / WARP;
int gDim = (n + nwb-1) / nwb;
// L-solve
L_SOL_SF2<<<gDim, bDim>>>(n, d_b, d_x, d_a, d_ja, d_ia, d_da,
d_jb, d_ib, d_db, d_dpl, d_jlevL, headL, tailL, lockL);
// U-solve
//U_SOL_SF1<<<gDim, bDim>>>(n, d_x, d_x, d_a, d_ja, d_ia, d_da,
// d_jb, d_ib, d_db, d_dpu, d_jlevU);
break;
}
//Barrier for GPU calls
cudaThreadSynchronize();
t2 = wall_timer() - t1;
cudaMemcpy(tmp1, d_dpl, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(tmp2, d_dpu, n*sizeof(int), cudaMemcpyDeviceToHost);
for (int i=0; i<n; i++) {
//if (tmp1[i] != syncf->dpL[i]) printf("i=%d: %d %d\n", i, tmp1[i], syncf->dpL[i]);
//if (tmp2[i] != syncf->dpU[i]) printf("i=%d: %d %d\n", i, tmp2[i], syncf->dpU[i]);
if (tmp1[i]) printf("i=%d: %d\n", i, tmp1[i]);
//if (tmp2[i]) printf("i=%d: %d\n", i, tmp2[i]);
}
printf("[GPU] SyncFree v-1\n");
printf(" time(s)=%f, Gflops=%5.2f", t2/REPEAT, REPEAT*2*((nnz)/1e9)/t2);
/*-------- copy x to host mem */
cudaMemcpy(x, d_x, n*sizeof(REAL),
cudaMemcpyDeviceToHost);
cudaFree(d_ia);
cudaFree(d_ja);
cudaFree(d_da);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_ib);
cudaFree(d_jb);
cudaFree(d_db);
cudaFree(d_x);
cudaFree(d_dpl);
cudaFree(d_dpu);
free(tmp1);
free(tmp2);
}
#endif
|
317612d3568aa294cf036dc53e97c6ba2394986e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include <stdarg.h>
#include "runtime.cuh"
__device__ int syncID;
__device__ int threadNum;
int *done, *doneDev;
int *totalScheTasks, *totalScheTasksDev;
cTaskStruct *ccTaskPool;
gTaskStruct *ggTaskPool;
static int taskId = 0;
static int lastEmptyTask = 0;
static int round_count = 0;
static int taskIndex = 0;
static int barrierCount = 0;
hipStream_t master_kernel_stream;
hipStream_t runtime_stream;
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool);
void runtime_init(){
int i;
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
checkCudaErrors(hipStreamCreate(&runtime_stream));
checkCudaErrors(hipStreamCreate(&master_kernel_stream));
// done flag to interrupt runtime
checkCudaErrors(hipHostMalloc(&done, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&doneDev, sizeof(int)));
// host task buffer
checkCudaErrors(hipHostMalloc(&ccTaskPool, (BK_NUM*BP_NUM)*sizeof(cTaskStruct), hipHostMallocDefault));
// device task buffer
checkCudaErrors(hipMalloc(&ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct)));
// totalScheTasks:
checkCudaErrors(hipHostMalloc(&totalScheTasks, sizeof(int), hipHostMallocDefault));
checkCudaErrors(hipMalloc(&totalScheTasksDev, sizeof(int)));
for(i = 0; i < (BK_NUM*BP_NUM); i++) {
ccTaskPool[i].ready = 0;
ccTaskPool[i].done = -1;
ccTaskPool[i].taskId = 0;
ccTaskPool[i].readyId = -1;
}
// runtime variables copy
*done = 0;
*totalScheTasks = 0;
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipMemcpyAsync(ggTaskPool, ccTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
//MasterKernel
hipLaunchKernelGGL(( masterKernel), dim3(BK_NUM), dim3(TD_NUM), SH_MEM_SIZE, master_kernel_stream, doneDev, totalScheTasksDev, ggTaskPool);
}
int taskLaunch(int paraN, ...){
int j, k;
int terminate = 1;
va_list ap;
va_start(ap,paraN);
while(taskIndex < (BK_NUM*BP_NUM) && terminate == 1){
if(ccTaskPool[taskIndex].ready == 0 && ccTaskPool[taskIndex].readyId == -1){
// **Add here**: renew task table, set the bit of task ID on
// **Add here**: get_ID()
ccTaskPool[taskIndex].ready = 1;
ccTaskPool[taskIndex].taskId = taskIndex+1;
ccTaskPool[taskIndex].done = 1;
if(round_count > 0) {
ccTaskPool[taskIndex].readyId = taskId;
}else{
lastEmptyTask = taskIndex;
}
round_count ++;
taskId = taskIndex;
for(j = 0; j < paraN; j++){ // set parameters
int type = va_arg(ap, enum mytypes);
switch(type){
case INT:
if(j == 0) ccTaskPool[taskIndex].thread = va_arg(ap, int);
if(j == 1) ccTaskPool[taskIndex].block = va_arg(ap, int);
if(j == 2) ccTaskPool[taskIndex].sharemem = va_arg(ap, int);
if(j == 3) ccTaskPool[taskIndex].sync = va_arg(ap, int);
if(j == 4) ccTaskPool[taskIndex].funcId = va_arg(ap, int);
if(j > 4) ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, int*);
break;
case FLOAT:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, float*);
break;
case DOUBLE:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, double*);
break;
case LONG:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, long*);
break;
default:
break;
} // End switch
// } // End else
} // End for paraN
checkCudaErrors(hipMemcpyAsync(ggTaskPool+taskIndex, ccTaskPool+taskIndex,
sizeof(gTaskStruct), hipMemcpyHostToDevice, runtime_stream));
terminate = 0;
} // end if cTaskPool
taskIndex++;
if(taskIndex == (BK_NUM*BP_NUM) && round_count > 0){
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(hipMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
barrierCount ++;
round_count = 0;
}
if(taskIndex == (BK_NUM*BP_NUM)){
checkCudaErrors(hipMemcpyAsync(ccTaskPool, ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), hipMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
taskIndex = 0;
}
} // end while i < BK_NUM*BP_NUM
va_end(ap);
return taskId;
}
void waitAll(int num_tasks){
*totalScheTasks = 0;
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(hipMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,
sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
round_count = 0;
int i;
while(*totalScheTasks < num_tasks){
checkCudaErrors(hipMemcpyAsync(totalScheTasks, totalScheTasksDev, sizeof(int), hipMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
}
*totalScheTasks = 0;
checkCudaErrors(hipMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
taskIndex = 0;
taskId = 0;
lastEmptyTask = 0;
}
void runtime_destroy(){
*done = 1;
checkCudaErrors(hipMemcpyAsync(doneDev, done, sizeof(int), hipMemcpyHostToDevice, runtime_stream));
checkCudaErrors(hipStreamSynchronize(runtime_stream));
}
void runtime_free(){
checkCudaErrors(hipStreamDestroy(master_kernel_stream));
checkCudaErrors(hipStreamDestroy(runtime_stream));
checkCudaErrors(hipHostFree(done));
checkCudaErrors(hipHostFree(ccTaskPool));
checkCudaErrors(hipHostFree(totalScheTasks));
checkCudaErrors(hipFree(doneDev));
checkCudaErrors(hipFree(ggTaskPool));
checkCudaErrors(hipFree(totalScheTasksDev));
}
| 317612d3568aa294cf036dc53e97c6ba2394986e.cu | #include <cuda_runtime.h>
#include <helper_cuda.h>
#include <stdint.h>
#include "../../common/para.h"
#include "../../common/para.cuh"
#include <stdarg.h>
#include "runtime.cuh"
__device__ int syncID;
__device__ int threadNum;
int *done, *doneDev;
int *totalScheTasks, *totalScheTasksDev;
cTaskStruct *ccTaskPool;
gTaskStruct *ggTaskPool;
static int taskId = 0;
static int lastEmptyTask = 0;
static int round_count = 0;
static int taskIndex = 0;
static int barrierCount = 0;
cudaStream_t master_kernel_stream;
cudaStream_t runtime_stream;
__global__ void masterKernel(volatile int *done, volatile int *totalScheTasks, volatile gTaskStruct *gTaskPool);
void runtime_init(){
int i;
setenv("CUDA_DEVICE_MAX_CONNECTIONS", "32", 1);
checkCudaErrors(cudaStreamCreate(&runtime_stream));
checkCudaErrors(cudaStreamCreate(&master_kernel_stream));
// done flag to interrupt runtime
checkCudaErrors(cudaHostAlloc(&done, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&doneDev, sizeof(int)));
// host task buffer
checkCudaErrors(cudaHostAlloc(&ccTaskPool, (BK_NUM*BP_NUM)*sizeof(cTaskStruct), cudaHostAllocDefault));
// device task buffer
checkCudaErrors(cudaMalloc(&ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct)));
// totalScheTasks:
checkCudaErrors(cudaHostAlloc(&totalScheTasks, sizeof(int), cudaHostAllocDefault));
checkCudaErrors(cudaMalloc(&totalScheTasksDev, sizeof(int)));
for(i = 0; i < (BK_NUM*BP_NUM); i++) {
ccTaskPool[i].ready = 0;
ccTaskPool[i].done = -1;
ccTaskPool[i].taskId = 0;
ccTaskPool[i].readyId = -1;
}
// runtime variables copy
*done = 0;
*totalScheTasks = 0;
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaMemcpyAsync(ggTaskPool, ccTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
//MasterKernel
masterKernel<<<BK_NUM, TD_NUM, SH_MEM_SIZE, master_kernel_stream>>>(doneDev, totalScheTasksDev, ggTaskPool);
}
int taskLaunch(int paraN, ...){
int j, k;
int terminate = 1;
va_list ap;
va_start(ap,paraN);
while(taskIndex < (BK_NUM*BP_NUM) && terminate == 1){
if(ccTaskPool[taskIndex].ready == 0 && ccTaskPool[taskIndex].readyId == -1){
// **Add here**: renew task table, set the bit of task ID on
// **Add here**: get_ID()
ccTaskPool[taskIndex].ready = 1;
ccTaskPool[taskIndex].taskId = taskIndex+1;
ccTaskPool[taskIndex].done = 1;
if(round_count > 0) {
ccTaskPool[taskIndex].readyId = taskId;
}else{
lastEmptyTask = taskIndex;
}
round_count ++;
taskId = taskIndex;
for(j = 0; j < paraN; j++){ // set parameters
int type = va_arg(ap, enum mytypes);
switch(type){
case INT:
if(j == 0) ccTaskPool[taskIndex].thread = va_arg(ap, int);
if(j == 1) ccTaskPool[taskIndex].block = va_arg(ap, int);
if(j == 2) ccTaskPool[taskIndex].sharemem = va_arg(ap, int);
if(j == 3) ccTaskPool[taskIndex].sync = va_arg(ap, int);
if(j == 4) ccTaskPool[taskIndex].funcId = va_arg(ap, int);
if(j > 4) ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, int*);
break;
case FLOAT:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, float*);
break;
case DOUBLE:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, double*);
break;
case LONG:
ccTaskPool[taskIndex].para[j-inParaNum] = va_arg(ap, long*);
break;
default:
break;
} // End switch
// } // End else
} // End for paraN
checkCudaErrors(cudaMemcpyAsync(ggTaskPool+taskIndex, ccTaskPool+taskIndex,
sizeof(gTaskStruct), cudaMemcpyHostToDevice, runtime_stream));
terminate = 0;
} // end if cTaskPool
taskIndex++;
if(taskIndex == (BK_NUM*BP_NUM) && round_count > 0){
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(cudaMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
barrierCount ++;
round_count = 0;
}
if(taskIndex == (BK_NUM*BP_NUM)){
checkCudaErrors(cudaMemcpyAsync(ccTaskPool, ggTaskPool, (BK_NUM*BP_NUM)*sizeof(gTaskStruct), cudaMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
taskIndex = 0;
}
} // end while i < BK_NUM*BP_NUM
va_end(ap);
return taskId;
}
void waitAll(int num_tasks){
*totalScheTasks = 0;
ccTaskPool[lastEmptyTask].readyId = taskId;
checkCudaErrors(cudaMemcpyAsync((int*)&ggTaskPool[lastEmptyTask].readyId, (int*)&ccTaskPool[lastEmptyTask].readyId,
sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
round_count = 0;
int i;
while(*totalScheTasks < num_tasks){
checkCudaErrors(cudaMemcpyAsync(totalScheTasks, totalScheTasksDev, sizeof(int), cudaMemcpyDeviceToHost, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
}
*totalScheTasks = 0;
checkCudaErrors(cudaMemcpyAsync(totalScheTasksDev, totalScheTasks, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
taskIndex = 0;
taskId = 0;
lastEmptyTask = 0;
}
void runtime_destroy(){
*done = 1;
checkCudaErrors(cudaMemcpyAsync(doneDev, done, sizeof(int), cudaMemcpyHostToDevice, runtime_stream));
checkCudaErrors(cudaStreamSynchronize(runtime_stream));
}
void runtime_free(){
checkCudaErrors(cudaStreamDestroy(master_kernel_stream));
checkCudaErrors(cudaStreamDestroy(runtime_stream));
checkCudaErrors(cudaFreeHost(done));
checkCudaErrors(cudaFreeHost(ccTaskPool));
checkCudaErrors(cudaFreeHost(totalScheTasks));
checkCudaErrors(cudaFree(doneDev));
checkCudaErrors(cudaFree(ggTaskPool));
checkCudaErrors(cudaFree(totalScheTasksDev));
}
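/*
 * Hedged usage sketch (editor's addition): the intended call sequence for this persistent
 * master-kernel runtime, pieced together from the vararg parsing in taskLaunch() above.
 * The funcId of 0, the single int* argument, and the sketch's name are placeholders; real
 * callers must match the dispatch table compiled into masterKernel.
 */
static void runtime_usage_sketch(int *d_arg)
{
  runtime_init();                 // launch masterKernel and create the runtime stream
  taskLaunch(6,                   // 5 control ints followed by 1 device pointer
             INT, TD_NUM,         // threads for the task
             INT, 1,              // thread blocks
             INT, 0,              // shared memory bytes
             INT, 0,              // sync flag
             INT, 0,              // funcId (placeholder index)
             INT, d_arg);         // kernel argument, parsed as int*
  waitAll(1);                     // block until one scheduled task has completed
  runtime_destroy();              // signal masterKernel to exit
  runtime_free();                 // release streams and host/device buffers
}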
|
812322217c4cef49c78ce8d40dbf774185aba293.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _USE_MATH_DEFINES
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <float.h>
#include <algorithm>
#include "kernel_hip.cuh"
using namespace std;
__global__ void waterFilterKernel(char *out, const GPUAtom *inWaters, const float centx, const float centy, const float centz, const float maxdist, const size_t nWaters)
{
//Find where we are in the GPU
int i = blockIdx.x * blockDim.x + threadIdx.x;
	//Make sure we aren't trying to access outside our pre-defined dimensions
if (i < nWaters)
{
//Get the distance between the water and the center of geometry
float distx = inWaters[i].x - centx;
float disty = inWaters[i].y - centy;
float distz = inWaters[i].z - centz;
float dist = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
//Flag if it is within the proper distance or not
out[i] = (dist < maxdist);
}
}
__global__ void bondDistKernel(char *out, const GPUAtom *inProtein, const GPUAtom *inWaters, const size_t nProteins, const size_t nWaters)
{
//Find where we are in the GPU
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
	//Make sure we aren't trying to access outside our pre-defined dimensions
if (i < nWaters && j < nProteins)
{
//out[(j * nWaters) + i] = 'n'; //Set default to "No bond"
if (i % 3 == 0) //Only look for oxygen atoms, which should be every third atom starting at atom index 0
{
//Get the distance between the heavy atoms
float distx = inWaters[i].x - inProtein[j].x;
float disty = inWaters[i].y - inProtein[j].y;
float distz = inWaters[i].z - inProtein[j].z;
float dist = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
out[(j * nWaters) + i] = (dist < 3.5f);
}
}
}
__global__ void waterToAcceptorKernel(char *out, const GPUAtom *inAcceptor, const GPUAtom *inWater, const size_t nAcceptors, const size_t nWaters)
{
//Find where we are in the GPU
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
	//Make sure we aren't trying to access outside our pre-defined dimensions
if (i < nWaters && j < nAcceptors)
{
if (out[(j * nWaters) + i] == true) //Make sure we are in bonding distance from before
{
//Find which hydrogen is between the acceptor and the oxygen
float distx = inWater[i + 1].x - inAcceptor[j].x;
float disty = inWater[i + 1].y - inAcceptor[j].y;
float distz = inWater[i + 1].z - inAcceptor[j].z;
float dist1 = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
distx = inWater[i + 2].x - inAcceptor[j].x;
disty = inWater[i + 2].y - inAcceptor[j].y;
distz = inWater[i + 2].z - inAcceptor[j].z;
float dist2 = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
int closestindex = -1;
if (dist1 < dist2)
{
closestindex = i + 1;
}
else
{
closestindex = i + 2;
}
//Calculate the angle parameters
distx = inWater[i].x - inAcceptor[j].x;
disty = inWater[i].y - inAcceptor[j].y;
distz = inWater[i].z - inAcceptor[j].z;
float a = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
distx = inWater[i].x - inWater[closestindex].x;
disty = inWater[i].y - inWater[closestindex].y;
distz = inWater[i].z - inWater[closestindex].z;
float b = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
distx = inWater[closestindex].x - inAcceptor[j].x;
disty = inWater[closestindex].y - inAcceptor[j].y;
distz = inWater[closestindex].z - inAcceptor[j].z;
float c = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
float theta = (acosf(((a * a) + (b*b) - (c*c)) / (2 * a * b))) * (180.0f / M_PI);
if (theta > 30.0f) //If the angle is too large, change the bond to not a bond
{
out[(j * nWaters) + i] = false;
}
}
}
}
__global__ void donorToWaterKernel(char *out, const GPUAtom *inDonor, const GPUAtom *inLinker, const GPUAtom *inWater, const size_t nDonors, const size_t nLinkers, const size_t nWaters)
{
//Find where we are in the GPU
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
	//Make sure we aren't trying to access outside our pre-defined dimensions
if (i < nWaters && j < nDonors)
{
if (out[(j * nWaters) + i] == true) //Make sure we are in bonding distance from before
{
int closestindex = -1;
float mindist = FLT_MAX; //Equivalent to "c"
//Find the bridging linker hydrogen in the residue
for (int k = 0; k < nLinkers; k++)
{
if (inLinker[k].resid == inDonor[j].resid) //Hydrogen belongs to same residue
{
float distx = inLinker[k].x - inWater[i].x;
float disty = inLinker[k].y - inWater[i].y;
float distz = inLinker[k].z - inWater[i].z;
float dist = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
if (dist < mindist)
{
mindist = dist;
closestindex = k;
}
else if (inDonor[j].resid < inLinker[k].resid)
{
break;
}
}
}
//Calculate the angle parameter
float distx = inLinker[closestindex].x - inDonor[j].x;
float disty = inLinker[closestindex].y - inDonor[j].y;
float distz = inLinker[closestindex].z - inDonor[j].z;
float a = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
distx = inWater[i].x - inDonor[j].x;
disty = inWater[i].y - inDonor[j].y;
distz = inWater[i].z - inDonor[j].z;
float b = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
float theta = (acosf(((a * a) + (b*b) - (mindist*mindist)) / (2 * a * b))) * (180.0f / M_PI);
if (theta > 30.0f) //If the angle is too large, change the bond to not a bond
{
out[(j * nWaters) + i] = false;
}
}
}
}
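/*
 * Hedged helper (editor's addition, not called by the original kernels): both angle kernels
 * above apply the same geometric hydrogen-bond test, a heavy-atom distance under 3.5 Angstroms
 * followed by a law-of-cosines angle of at most 30 degrees at the atom carrying the hydrogen.
 * This host-side function restates that criterion in one place; the parameter names are
 * illustrative.
 */
static bool hbondGeometryOk(const float heavyHeavyDist, const float heavyHydrogenDist, const float hydrogenOtherDist)
{
	if (heavyHeavyDist >= 3.5f) //Same distance cutoff as bondDistKernel
	{
		return false;
	}
	float cosTheta = ((heavyHeavyDist * heavyHeavyDist) + (heavyHydrogenDist * heavyHydrogenDist) -
		(hydrogenOtherDist * hydrogenOtherDist)) / (2.0f * heavyHeavyDist * heavyHydrogenDist);
	float theta = acosf(cosTheta) * (180.0f / M_PI);
	return theta <= 30.0f; //Same angle cutoff as the kernels above
}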
__global__ void timelineMapKernel(char * outMap, int * timeline, int * tllookup, int * boundAAs, const int currwater, const int window, const int threshold, const int nframes, const int nAAs)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; //Frame
int j = blockIdx.y * blockDim.y + threadIdx.y; //AA
if (i < (nframes - window) && j < nAAs)
{
int boundframes = 0;
for (int currwindow = 0; currwindow < window; currwindow++)
{
for (int currsearch = tllookup[i + currwindow]; currsearch < tllookup[i + currwindow + 1]; currsearch += 2)
{
if ((timeline[currsearch] == boundAAs[j]) && (timeline[currsearch + 1] == currwater))
{
boundframes++;
}
}
}
outMap[(j * nframes) + i] = (boundframes >= threshold);
}
}
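/*
 * Hedged CPU reference (editor's addition): mirrors timelineMapKernel for a single
 * (frame, amino acid) pair so the GPU occupancy map can be spot-checked on the host.
 * The flat timeline layout, with tllookup bracketing each frame's (AA id, water id)
 * pairs, is inferred from the kernel above; the function name is illustrative.
 */
static char timelineMapReference(const int * timeline, const int * tllookup, const int aaId,
	const int currwater, const int frame, const int window, const int threshold)
{
	int boundframes = 0;
	for (int currwindow = 0; currwindow < window; currwindow++)
	{
		for (int currsearch = tllookup[frame + currwindow]; currsearch < tllookup[frame + currwindow + 1]; currsearch += 2)
		{
			if ((timeline[currsearch] == aaId) && (timeline[currsearch + 1] == currwater))
			{
				boundframes++;
			}
		}
	}
	return (boundframes >= threshold);
}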
__global__ void visitAndBridgerAnalysisKernel(char * outbridger, char * outvisitlist, int * outframesbound, const char * timelinemap, const int nframes, const int nAAs)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; //Frame
if (i < nframes)
{
int boundcount = 0;
for (int j = 0; j < nAAs; j++)
{
if (timelinemap[(j*nframes) + i])
{
boundcount++;
outvisitlist[j] = true; //Might be dangerous
}
}
outbridger[i] = (boundcount > 1);
}
}
hipError_t waterFilterCuda(char *out, const GPUAtom *inWater, const float centx, const float centy, const float centz, const float maxdist, const size_t nWaters, hipDeviceProp_t &deviceProp)
{
// the device arrays
GPUAtom *dev_inWater = 0;
char *dev_out = 0;
hipError_t cudaStatus;
	// use div because it's more accurate than the rounding BS
auto gridDiv = div(nWaters, deviceProp.maxThreadsPerBlock);
auto gridY = gridDiv.quot;
// ass backwards way of rounding up (maybe use the same trick as above? It might be "faster")
if (gridDiv.rem != 0)
gridY++;
// find the block and grid size
auto blockSize = deviceProp.maxThreadsPerBlock;
int gridSize = min(16 * deviceProp.multiProcessorCount, gridY);
// Allocate GPU buffers for vectors
cudaStatus = hipMalloc((void**)&dev_out, nWaters * sizeof(char));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_inWater, nWaters * sizeof(GPUAtom));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_inWater, inWater, nWaters * sizeof(GPUAtom), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
waterFilterKernel << <gridSize, blockSize >> > (dev_out, dev_inWater, centx, centy, centz, maxdist, nWaters);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
cerr << "dielectric kernel launch failed: " << hipGetErrorString(cudaStatus) << endl;
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching density kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << hipGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(out, dev_out, nWaters * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// delete all our device arrays
Error:
hipFree(dev_inWater);
hipFree(dev_out);
return cudaStatus;
}
hipError_t bondDistCuda(char *out, const GPUAtom *inProteins, const GPUAtom *inWaters, const size_t nProteins, const size_t nWaters, hipDeviceProp_t &deviceProp)
{
// define device arrays
GPUAtom *dev_protein = 0;
GPUAtom *dev_water = 0;
char *dev_out = 0;
hipError_t cudaStatus;
// Setup the kernel dimensions
int blockDim = sqrt(deviceProp.maxThreadsPerBlock);
auto blockSize = dim3(blockDim, blockDim);
//Waters are chosen for x dimension, since CUDA can handle MUCH more data along the x dimension than y.
auto gridSize = dim3(round((blockDim - 1 + nWaters) / blockDim), round((blockDim - 1 + nProteins) / blockDim));
// Allocate GPU buffers for vectors.
cudaStatus = hipMalloc((void**)&dev_out, nProteins * nWaters * sizeof(char));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_protein, nProteins * sizeof(GPUAtom));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_water, nWaters * sizeof(GPUAtom));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_protein, inProteins, nProteins * sizeof(GPUAtom), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_water, inWaters, nWaters * sizeof(GPUAtom), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
bondDistKernel << <gridSize, blockSize >> > (dev_out, dev_protein, dev_water, nProteins, nWaters);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
cerr << "Distance kernel launch failed: " << hipGetErrorString(cudaStatus) << endl;
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching hbond distance kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << hipGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(out, dev_out, nProteins * nWaters * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// clear all our device arrays
Error:
hipFree(dev_protein);
hipFree(dev_water);
hipFree(dev_out);
return cudaStatus;
}
hipError_t waterToAcceptorCuda(char *out, const GPUAtom *inAcceptor, const GPUAtom *inWater, const size_t nAcceptors, const size_t nWaters, hipDeviceProp_t &deviceProp)
{
// define device arrays
GPUAtom *dev_acceptor = 0;
GPUAtom *dev_water = 0;
char *dev_out = 0;
hipError_t cudaStatus;
// Setup the kernel dimensions
int blockDim = sqrt(deviceProp.maxThreadsPerBlock);
auto blockSize = dim3(blockDim, blockDim);
//Waters are chosen for x dimension, since CUDA can handle MUCH more data along the x dimension than y.
auto gridSize = dim3(round((blockDim - 1 + nWaters) / blockDim), round((blockDim - 1 + nAcceptors) / blockDim));
// Allocate GPU buffers for vectors.
cudaStatus = hipMalloc((void**)&dev_out, nAcceptors * nWaters * sizeof(char));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_acceptor, nAcceptors * sizeof(GPUAtom));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_water, nWaters * sizeof(GPUAtom));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_out, out, nAcceptors * nWaters * sizeof(char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_acceptor, inAcceptor, nAcceptors * sizeof(GPUAtom), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_water, inWater, nWaters * sizeof(GPUAtom), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
waterToAcceptorKernel << <gridSize, blockSize >> > (dev_out, dev_acceptor, dev_water, nAcceptors, nWaters);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
cerr << "Donor to water angle kernel launch failed: " << hipGetErrorString(cudaStatus) << endl;
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching donor to water angle kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << hipGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(out, dev_out, nAcceptors * nWaters * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// clear all our device arrays
Error:
hipFree(dev_acceptor);
hipFree(dev_water);
hipFree(dev_out);
return cudaStatus;
}
hipError_t donorToWaterCuda(char *out, const GPUAtom *inDonor, const GPUAtom *inLinker, const GPUAtom *inWater, const size_t nDonors, const size_t nLinkers, const size_t nWaters, hipDeviceProp_t &deviceProp)
{
// define device arrays
GPUAtom *dev_donor = 0;
GPUAtom *dev_linker = 0;
GPUAtom *dev_water = 0;
char *dev_out = 0;
hipError_t cudaStatus;
// Setup the kernel dimensions
int blockDim = sqrt(deviceProp.maxThreadsPerBlock);
auto blockSize = dim3(blockDim, blockDim);
//Waters are chosen for x dimension, since CUDA can handle MUCH more data along the x dimension than y.
auto gridSize = dim3(round((blockDim - 1 + nWaters) / blockDim), round((blockDim - 1 + nDonors) / blockDim));
// Allocate GPU buffers for vectors.
cudaStatus = hipMalloc((void**)&dev_out, nDonors * nWaters * sizeof(char));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_donor, nDonors * sizeof(GPUAtom));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_linker, nLinkers * sizeof(GPUAtom));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_water, nWaters * sizeof(GPUAtom));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_out, out, nDonors * nWaters * sizeof(char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_donor, inDonor, nDonors * sizeof(GPUAtom), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_linker, inLinker, nLinkers * sizeof(GPUAtom), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_water, inWater, nWaters * sizeof(GPUAtom), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
donorToWaterKernel << <gridSize, blockSize >> > (dev_out, dev_donor, dev_linker, dev_water, nDonors, nLinkers, nWaters);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
cerr << "Water to acceptor angle kernel launch failed: " << hipGetErrorString(cudaStatus) << endl;
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching water to acceptor angle kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << hipGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(out, dev_out, nDonors * nWaters * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// clear all our device arrays
Error:
hipFree(dev_donor);
hipFree(dev_linker);
hipFree(dev_water);
hipFree(dev_out);
return cudaStatus;
}
hipError_t timelineMapCuda(char * outMap, const int * timeline, const int * tllookup, const int * boundAAs, const int currwater, const int window, const int threshold,
const int ntimeline, const int nframes, const int nAAs, hipDeviceProp_t &deviceProp)
{
// define device arrays
char * dev_outMap = 0;
int * dev_timeline = 0;
int * dev_tllookup = 0;
int * dev_boundAAs = 0;
hipError_t cudaStatus;
// Setup the kernel dimensions
int blockDim = sqrt(deviceProp.maxThreadsPerBlock);
auto blockSize = dim3(blockDim, blockDim);
//Frames are chosen for the x dimension, since CUDA can handle MUCH more data along the x dimension than y.
auto gridSize = dim3(round((blockDim - 1 + nframes) / blockDim), round((blockDim - 1 + nAAs) / blockDim));
// Allocate GPU buffers for vectors.
cudaStatus = hipMalloc((void**)&dev_outMap, nframes * nAAs * sizeof(char));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_timeline, ntimeline * sizeof(int));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_tllookup, (nframes+1) * sizeof(int));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_boundAAs, nAAs * sizeof(int));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_timeline, timeline, ntimeline * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_tllookup, tllookup, (nframes+1) * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(dev_boundAAs, boundAAs, nAAs * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
timelineMapKernel << <gridSize, blockSize >> > (dev_outMap, dev_timeline, dev_tllookup, dev_boundAAs, currwater, window, threshold, nframes, nAAs);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
cerr << "Timeline map kernel launch failed: " << hipGetErrorString(cudaStatus) << endl;
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching timeline map kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << hipGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(outMap, dev_outMap, nframes * nAAs * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// clear all our device arrays
Error:
hipFree(dev_outMap);
hipFree(dev_timeline);
hipFree(dev_tllookup);
hipFree(dev_boundAAs);
return cudaStatus;
}
hipError_t visitAndBridgerAnalysisCuda(char * outbridger, char * outvisitlist, int * outframesbound, const char * timelinemap, const int nframes, const int nAAs, hipDeviceProp_t &deviceProp)
{
// the device arrays
char * dev_outbridger = 0;
char * dev_outvisitlist = 0;
int * dev_outframesbound = 0;
char * dev_timelinemap = 0;
hipError_t cudaStatus;
// use div because it's more accurate than the rounding BS
auto gridDiv = div(nframes, deviceProp.maxThreadsPerBlock);
auto gridY = gridDiv.quot;
// roundabout way of rounding up (maybe use the same trick as above? It might be "faster")
if (gridDiv.rem != 0)
gridY++;
// find the block and grid size
auto blockSize = deviceProp.maxThreadsPerBlock;
int gridSize = min(16 * deviceProp.multiProcessorCount, gridY);
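// Sketch of the launch geometry chosen here: div() splits nframes into quot/rem, so
// gridY becomes ceil(nframes / maxThreadsPerBlock) -- the same result as the
// (n + block - 1) / block idiom used in the 2D wrappers above. Note that the kernel
// indexes frames directly (no grid-stride loop), so capping the grid at
// 16 * multiProcessorCount implicitly assumes gridY never exceeds that bound for the
// trajectories being analyzed; this is a reading of the code, not a documented guarantee.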
// Allocate GPU buffers for vectors
cudaStatus = hipMalloc((void**)&dev_outbridger, nframes * sizeof(char));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_outvisitlist, nAAs * sizeof(char));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_outframesbound, nframes * sizeof(int));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_timelinemap, nframes * nAAs * sizeof(char));
if (cudaStatus != hipSuccess) {
cerr << "hipMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_timelinemap, timelinemap, nframes * nAAs * sizeof(char), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
visitAndBridgerAnalysisKernel << <gridSize, blockSize >> > (dev_outbridger, dev_outvisitlist, dev_outframesbound, dev_timelinemap, nframes, nAAs);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
cerr << "Visit and bridger analysis kernel launch failed: " << hipGetErrorString(cudaStatus) << endl;
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
cerr << "hipDeviceSynchronize returned error code " << cudaStatus << " after launching visit and bridger analysis kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << hipGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(outbridger, dev_outbridger, nframes * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(outvisitlist, dev_outvisitlist, nAAs * sizeof(char), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
cudaStatus = hipMemcpy(outframesbound, dev_outframesbound, nframes * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
cerr << "hipMemcpy failed!" << endl;
goto Error;
}
// delete all our device arrays
Error:
hipFree(dev_outbridger);
hipFree(dev_outvisitlist);
hipFree(dev_outframesbound);
hipFree(dev_timelinemap);
return cudaStatus;
} | 812322217c4cef49c78ce8d40dbf774185aba293.cu | #define _USE_MATH_DEFINES
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <float.h>
#include <algorithm>
#include "kernel.cuh"
using namespace std;
__global__ void waterFilterKernel(char *out, const GPUAtom *inWaters, const float centx, const float centy, const float centz, const float maxdist, const size_t nWaters)
{
//Find where we are in the GPU
int i = blockIdx.x * blockDim.x + threadIdx.x;
//Make sure we aren't trying to access outside our pre-defined dimensions
if (i < nWaters)
{
//Get the distance between the water and the center of geometry
float distx = inWaters[i].x - centx;
float disty = inWaters[i].y - centy;
float distz = inWaters[i].z - centz;
float dist = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
//Flag if it is within the proper distance or not
out[i] = (dist < maxdist);
}
}
__global__ void bondDistKernel(char *out, const GPUAtom *inProtein, const GPUAtom *inWaters, const size_t nProteins, const size_t nWaters)
{
//Find where we are in the GPU
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
//Make sure we aren't trying to access outside our pre-defined dimensions
if (i < nWaters && j < nProteins)
{
//out[(j * nWaters) + i] = 'n'; //Set default to "No bond"
if (i % 3 == 0) //Only look for oxygen atoms, which should be every third atom starting at atom index 0
{
//Get the distance between the heavy atoms
float distx = inWaters[i].x - inProtein[j].x;
float disty = inWaters[i].y - inProtein[j].y;
float distz = inWaters[i].z - inProtein[j].z;
float dist = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
out[(j * nWaters) + i] = (dist < 3.5f);
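// Reading of the constant above (not a documented choice in this code): 3.5 Angstroms is
// the donor-acceptor heavy-atom distance cutoff commonly used as the first geometric
// criterion for a hydrogen bond; the angle criterion is applied by the kernels below.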
}
}
}
__global__ void waterToAcceptorKernel(char *out, const GPUAtom *inAcceptor, const GPUAtom *inWater, const size_t nAcceptors, const size_t nWaters)
{
//Find where we are in the GPU
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
//Make sure we aren't trying to access outside our pre-defined dimensions
if (i < nWaters && j < nAcceptors)
{
if (out[(j * nWaters) + i] == true) //Make sure we are in bonding distance from before
{
//Find which hydrogen is between the acceptor and the oxygen
float distx = inWater[i + 1].x - inAcceptor[j].x;
float disty = inWater[i + 1].y - inAcceptor[j].y;
float distz = inWater[i + 1].z - inAcceptor[j].z;
float dist1 = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
distx = inWater[i + 2].x - inAcceptor[j].x;
disty = inWater[i + 2].y - inAcceptor[j].y;
distz = inWater[i + 2].z - inAcceptor[j].z;
float dist2 = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
int closestindex = -1;
if (dist1 < dist2)
{
closestindex = i + 1;
}
else
{
closestindex = i + 2;
}
//Calculate the angle parameters
distx = inWater[i].x - inAcceptor[j].x;
disty = inWater[i].y - inAcceptor[j].y;
distz = inWater[i].z - inAcceptor[j].z;
float a = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
distx = inWater[i].x - inWater[closestindex].x;
disty = inWater[i].y - inWater[closestindex].y;
distz = inWater[i].z - inWater[closestindex].z;
float b = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
distx = inWater[closestindex].x - inAcceptor[j].x;
disty = inWater[closestindex].y - inAcceptor[j].y;
distz = inWater[closestindex].z - inAcceptor[j].z;
float c = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
float theta = (acosf(((a * a) + (b*b) - (c*c)) / (2 * a * b))) * (180.0f / M_PI);
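// The acosf() expression above is the law of cosines applied to the triangle formed by
// the water oxygen (O), its closest hydrogen (H) and the acceptor (A): with a = |O-A|,
// b = |O-H| and c = |H-A|, c^2 = a^2 + b^2 - 2*a*b*cos(theta), so theta is the H-O-A
// angle at the oxygen, converted to degrees. The 30-degree cutoff below appears to be
// the usual geometric hydrogen-bond angle criterion.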
if (theta > 30.0f) //If the angle is too large, change the bond to not a bond
{
out[(j * nWaters) + i] = false;
}
}
}
}
__global__ void donorToWaterKernel(char *out, const GPUAtom *inDonor, const GPUAtom *inLinker, const GPUAtom *inWater, const size_t nDonors, const size_t nLinkers, const size_t nWaters)
{
//Find where we are in the GPU
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
//Make sure we aren't trying to access outside our pre-defined dimensions
if (i < nWaters && j < nDonors)
{
if (out[(j * nWaters) + i] == true) //Make sure we are in bonding distance from before
{
int closestindex = -1;
float mindist = FLT_MAX; //Equivalent to "c"
//Find the bridging linker hydrogen in the residue
for (int k = 0; k < nLinkers; k++)
{
if (inLinker[k].resid == inDonor[j].resid) //Hydrogen belongs to same residue
{
float distx = inLinker[k].x - inWater[i].x;
float disty = inLinker[k].y - inWater[i].y;
float distz = inLinker[k].z - inWater[i].z;
float dist = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
if (dist < mindist)
{
mindist = dist;
closestindex = k;
}
else if (inDonor[j].resid < inLinker[k].resid)
{
break;
}
}
}
//Calculate the angle parameter
float distx = inLinker[closestindex].x - inDonor[j].x;
float disty = inLinker[closestindex].y - inDonor[j].y;
float distz = inLinker[closestindex].z - inDonor[j].z;
float a = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
distx = inWater[i].x - inDonor[j].x;
disty = inWater[i].y - inDonor[j].y;
distz = inWater[i].z - inDonor[j].z;
float b = sqrtf((distx * distx) + (disty * disty) + (distz * distz));
float theta = (acosf(((a * a) + (b*b) - (mindist*mindist)) / (2 * a * b))) * (180.0f / M_PI);
if (theta > 30.0f) //If the angle is too large, change the bond to not a bond
{
out[(j * nWaters) + i] = false;
}
}
}
}
__global__ void timelineMapKernel(char * outMap, int * timeline, int * tllookup, int * boundAAs, const int currwater, const int window, const int threshold, const int nframes, const int nAAs)
{
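// Assumed data layout (inferred from how the arrays are indexed below): `timeline` is a
// flat list of (amino-acid id, water id) integer pairs grouped by frame, and
// tllookup[f] .. tllookup[f + 1] delimits frame f's pairs, CSR-style. A map cell is set
// when the (boundAAs[j], currwater) pair occurs in at least `threshold` of the `window`
// frames starting at frame i.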
int i = blockIdx.x * blockDim.x + threadIdx.x; //Frame
int j = blockIdx.y * blockDim.y + threadIdx.y; //AA
if (i < (nframes - window) && j < nAAs)
{
int boundframes = 0;
for (int currwindow = 0; currwindow < window; currwindow++)
{
for (int currsearch = tllookup[i + currwindow]; currsearch < tllookup[i + currwindow + 1]; currsearch += 2)
{
if ((timeline[currsearch] == boundAAs[j]) && (timeline[currsearch + 1] == currwater))
{
boundframes++;
}
}
}
outMap[(j * nframes) + i] = (boundframes >= threshold);
}
}
__global__ void visitAndBridgerAnalysisKernel(char * outbridger, char * outvisitlist, int * outframesbound, const char * timelinemap, const int nframes, const int nAAs)
{
int i = blockIdx.x * blockDim.x + threadIdx.x; //Frame
if (i < nframes)
{
int boundcount = 0;
for (int j = 0; j < nAAs; j++)
{
if (timelinemap[(j*nframes) + i])
{
boundcount++;
outvisitlist[j] = true; //Might be dangerous
}
}
outbridger[i] = (boundcount > 1);
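// Interpretation of the flag above (a reading of the code, not a documented contract):
// a frame is marked as "bridging" when the current water is bound to more than one
// amino acid in that frame, i.e. it bridges at least two residues simultaneously.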
}
}
cudaError_t waterFilterCuda(char *out, const GPUAtom *inWater, const float centx, const float centy, const float centz, const float maxdist, const size_t nWaters, cudaDeviceProp &deviceProp)
{
// the device arrays
GPUAtom *dev_inWater = 0;
char *dev_out = 0;
cudaError_t cudaStatus;
// use div because it's more accurate than the rounding BS
auto gridDiv = div(nWaters, deviceProp.maxThreadsPerBlock);
auto gridY = gridDiv.quot;
// roundabout way of rounding up (maybe use the same trick as above? It might be "faster")
if (gridDiv.rem != 0)
gridY++;
// find the block and grid size
auto blockSize = deviceProp.maxThreadsPerBlock;
int gridSize = min(16 * deviceProp.multiProcessorCount, gridY);
// Allocate GPU buffers for vectors
cudaStatus = cudaMalloc((void**)&dev_out, nWaters * sizeof(char));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_inWater, nWaters * sizeof(GPUAtom));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_inWater, inWater, nWaters * sizeof(GPUAtom), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
waterFilterKernel << <gridSize, blockSize >> > (dev_out, dev_inWater, centx, centy, centz, maxdist, nWaters);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
cerr << "dielectric kernel launch failed: " << cudaGetErrorString(cudaStatus) << endl;
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching density kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << cudaGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(out, dev_out, nWaters * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// delete all our device arrays
Error:
cudaFree(dev_inWater);
cudaFree(dev_out);
return cudaStatus;
}
cudaError_t bondDistCuda(char *out, const GPUAtom *inProteins, const GPUAtom *inWaters, const size_t nProteins, const size_t nWaters, cudaDeviceProp &deviceProp)
{
// define device arrays
GPUAtom *dev_protein = 0;
GPUAtom *dev_water = 0;
char *dev_out = 0;
cudaError_t cudaStatus;
// Setup the kernel dimensions
int blockDim = sqrt(deviceProp.maxThreadsPerBlock);
auto blockSize = dim3(blockDim, blockDim);
//Waters are chosen for x dimension, since CUDA can handle MUCH more data along the x dimension than y.
auto gridSize = dim3(round((blockDim - 1 + nWaters) / blockDim), round((blockDim - 1 + nProteins) / blockDim));
// Allocate GPU buffers for vectors.
cudaStatus = cudaMalloc((void**)&dev_out, nProteins * nWaters * sizeof(char));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_protein, nProteins * sizeof(GPUAtom));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_water, nWaters * sizeof(GPUAtom));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_protein, inProteins, nProteins * sizeof(GPUAtom), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_water, inWaters, nWaters * sizeof(GPUAtom), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
bondDistKernel << <gridSize, blockSize >> > (dev_out, dev_protein, dev_water, nProteins, nWaters);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
cerr << "Distance kernel launch failed: " << cudaGetErrorString(cudaStatus) << endl;
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching hbond distance kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << cudaGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(out, dev_out, nProteins * nWaters * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// clear all our device arrays
Error:
cudaFree(dev_protein);
cudaFree(dev_water);
cudaFree(dev_out);
return cudaStatus;
}
cudaError_t waterToAcceptorCuda(char *out, const GPUAtom *inAcceptor, const GPUAtom *inWater, const size_t nAcceptors, const size_t nWaters, cudaDeviceProp &deviceProp)
{
// define device arrays
GPUAtom *dev_acceptor = 0;
GPUAtom *dev_water = 0;
char *dev_out = 0;
cudaError_t cudaStatus;
// Setup the kernel dimensions
int blockDim = sqrt(deviceProp.maxThreadsPerBlock);
auto blockSize = dim3(blockDim, blockDim);
//Waters are chosen for x dimension, since CUDA can handle MUCH more data along the x dimension than y.
auto gridSize = dim3(round((blockDim - 1 + nWaters) / blockDim), round((blockDim - 1 + nAcceptors) / blockDim));
// Allocate GPU buffers for vectors.
cudaStatus = cudaMalloc((void**)&dev_out, nAcceptors * nWaters * sizeof(char));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_acceptor, nAcceptors * sizeof(GPUAtom));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_water, nWaters * sizeof(GPUAtom));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_out, out, nAcceptors * nWaters * sizeof(char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_acceptor, inAcceptor, nAcceptors * sizeof(GPUAtom), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_water, inWater, nWaters * sizeof(GPUAtom), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
waterToAcceptorKernel << <gridSize, blockSize >> > (dev_out, dev_acceptor, dev_water, nAcceptors, nWaters);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
cerr << "Donor to water angle kernel launch failed: " << cudaGetErrorString(cudaStatus) << endl;
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching donor to water angle kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << cudaGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(out, dev_out, nAcceptors * nWaters * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// clear all our device arrays
Error:
cudaFree(dev_acceptor);
cudaFree(dev_water);
cudaFree(dev_out);
return cudaStatus;
}
cudaError_t donorToWaterCuda(char *out, const GPUAtom *inDonor, const GPUAtom *inLinker, const GPUAtom *inWater, const size_t nDonors, const size_t nLinkers, const size_t nWaters, cudaDeviceProp &deviceProp)
{
// define device arrays
GPUAtom *dev_donor = 0;
GPUAtom *dev_linker = 0;
GPUAtom *dev_water = 0;
char *dev_out = 0;
cudaError_t cudaStatus;
// Setup the kernel dimensions
int blockDim = sqrt(deviceProp.maxThreadsPerBlock);
auto blockSize = dim3(blockDim, blockDim);
//Waters are chosen for x dimension, since CUDA can handle MUCH more data along the x dimension than y.
auto gridSize = dim3(round((blockDim - 1 + nWaters) / blockDim), round((blockDim - 1 + nDonors) / blockDim));
// Allocate GPU buffers for vectors.
cudaStatus = cudaMalloc((void**)&dev_out, nDonors * nWaters * sizeof(char));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_donor, nDonors * sizeof(GPUAtom));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_linker, nLinkers * sizeof(GPUAtom));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_water, nWaters * sizeof(GPUAtom));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_out, out, nDonors * nWaters * sizeof(char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_donor, inDonor, nDonors * sizeof(GPUAtom), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_linker, inLinker, nLinkers * sizeof(GPUAtom), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_water, inWater, nWaters * sizeof(GPUAtom), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
donorToWaterKernel << <gridSize, blockSize >> > (dev_out, dev_donor, dev_linker, dev_water, nDonors, nLinkers, nWaters);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
cerr << "Water to acceptor angle kernel launch failed: " << cudaGetErrorString(cudaStatus) << endl;
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching water to acceptor angle kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << cudaGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(out, dev_out, nDonors * nWaters * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// clear all our device arrays
Error:
cudaFree(dev_donor);
cudaFree(dev_linker);
cudaFree(dev_water);
cudaFree(dev_out);
return cudaStatus;
}
cudaError_t timelineMapCuda(char * outMap, const int * timeline, const int * tllookup, const int * boundAAs, const int currwater, const int window, const int threshold,
const int ntimeline, const int nframes, const int nAAs, cudaDeviceProp &deviceProp)
{
// define device arrays
char * dev_outMap = 0;
int * dev_timeline = 0;
int * dev_tllookup = 0;
int * dev_boundAAs = 0;
cudaError_t cudaStatus;
// Setup the kernel dimensions
int blockDim = sqrt(deviceProp.maxThreadsPerBlock);
auto blockSize = dim3(blockDim, blockDim);
//Frames are chosen for the x dimension, since CUDA can handle MUCH more data along the x dimension than y.
auto gridSize = dim3(round((blockDim - 1 + nframes) / blockDim), round((blockDim - 1 + nAAs) / blockDim));
// Allocate GPU buffers for vectors.
cudaStatus = cudaMalloc((void**)&dev_outMap, nframes * nAAs * sizeof(char));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_timeline, ntimeline * sizeof(int));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_tllookup, (nframes+1) * sizeof(int));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_boundAAs, nAAs * sizeof(int));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_timeline, timeline, ntimeline * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_tllookup, tllookup, (nframes+1) * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(dev_boundAAs, boundAAs, nAAs * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
timelineMapKernel << <gridSize, blockSize >> > (dev_outMap, dev_timeline, dev_tllookup, dev_boundAAs, currwater, window, threshold, nframes, nAAs);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
cerr << "Timeline map kernel launch failed: " << cudaGetErrorString(cudaStatus) << endl;
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching timeline map kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << cudaGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(outMap, dev_outMap, nframes * nAAs * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// clear all our device arrays
Error:
cudaFree(dev_outMap);
cudaFree(dev_timeline);
cudaFree(dev_tllookup);
cudaFree(dev_boundAAs);
return cudaStatus;
}
cudaError_t visitAndBridgerAnalysisCuda(char * outbridger, char * outvisitlist, int * outframesbound, const char * timelinemap, const int nframes, const int nAAs, cudaDeviceProp &deviceProp)
{
// the device arrays
char * dev_outbridger = 0;
char * dev_outvisitlist = 0;
int * dev_outframesbound = 0;
char * dev_timelinemap = 0;
cudaError_t cudaStatus;
// use div because it's more accurate than the rounding BS
auto gridDiv = div(nframes, deviceProp.maxThreadsPerBlock);
auto gridY = gridDiv.quot;
// roundabout way of rounding up (maybe use the same trick as above? It might be "faster")
if (gridDiv.rem != 0)
gridY++;
// find the block and grid size
auto blockSize = deviceProp.maxThreadsPerBlock;
int gridSize = min(16 * deviceProp.multiProcessorCount, gridY);
// Allocate GPU buffers for vectors
cudaStatus = cudaMalloc((void**)&dev_outbridger, nframes * sizeof(char));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_outvisitlist, nAAs * sizeof(char));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_outframesbound, nframes * sizeof(int));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_timelinemap, nframes * nAAs * sizeof(char));
if (cudaStatus != cudaSuccess) {
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_timelinemap, timelinemap, nframes * nAAs * sizeof(char), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// Launch a kernel on the GPU.
visitAndBridgerAnalysisKernel << <gridSize, blockSize >> > (dev_outbridger, dev_outvisitlist, dev_outframesbound, dev_timelinemap, nframes, nAAs);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
cerr << "Visit and bridger analysis kernel launch failed: " << cudaGetErrorString(cudaStatus) << endl;
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching visit and bridger analysis kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << cudaGetErrorString(cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(outbridger, dev_outbridger, nframes * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(outvisitlist, dev_outvisitlist, nAAs * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
cudaStatus = cudaMemcpy(outframesbound, dev_outframesbound, nframes * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
// delete all our device arrays
Error:
cudaFree(dev_outbridger);
cudaFree(dev_outvisitlist);
cudaFree(dev_outframesbound);
cudaFree(dev_timelinemap);
return cudaStatus;
} |
b0d2b70c57baf72bdfce02b73207ddeedddc1918.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/prior_box_op.h"
namespace paddle {
namespace operators {
template <typename T>
__device__ inline T clip(T in) {
return min(max(in, 0.), 1.);
}
template <typename T>
__global__ void GenPriorBox(T* out, const T* aspect_ratios, const int height,
const int width, const int im_height,
const int im_width, const int as_num,
const T offset, const T step_width,
const T step_height, const T* min_sizes,
const T* max_sizes, const int min_num, bool is_clip,
bool min_max_aspect_ratios_order) {
int num_priors = max_sizes ? as_num * min_num + min_num : as_num * min_num;
int box_num = height * width * num_priors;
CUDA_KERNEL_LOOP(i, box_num) {
int h = i / (num_priors * width);
int w = (i / num_priors) % width;
int p = i % num_priors;
int m = max_sizes ? p / (as_num + 1) : p / as_num;
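// Sketch of the indexing scheme (assuming CUDA_KERNEL_LOOP expands to the usual Paddle
// grid-stride loop): i enumerates all height * width * num_priors boxes, and the
// divisions above recover the feature-map row h, column w, the prior index p within
// that cell, and the min_size index m that p belongs to.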
T cx = (w + offset) * step_width;
T cy = (h + offset) * step_height;
T bw, bh;
T min_size = min_sizes[m];
if (max_sizes) {
int s = p % (as_num + 1);
if (!min_max_aspect_ratios_order) {
if (s < as_num) {
T ar = aspect_ratios[s];
bw = min_size * sqrt(ar) / 2.;
bh = min_size / sqrt(ar) / 2.;
} else {
T max_size = max_sizes[m];
bw = sqrt(min_size * max_size) / 2.;
bh = bw;
}
} else {
if (s == 0) {
bw = bh = min_size / 2.;
} else if (s == 1) {
T max_size = max_sizes[m];
bw = sqrt(min_size * max_size) / 2.;
bh = bw;
} else {
T ar = aspect_ratios[s - 1];
bw = min_size * sqrt(ar) / 2.;
bh = min_size / sqrt(ar) / 2.;
}
}
} else {
int s = p % as_num;
T ar = aspect_ratios[s];
bw = min_size * sqrt(ar) / 2.;
bh = min_size / sqrt(ar) / 2.;
}
T xmin = (cx - bw) / im_width;
T ymin = (cy - bh) / im_height;
T xmax = (cx + bw) / im_width;
T ymax = (cy + bh) / im_height;
out[i * 4] = is_clip ? clip<T>(xmin) : xmin;
out[i * 4 + 1] = is_clip ? clip<T>(ymin) : ymin;
out[i * 4 + 2] = is_clip ? clip<T>(xmax) : xmax;
out[i * 4 + 3] = is_clip ? clip<T>(ymax) : ymax;
}
}
template <typename T>
__global__ void SetVariance(T* out, const T* var, const int vnum,
const int num) {
CUDA_KERNEL_LOOP(i, num) { out[i] = var[i % vnum]; }
}
template <typename T>
class PriorBoxOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<paddle::framework::Tensor>("Input");
auto* image = ctx.Input<paddle::framework::Tensor>("Image");
auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes");
auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes");
auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes");
auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios");
auto variances = ctx.Attr<std::vector<float>>("variances");
auto flip = ctx.Attr<bool>("flip");
auto clip = ctx.Attr<bool>("clip");
auto min_max_aspect_ratios_order =
ctx.Attr<bool>("min_max_aspect_ratios_order");
std::vector<float> aspect_ratios;
ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios);
T step_w = static_cast<T>(ctx.Attr<float>("step_w"));
T step_h = static_cast<T>(ctx.Attr<float>("step_h"));
T offset = static_cast<T>(ctx.Attr<float>("offset"));
auto im_width = image->dims()[3];
auto im_height = image->dims()[2];
auto width = input->dims()[3];
auto height = input->dims()[2];
T step_width, step_height;
if (step_w == 0 || step_h == 0) {
step_width = static_cast<T>(im_width) / width;
step_height = static_cast<T>(im_height) / height;
} else {
step_width = step_w;
step_height = step_h;
}
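// When step_w/step_h are left at 0, the stride is derived from the ratio of image size
// to feature-map size; with illustrative numbers only, im_width 300 and width 19 give a
// step_width of roughly 15.8 input pixels per feature-map cell. Otherwise the explicitly
// configured steps are used unchanged.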
int num_priors = aspect_ratios.size() * min_sizes.size();
if (max_sizes.size() > 0) {
num_priors += max_sizes.size();
}
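// Illustrative count (hypothetical values, not taken from any config): if aspect_ratios
// holds 4 entries after ExpandAspectRatios, with 3 min_sizes and 3 max_sizes, then
// num_priors = 4 * 3 + 3 = 15 boxes per feature-map cell.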
int min_num = static_cast<int>(min_sizes.size());
int box_num = width * height * num_priors;
int block = 512;
int grid = (box_num + block - 1) / block;
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
boxes->mutable_data<T>(ctx.GetPlace());
vars->mutable_data<T>(ctx.GetPlace());
framework::Tensor r;
framework::TensorFromVector(aspect_ratios, ctx.device_context(), &r);
framework::Tensor min;
framework::TensorFromVector(min_sizes, ctx.device_context(), &min);
T* max_data = nullptr;
framework::Tensor max;
if (max_sizes.size() > 0) {
framework::TensorFromVector(max_sizes, ctx.device_context(), &max);
max_data = max.data<T>();
}
hipLaunchKernelGGL(( GenPriorBox<T>), dim3(grid), dim3(block), 0, stream,
boxes->data<T>(), r.data<T>(), height, width, im_height, im_width,
aspect_ratios.size(), offset, step_width, step_height, min.data<T>(),
max_data, min_num, clip, min_max_aspect_ratios_order);
framework::Tensor v;
framework::TensorFromVector(variances, ctx.device_context(), &v);
grid = (box_num * 4 + block - 1) / block;
hipLaunchKernelGGL(( SetVariance<T>), dim3(grid), dim3(block), 0, stream, vars->data<T>(), v.data<T>(),
variances.size(), box_num * 4);
}
}; // class PriorBoxOpCUDAKernel
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(prior_box, ops::PriorBoxOpCUDAKernel<float>,
ops::PriorBoxOpCUDAKernel<double>);
| b0d2b70c57baf72bdfce02b73207ddeedddc1918.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/prior_box_op.h"
namespace paddle {
namespace operators {
template <typename T>
__device__ inline T clip(T in) {
return min(max(in, 0.), 1.);
}
template <typename T>
__global__ void GenPriorBox(T* out, const T* aspect_ratios, const int height,
const int width, const int im_height,
const int im_width, const int as_num,
const T offset, const T step_width,
const T step_height, const T* min_sizes,
const T* max_sizes, const int min_num, bool is_clip,
bool min_max_aspect_ratios_order) {
int num_priors = max_sizes ? as_num * min_num + min_num : as_num * min_num;
int box_num = height * width * num_priors;
CUDA_KERNEL_LOOP(i, box_num) {
int h = i / (num_priors * width);
int w = (i / num_priors) % width;
int p = i % num_priors;
int m = max_sizes ? p / (as_num + 1) : p / as_num;
T cx = (w + offset) * step_width;
T cy = (h + offset) * step_height;
T bw, bh;
T min_size = min_sizes[m];
if (max_sizes) {
int s = p % (as_num + 1);
if (!min_max_aspect_ratios_order) {
if (s < as_num) {
T ar = aspect_ratios[s];
bw = min_size * sqrt(ar) / 2.;
bh = min_size / sqrt(ar) / 2.;
} else {
T max_size = max_sizes[m];
bw = sqrt(min_size * max_size) / 2.;
bh = bw;
}
} else {
if (s == 0) {
bw = bh = min_size / 2.;
} else if (s == 1) {
T max_size = max_sizes[m];
bw = sqrt(min_size * max_size) / 2.;
bh = bw;
} else {
T ar = aspect_ratios[s - 1];
bw = min_size * sqrt(ar) / 2.;
bh = min_size / sqrt(ar) / 2.;
}
}
} else {
int s = p % as_num;
T ar = aspect_ratios[s];
bw = min_size * sqrt(ar) / 2.;
bh = min_size / sqrt(ar) / 2.;
}
T xmin = (cx - bw) / im_width;
T ymin = (cy - bh) / im_height;
T xmax = (cx + bw) / im_width;
T ymax = (cy + bh) / im_height;
out[i * 4] = is_clip ? clip<T>(xmin) : xmin;
out[i * 4 + 1] = is_clip ? clip<T>(ymin) : ymin;
out[i * 4 + 2] = is_clip ? clip<T>(xmax) : xmax;
out[i * 4 + 3] = is_clip ? clip<T>(ymax) : ymax;
}
}
template <typename T>
__global__ void SetVariance(T* out, const T* var, const int vnum,
const int num) {
CUDA_KERNEL_LOOP(i, num) { out[i] = var[i % vnum]; }
}
template <typename T>
class PriorBoxOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<paddle::framework::Tensor>("Input");
auto* image = ctx.Input<paddle::framework::Tensor>("Image");
auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes");
auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes");
auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes");
auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios");
auto variances = ctx.Attr<std::vector<float>>("variances");
auto flip = ctx.Attr<bool>("flip");
auto clip = ctx.Attr<bool>("clip");
auto min_max_aspect_ratios_order =
ctx.Attr<bool>("min_max_aspect_ratios_order");
std::vector<float> aspect_ratios;
ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios);
T step_w = static_cast<T>(ctx.Attr<float>("step_w"));
T step_h = static_cast<T>(ctx.Attr<float>("step_h"));
T offset = static_cast<T>(ctx.Attr<float>("offset"));
auto im_width = image->dims()[3];
auto im_height = image->dims()[2];
auto width = input->dims()[3];
auto height = input->dims()[2];
T step_width, step_height;
if (step_w == 0 || step_h == 0) {
step_width = static_cast<T>(im_width) / width;
step_height = static_cast<T>(im_height) / height;
} else {
step_width = step_w;
step_height = step_h;
}
int num_priors = aspect_ratios.size() * min_sizes.size();
if (max_sizes.size() > 0) {
num_priors += max_sizes.size();
}
int min_num = static_cast<int>(min_sizes.size());
int box_num = width * height * num_priors;
int block = 512;
int grid = (box_num + block - 1) / block;
auto stream =
ctx.template device_context<platform::CUDADeviceContext>().stream();
boxes->mutable_data<T>(ctx.GetPlace());
vars->mutable_data<T>(ctx.GetPlace());
framework::Tensor r;
framework::TensorFromVector(aspect_ratios, ctx.device_context(), &r);
framework::Tensor min;
framework::TensorFromVector(min_sizes, ctx.device_context(), &min);
T* max_data = nullptr;
framework::Tensor max;
if (max_sizes.size() > 0) {
framework::TensorFromVector(max_sizes, ctx.device_context(), &max);
max_data = max.data<T>();
}
GenPriorBox<T><<<grid, block, 0, stream>>>(
boxes->data<T>(), r.data<T>(), height, width, im_height, im_width,
aspect_ratios.size(), offset, step_width, step_height, min.data<T>(),
max_data, min_num, clip, min_max_aspect_ratios_order);
framework::Tensor v;
framework::TensorFromVector(variances, ctx.device_context(), &v);
grid = (box_num * 4 + block - 1) / block;
SetVariance<T><<<grid, block, 0, stream>>>(vars->data<T>(), v.data<T>(),
variances.size(), box_num * 4);
}
}; // class PriorBoxOpCUDAKernel
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(prior_box, ops::PriorBoxOpCUDAKernel<float>,
ops::PriorBoxOpCUDAKernel<double>);
|
5fe5331881c6d9536272e8b23060c89aafb41b9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify %s
// RUN: %clang_cc1 -std=c++14 -triple nvptx64-nvidia-cuda -fsyntax-only -fcuda-is-device -verify %s
#include "Inputs/cuda.h"
// Opaque return types used to check that we pick the right overloads.
struct HostReturnTy {};
struct HostReturnTy2 {};
struct DeviceReturnTy {};
struct DeviceReturnTy2 {};
struct HostDeviceReturnTy {};
struct TemplateReturnTy {};
struct CorrectOverloadRetTy{};
#if __CUDA_ARCH__
// expected-note@-2 {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'IncorrectOverloadRetTy' to 'const CorrectOverloadRetTy &' for 1st argument}}
// expected-note@-3 {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'IncorrectOverloadRetTy' to 'CorrectOverloadRetTy &&' for 1st argument}}
#endif
struct IncorrectOverloadRetTy{};
typedef HostReturnTy (*HostFnPtr)();
typedef DeviceReturnTy (*DeviceFnPtr)();
typedef HostDeviceReturnTy (*HostDeviceFnPtr)();
typedef void (*GlobalFnPtr)(); // __global__ functions must return void.
// CurrentReturnTy is {HostReturnTy,DeviceReturnTy} during {host,device}
// compilation.
#ifdef __CUDA_ARCH__
typedef DeviceReturnTy CurrentReturnTy;
#else
typedef HostReturnTy CurrentReturnTy;
#endif
// CurrentFnPtr is a function pointer to a {host,device} function during
// {host,device} compilation.
typedef CurrentReturnTy (*CurrentFnPtr)();
// Host and unattributed functions can't be overloaded.
__host__ void hh() {} // expected-note {{previous definition is here}}
void hh() {} // expected-error {{redefinition of 'hh'}}
// H/D overloading is OK.
__host__ HostReturnTy dh() { return HostReturnTy(); }
__device__ DeviceReturnTy dh() { return DeviceReturnTy(); }
// H/HD and D/HD are not allowed.
__host__ __device__ int hdh() { return 0; } // expected-note {{previous declaration is here}}
__host__ int hdh() { return 0; }
// expected-error@-1 {{__host__ function 'hdh' cannot overload __host__ __device__ function 'hdh'}}
__host__ int hhd() { return 0; } // expected-note {{previous declaration is here}}
__host__ __device__ int hhd() { return 0; }
// expected-error@-1 {{__host__ __device__ function 'hhd' cannot overload __host__ function 'hhd'}}
__host__ __device__ int hdd() { return 0; } // expected-note {{previous declaration is here}}
__device__ int hdd() { return 0; }
// expected-error@-1 {{__device__ function 'hdd' cannot overload __host__ __device__ function 'hdd'}}
__device__ int dhd() { return 0; } // expected-note {{previous declaration is here}}
__host__ __device__ int dhd() { return 0; }
// expected-error@-1 {{__host__ __device__ function 'dhd' cannot overload __device__ function 'dhd'}}
// Same tests for extern "C" functions.
extern "C" __host__ int chh() { return 0; } // expected-note {{previous definition is here}}
extern "C" int chh() { return 0; } // expected-error {{redefinition of 'chh'}}
// H/D overloading is OK.
extern "C" __device__ DeviceReturnTy cdh() { return DeviceReturnTy(); }
extern "C" __host__ HostReturnTy cdh() { return HostReturnTy(); }
// H/HD and D/HD overloading is not allowed.
extern "C" __host__ __device__ int chhd1() { return 0; } // expected-note {{previous declaration is here}}
extern "C" __host__ int chhd1() { return 0; }
// expected-error@-1 {{__host__ function 'chhd1' cannot overload __host__ __device__ function 'chhd1'}}
extern "C" __host__ int chhd2() { return 0; } // expected-note {{previous declaration is here}}
extern "C" __host__ __device__ int chhd2() { return 0; }
// expected-error@-1 {{__host__ __device__ function 'chhd2' cannot overload __host__ function 'chhd2'}}
// Helper functions to verify calling restrictions.
__device__ DeviceReturnTy d() { return DeviceReturnTy(); }
// expected-note@-1 1+ {{'d' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __device__ function from __host__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __device__ function from __host__ __device__ function}}
__host__ HostReturnTy h() { return HostReturnTy(); }
// expected-note@-1 1+ {{'h' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __host__ function from __device__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __host__ function from __host__ __device__ function}}
// expected-note@-4 1+ {{candidate function not viable: call to __host__ function from __global__ function}}
__global__ void g() {}
// expected-note@-1 1+ {{'g' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __global__ function from __device__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __global__ function from __host__ __device__ function}}
// expected-note@-4 1+ {{candidate function not viable: call to __global__ function from __global__ function}}
extern "C" __device__ DeviceReturnTy cd() { return DeviceReturnTy(); }
// expected-note@-1 1+ {{'cd' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __device__ function from __host__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __device__ function from __host__ __device__ function}}
extern "C" __host__ HostReturnTy ch() { return HostReturnTy(); }
// expected-note@-1 1+ {{'ch' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __host__ function from __device__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __host__ function from __host__ __device__ function}}
// expected-note@-4 1+ {{candidate function not viable: call to __host__ function from __global__ function}}
__host__ void hostf() {
DeviceFnPtr fp_d = d; // expected-error {{reference to __device__ function 'd' in __host__ function}}
DeviceReturnTy ret_d = d(); // expected-error {{no matching function for call to 'd'}}
DeviceFnPtr fp_cd = cd; // expected-error {{reference to __device__ function 'cd' in __host__ function}}
DeviceReturnTy ret_cd = cd(); // expected-error {{no matching function for call to 'cd'}}
HostFnPtr fp_h = h;
HostReturnTy ret_h = h();
HostFnPtr fp_ch = ch;
HostReturnTy ret_ch = ch();
HostFnPtr fp_dh = dh;
HostReturnTy ret_dh = dh();
HostFnPtr fp_cdh = cdh;
HostReturnTy ret_cdh = cdh();
GlobalFnPtr fp_g = g;
g(); // expected-error {{call to global function 'g' not configured}}
hipLaunchKernelGGL(( g), dim3(0), dim3(0), 0, 0, );
}
__device__ void devicef() {
DeviceFnPtr fp_d = d;
DeviceReturnTy ret_d = d();
DeviceFnPtr fp_cd = cd;
DeviceReturnTy ret_cd = cd();
HostFnPtr fp_h = h; // expected-error {{reference to __host__ function 'h' in __device__ function}}
HostReturnTy ret_h = h(); // expected-error {{no matching function for call to 'h'}}
HostFnPtr fp_ch = ch; // expected-error {{reference to __host__ function 'ch' in __device__ function}}
HostReturnTy ret_ch = ch(); // expected-error {{no matching function for call to 'ch'}}
DeviceFnPtr fp_dh = dh;
DeviceReturnTy ret_dh = dh();
DeviceFnPtr fp_cdh = cdh;
DeviceReturnTy ret_cdh = cdh();
GlobalFnPtr fp_g = g; // expected-error {{reference to __global__ function 'g' in __device__ function}}
g(); // expected-error {{no matching function for call to 'g'}}
hipLaunchKernelGGL(( g), dim3(0),dim3(0), 0, 0, ); // expected-error {{reference to __global__ function 'g' in __device__ function}}
}
__global__ void globalf() {
DeviceFnPtr fp_d = d;
DeviceReturnTy ret_d = d();
DeviceFnPtr fp_cd = cd;
DeviceReturnTy ret_cd = cd();
HostFnPtr fp_h = h; // expected-error {{reference to __host__ function 'h' in __global__ function}}
HostReturnTy ret_h = h(); // expected-error {{no matching function for call to 'h'}}
HostFnPtr fp_ch = ch; // expected-error {{reference to __host__ function 'ch' in __global__ function}}
HostReturnTy ret_ch = ch(); // expected-error {{no matching function for call to 'ch'}}
DeviceFnPtr fp_dh = dh;
DeviceReturnTy ret_dh = dh();
DeviceFnPtr fp_cdh = cdh;
DeviceReturnTy ret_cdh = cdh();
GlobalFnPtr fp_g = g; // expected-error {{reference to __global__ function 'g' in __global__ function}}
g(); // expected-error {{no matching function for call to 'g'}}
hipLaunchKernelGGL(( g), dim3(0),dim3(0), 0, 0, ); // expected-error {{reference to __global__ function 'g' in __global__ function}}
}
__host__ __device__ void hostdevicef() {
DeviceFnPtr fp_d = d;
DeviceReturnTy ret_d = d();
DeviceFnPtr fp_cd = cd;
DeviceReturnTy ret_cd = cd();
#if !defined(__CUDA_ARCH__)
// expected-error@-5 {{reference to __device__ function 'd' in __host__ __device__ function}}
// expected-error@-5 {{reference to __device__ function 'd' in __host__ __device__ function}}
// expected-error@-5 {{reference to __device__ function 'cd' in __host__ __device__ function}}
// expected-error@-5 {{reference to __device__ function 'cd' in __host__ __device__ function}}
#endif
HostFnPtr fp_h = h;
HostReturnTy ret_h = h();
HostFnPtr fp_ch = ch;
HostReturnTy ret_ch = ch();
#if defined(__CUDA_ARCH__)
// expected-error@-5 {{reference to __host__ function 'h' in __host__ __device__ function}}
// expected-error@-5 {{reference to __host__ function 'h' in __host__ __device__ function}}
// expected-error@-5 {{reference to __host__ function 'ch' in __host__ __device__ function}}
// expected-error@-5 {{reference to __host__ function 'ch' in __host__ __device__ function}}
#endif
CurrentFnPtr fp_dh = dh;
CurrentReturnTy ret_dh = dh();
CurrentFnPtr fp_cdh = cdh;
CurrentReturnTy ret_cdh = cdh();
GlobalFnPtr fp_g = g;
#if defined(__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#endif
g();
#if defined (__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#else
// expected-error@-4 {{call to global function 'g' not configured}}
#endif
hipLaunchKernelGGL(( g), dim3(0),dim3(0), 0, 0, );
#if defined(__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#endif
}
// Test for address of overloaded function resolution in the global context.
HostFnPtr fp_h = h;
HostFnPtr fp_ch = ch;
CurrentFnPtr fp_dh = dh;
CurrentFnPtr fp_cdh = cdh;
GlobalFnPtr fp_g = g;
// Test overloading of destructors
// Can't mix H and unattributed destructors
struct d_h {
~d_h() {} // expected-note {{previous definition is here}}
__host__ ~d_h() {} // expected-error {{destructor cannot be redeclared}}
};
// HD is OK
struct d_hd {
__host__ __device__ ~d_hd() {}
};
// Test overloading of member functions
struct m_h {
void operator delete(void *ptr); // expected-note {{previous declaration is here}}
__host__ void operator delete(void *ptr); // expected-error {{class member cannot be redeclared}}
};
// D/H overloading is OK
struct m_dh {
__device__ void operator delete(void *ptr);
__host__ void operator delete(void *ptr);
};
// HD by itself is OK
struct m_hd {
__device__ __host__ void operator delete(void *ptr);
};
struct m_hhd {
__host__ void operator delete(void *ptr) {} // expected-note {{previous declaration is here}}
__host__ __device__ void operator delete(void *ptr) {}
// expected-error@-1 {{__host__ __device__ function 'operator delete' cannot overload __host__ function 'operator delete'}}
};
struct m_hdh {
__host__ __device__ void operator delete(void *ptr) {} // expected-note {{previous declaration is here}}
__host__ void operator delete(void *ptr) {}
// expected-error@-1 {{__host__ function 'operator delete' cannot overload __host__ __device__ function 'operator delete'}}
};
struct m_dhd {
__device__ void operator delete(void *ptr) {} // expected-note {{previous declaration is here}}
__host__ __device__ void operator delete(void *ptr) {}
// expected-error@-1 {{__host__ __device__ function 'operator delete' cannot overload __device__ function 'operator delete'}}
};
struct m_hdd {
__host__ __device__ void operator delete(void *ptr) {} // expected-note {{previous declaration is here}}
__device__ void operator delete(void *ptr) {}
// expected-error@-1 {{__device__ function 'operator delete' cannot overload __host__ __device__ function 'operator delete'}}
};
// __global__ functions can't be overloaded based on attribute
// difference.
struct G {
friend void friend_of_g(G &arg); // expected-note {{previous declaration is here}}
private:
int x; // expected-note {{declared private here}}
};
__global__ void friend_of_g(G &arg) { int x = arg.x; }
// expected-error@-1 {{__global__ function 'friend_of_g' cannot overload __host__ function 'friend_of_g'}}
// expected-error@-2 {{'x' is a private member of 'G'}}
void friend_of_g(G &arg) { int x = arg.x; }
// HD functions are sometimes allowed to call H or D functions -- this
// is an artifact of the source-to-source splitting performed by nvcc
// that we need to mimic. During device mode compilation in nvcc, host
// functions aren't present at all, so don't participate in
// overloading. But in clang, H and D functions are present in both
// compilation modes. Clang normally uses the target attribute as a
// tiebreaker between overloads with otherwise identical priority, but
// in order to match nvcc's behavior, we sometimes need to wholly
// discard overloads that would not be present during compilation
// under nvcc.
template <typename T> TemplateReturnTy template_vs_function(T arg) {
return TemplateReturnTy();
}
__device__ DeviceReturnTy template_vs_function(float arg) {
return DeviceReturnTy();
}
// Here we expect to call the templated function during host compilation, even
// if -fcuda-disable-target-call-checks is passed, and even though C++ overload
// rules prefer the non-templated function.
__host__ __device__ void test_host_device_calls_template(void) {
#ifdef __CUDA_ARCH__
typedef DeviceReturnTy ExpectedReturnTy;
#else
typedef TemplateReturnTy ExpectedReturnTy;
#endif
ExpectedReturnTy ret1 = template_vs_function(1.0f);
ExpectedReturnTy ret2 = template_vs_function(2.0);
}
// Calls from __host__ and __device__ functions should always call the
// overloaded function that matches their mode.
__host__ void test_host_calls_template_fn() {
TemplateReturnTy ret1 = template_vs_function(1.0f);
TemplateReturnTy ret2 = template_vs_function(2.0);
}
__device__ void test_device_calls_template_fn() {
DeviceReturnTy ret1 = template_vs_function(1.0f);
DeviceReturnTy ret2 = template_vs_function(2.0);
}
// If we have a mix of HD and H-only or D-only candidates in the overload set,
// normal C++ overload resolution rules apply first.
template <typename T> TemplateReturnTy template_vs_hd_function(T arg)
{
return TemplateReturnTy();
}
__host__ __device__ HostDeviceReturnTy template_vs_hd_function(float arg) {
return HostDeviceReturnTy();
}
__host__ __device__ void test_host_device_calls_hd_template() {
#ifdef __CUDA_ARCH__
typedef HostDeviceReturnTy ExpectedReturnTy;
#else
typedef TemplateReturnTy ExpectedReturnTy;
#endif
HostDeviceReturnTy ret1 = template_vs_hd_function(1.0f);
ExpectedReturnTy ret2 = template_vs_hd_function(1);
}
__host__ void test_host_calls_hd_template() {
HostDeviceReturnTy ret1 = template_vs_hd_function(1.0f);
TemplateReturnTy ret2 = template_vs_hd_function(1);
}
__device__ void test_device_calls_hd_template() {
HostDeviceReturnTy ret1 = template_vs_hd_function(1.0f);
// Host-only function template is not callable with strict call checks,
// so for device side HD function will be the only choice.
HostDeviceReturnTy ret2 = template_vs_hd_function(1);
}
// Check that overloads still work the same way on both host and
// device side when the overload set contains only functions from one
// side of compilation.
__device__ DeviceReturnTy device_only_function(int arg) { return DeviceReturnTy(); }
__device__ DeviceReturnTy2 device_only_function(float arg) { return DeviceReturnTy2(); }
#ifndef __CUDA_ARCH__
// expected-note@-3 2{{'device_only_function' declared here}}
// expected-note@-3 2{{'device_only_function' declared here}}
#endif
__host__ HostReturnTy host_only_function(int arg) { return HostReturnTy(); }
__host__ HostReturnTy2 host_only_function(float arg) { return HostReturnTy2(); }
#ifdef __CUDA_ARCH__
// expected-note@-3 2{{'host_only_function' declared here}}
// expected-note@-3 2{{'host_only_function' declared here}}
#endif
__host__ __device__ void test_host_device_single_side_overloading() {
DeviceReturnTy ret1 = device_only_function(1);
DeviceReturnTy2 ret2 = device_only_function(1.0f);
#ifndef __CUDA_ARCH__
// expected-error@-3 {{reference to __device__ function 'device_only_function' in __host__ __device__ function}}
// expected-error@-3 {{reference to __device__ function 'device_only_function' in __host__ __device__ function}}
#endif
HostReturnTy ret3 = host_only_function(1);
HostReturnTy2 ret4 = host_only_function(1.0f);
#ifdef __CUDA_ARCH__
// expected-error@-3 {{reference to __host__ function 'host_only_function' in __host__ __device__ function}}
// expected-error@-3 {{reference to __host__ function 'host_only_function' in __host__ __device__ function}}
#endif
}
// wrong-sided overloading should not cause diagnostic unless it is emitted.
// This inline function is not emitted.
inline __host__ __device__ void test_host_device_wrong_side_overloading_inline_no_diag() {
DeviceReturnTy ret1 = device_only_function(1);
DeviceReturnTy2 ret2 = device_only_function(1.0f);
HostReturnTy ret3 = host_only_function(1);
HostReturnTy2 ret4 = host_only_function(1.0f);
}
// wrong-sided overloading should cause diagnostic if it is emitted.
// This inline function is emitted since it is called by an emitted function.
inline __host__ __device__ void test_host_device_wrong_side_overloading_inline_diag() {
DeviceReturnTy ret1 = device_only_function(1);
DeviceReturnTy2 ret2 = device_only_function(1.0f);
#ifndef __CUDA_ARCH__
// expected-error@-3 {{reference to __device__ function 'device_only_function' in __host__ __device__ function}}
// expected-error@-3 {{reference to __device__ function 'device_only_function' in __host__ __device__ function}}
#endif
HostReturnTy ret3 = host_only_function(1);
HostReturnTy2 ret4 = host_only_function(1.0f);
#ifdef __CUDA_ARCH__
// expected-error@-3 {{reference to __host__ function 'host_only_function' in __host__ __device__ function}}
// expected-error@-3 {{reference to __host__ function 'host_only_function' in __host__ __device__ function}}
#endif
}
__host__ __device__ void test_host_device_wrong_side_overloading_inline_diag_caller() {
test_host_device_wrong_side_overloading_inline_diag();
// expected-note@-1 {{called by 'test_host_device_wrong_side_overloading_inline_diag_caller'}}
}
// Verify that we allow overloading function templates.
template <typename T> __host__ T template_overload(const T &a) { return a; };
template <typename T> __device__ T template_overload(const T &a) { return a; };
__host__ void test_host_template_overload() {
template_overload(1); // OK. Attribute-based overloading picks __host__ variant.
}
__device__ void test_device_template_overload() {
template_overload(1); // OK. Attribute-based overloading picks __device__ variant.
}
// Two classes with `operator-` defined. One of them is device only.
struct C1;
struct C2;
__device__
int operator-(const C1 &x, const C1 &y);
int operator-(const C2 &x, const C2 &y);
template <typename T>
__host__ __device__ int constexpr_overload(const T &x, const T &y) {
return x - y;
}
// Verify that function overloading doesn't prune candidate wrongly.
int test_constexpr_overload(C2 &x, C2 &y) {
return constexpr_overload(x, y);
}
// Verify no ambiguity for new operator.
void *a = new int;
__device__ void *b = new int;
// expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
// Verify no ambiguity for new operator.
template<typename _Tp> _Tp&& f();
template<typename _Tp, typename = decltype(new _Tp(f<_Tp>()))>
void __test();
void foo() {
__test<int>();
}
// Test resolving implicit host device candidate vs wrong-sided candidate.
// In device compilation, implicit host device caller choose implicit host
// device candidate and wrong-sided candidate with equal preference.
// Resolution result should not change with/without pragma.
namespace ImplicitHostDeviceVsWrongSided {
CorrectOverloadRetTy callee(double x);
#pragma clang force_cuda_host_device begin
IncorrectOverloadRetTy callee(int x);
inline CorrectOverloadRetTy implicit_hd_caller() {
return callee(1.0);
}
#pragma clang force_cuda_host_device end
}
// Test resolving implicit host device candidate vs same-sided candidate.
// In host compilation, implicit host device caller choose implicit host
// device candidate and same-sided candidate with equal preference.
// Resolution result should not change with/without pragma.
namespace ImplicitHostDeviceVsSameSide {
IncorrectOverloadRetTy callee(int x);
#pragma clang force_cuda_host_device begin
CorrectOverloadRetTy callee(double x);
inline CorrectOverloadRetTy implicit_hd_caller() {
return callee(1.0);
}
#pragma clang force_cuda_host_device end
}
// Test resolving explicit host device candidate vs. wrong-sided candidate.
// Explicit host device caller favors host device candidate against wrong-sided
// candidate.
namespace ExplicitHostDeviceVsWrongSided {
CorrectOverloadRetTy callee(double x);
__host__ __device__ IncorrectOverloadRetTy callee(int x);
inline __host__ __device__ CorrectOverloadRetTy explicit_hd_caller() {
return callee(1.0);
#if __CUDA_ARCH__
// expected-error@-2 {{no viable conversion from returned value of type 'IncorrectOverloadRetTy' to function return type 'CorrectOverloadRetTy'}}
#endif
}
}
// In the implicit host device function 'caller', the second 'callee' should be
// chosen since it has better match, even though it is an implicit host device
// function whereas the first 'callee' is a host function. A diagnostic will be
// emitted if the first 'callee' is chosen since deduced return type cannot be
// used before it is defined.
namespace ImplicitHostDeviceByConstExpr {
template <class a> a b;
auto callee(...);
template <class d> constexpr auto callee(d) -> decltype(0);
struct e {
template <class ad, class... f> static auto g(ad, f...) {
return h<e, decltype(b<f>)...>;
}
struct i {
template <class, class... f> static constexpr auto caller(f... k) {
return callee(k...);
}
};
template <class, class... f> static auto h() {
return i::caller<int, f...>;
}
};
class l {
l() {
e::g([] {}, this);
}
};
}
| 5fe5331881c6d9536272e8b23060c89aafb41b9a.cu | // REQUIRES: x86-registered-target
// REQUIRES: nvptx-registered-target
// RUN: %clang_cc1 -std=c++14 -triple x86_64-unknown-linux-gnu -fsyntax-only -verify %s
// RUN: %clang_cc1 -std=c++14 -triple nvptx64-nvidia-cuda -fsyntax-only -fcuda-is-device -verify %s
#include "Inputs/cuda.h"
// Opaque return types used to check that we pick the right overloads.
struct HostReturnTy {};
struct HostReturnTy2 {};
struct DeviceReturnTy {};
struct DeviceReturnTy2 {};
struct HostDeviceReturnTy {};
struct TemplateReturnTy {};
struct CorrectOverloadRetTy{};
#if __CUDA_ARCH__
// expected-note@-2 {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'IncorrectOverloadRetTy' to 'const CorrectOverloadRetTy &' for 1st argument}}
// expected-note@-3 {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'IncorrectOverloadRetTy' to 'CorrectOverloadRetTy &&' for 1st argument}}
#endif
struct IncorrectOverloadRetTy{};
typedef HostReturnTy (*HostFnPtr)();
typedef DeviceReturnTy (*DeviceFnPtr)();
typedef HostDeviceReturnTy (*HostDeviceFnPtr)();
typedef void (*GlobalFnPtr)(); // __global__ functions must return void.
// CurrentReturnTy is {HostReturnTy,DeviceReturnTy} during {host,device}
// compilation.
#ifdef __CUDA_ARCH__
typedef DeviceReturnTy CurrentReturnTy;
#else
typedef HostReturnTy CurrentReturnTy;
#endif
// CurrentFnPtr is a function pointer to a {host,device} function during
// {host,device} compilation.
typedef CurrentReturnTy (*CurrentFnPtr)();
// Host and unattributed functions can't be overloaded.
__host__ void hh() {} // expected-note {{previous definition is here}}
void hh() {} // expected-error {{redefinition of 'hh'}}
// H/D overloading is OK.
__host__ HostReturnTy dh() { return HostReturnTy(); }
__device__ DeviceReturnTy dh() { return DeviceReturnTy(); }
// H/HD and D/HD are not allowed.
__host__ __device__ int hdh() { return 0; } // expected-note {{previous declaration is here}}
__host__ int hdh() { return 0; }
// expected-error@-1 {{__host__ function 'hdh' cannot overload __host__ __device__ function 'hdh'}}
__host__ int hhd() { return 0; } // expected-note {{previous declaration is here}}
__host__ __device__ int hhd() { return 0; }
// expected-error@-1 {{__host__ __device__ function 'hhd' cannot overload __host__ function 'hhd'}}
__host__ __device__ int hdd() { return 0; } // expected-note {{previous declaration is here}}
__device__ int hdd() { return 0; }
// expected-error@-1 {{__device__ function 'hdd' cannot overload __host__ __device__ function 'hdd'}}
__device__ int dhd() { return 0; } // expected-note {{previous declaration is here}}
__host__ __device__ int dhd() { return 0; }
// expected-error@-1 {{__host__ __device__ function 'dhd' cannot overload __device__ function 'dhd'}}
// Same tests for extern "C" functions.
extern "C" __host__ int chh() { return 0; } // expected-note {{previous definition is here}}
extern "C" int chh() { return 0; } // expected-error {{redefinition of 'chh'}}
// H/D overloading is OK.
extern "C" __device__ DeviceReturnTy cdh() { return DeviceReturnTy(); }
extern "C" __host__ HostReturnTy cdh() { return HostReturnTy(); }
// H/HD and D/HD overloading is not allowed.
extern "C" __host__ __device__ int chhd1() { return 0; } // expected-note {{previous declaration is here}}
extern "C" __host__ int chhd1() { return 0; }
// expected-error@-1 {{__host__ function 'chhd1' cannot overload __host__ __device__ function 'chhd1'}}
extern "C" __host__ int chhd2() { return 0; } // expected-note {{previous declaration is here}}
extern "C" __host__ __device__ int chhd2() { return 0; }
// expected-error@-1 {{__host__ __device__ function 'chhd2' cannot overload __host__ function 'chhd2'}}
// Helper functions to verify calling restrictions.
__device__ DeviceReturnTy d() { return DeviceReturnTy(); }
// expected-note@-1 1+ {{'d' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __device__ function from __host__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __device__ function from __host__ __device__ function}}
__host__ HostReturnTy h() { return HostReturnTy(); }
// expected-note@-1 1+ {{'h' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __host__ function from __device__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __host__ function from __host__ __device__ function}}
// expected-note@-4 1+ {{candidate function not viable: call to __host__ function from __global__ function}}
__global__ void g() {}
// expected-note@-1 1+ {{'g' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __global__ function from __device__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __global__ function from __host__ __device__ function}}
// expected-note@-4 1+ {{candidate function not viable: call to __global__ function from __global__ function}}
extern "C" __device__ DeviceReturnTy cd() { return DeviceReturnTy(); }
// expected-note@-1 1+ {{'cd' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __device__ function from __host__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __device__ function from __host__ __device__ function}}
extern "C" __host__ HostReturnTy ch() { return HostReturnTy(); }
// expected-note@-1 1+ {{'ch' declared here}}
// expected-note@-2 1+ {{candidate function not viable: call to __host__ function from __device__ function}}
// expected-note@-3 0+ {{candidate function not viable: call to __host__ function from __host__ __device__ function}}
// expected-note@-4 1+ {{candidate function not viable: call to __host__ function from __global__ function}}
__host__ void hostf() {
DeviceFnPtr fp_d = d; // expected-error {{reference to __device__ function 'd' in __host__ function}}
DeviceReturnTy ret_d = d(); // expected-error {{no matching function for call to 'd'}}
DeviceFnPtr fp_cd = cd; // expected-error {{reference to __device__ function 'cd' in __host__ function}}
DeviceReturnTy ret_cd = cd(); // expected-error {{no matching function for call to 'cd'}}
HostFnPtr fp_h = h;
HostReturnTy ret_h = h();
HostFnPtr fp_ch = ch;
HostReturnTy ret_ch = ch();
HostFnPtr fp_dh = dh;
HostReturnTy ret_dh = dh();
HostFnPtr fp_cdh = cdh;
HostReturnTy ret_cdh = cdh();
GlobalFnPtr fp_g = g;
g(); // expected-error {{call to global function 'g' not configured}}
g<<<0, 0>>>();
}
__device__ void devicef() {
DeviceFnPtr fp_d = d;
DeviceReturnTy ret_d = d();
DeviceFnPtr fp_cd = cd;
DeviceReturnTy ret_cd = cd();
HostFnPtr fp_h = h; // expected-error {{reference to __host__ function 'h' in __device__ function}}
HostReturnTy ret_h = h(); // expected-error {{no matching function for call to 'h'}}
HostFnPtr fp_ch = ch; // expected-error {{reference to __host__ function 'ch' in __device__ function}}
HostReturnTy ret_ch = ch(); // expected-error {{no matching function for call to 'ch'}}
DeviceFnPtr fp_dh = dh;
DeviceReturnTy ret_dh = dh();
DeviceFnPtr fp_cdh = cdh;
DeviceReturnTy ret_cdh = cdh();
GlobalFnPtr fp_g = g; // expected-error {{reference to __global__ function 'g' in __device__ function}}
g(); // expected-error {{no matching function for call to 'g'}}
g<<<0,0>>>(); // expected-error {{reference to __global__ function 'g' in __device__ function}}
}
__global__ void globalf() {
DeviceFnPtr fp_d = d;
DeviceReturnTy ret_d = d();
DeviceFnPtr fp_cd = cd;
DeviceReturnTy ret_cd = cd();
HostFnPtr fp_h = h; // expected-error {{reference to __host__ function 'h' in __global__ function}}
HostReturnTy ret_h = h(); // expected-error {{no matching function for call to 'h'}}
HostFnPtr fp_ch = ch; // expected-error {{reference to __host__ function 'ch' in __global__ function}}
HostReturnTy ret_ch = ch(); // expected-error {{no matching function for call to 'ch'}}
DeviceFnPtr fp_dh = dh;
DeviceReturnTy ret_dh = dh();
DeviceFnPtr fp_cdh = cdh;
DeviceReturnTy ret_cdh = cdh();
GlobalFnPtr fp_g = g; // expected-error {{reference to __global__ function 'g' in __global__ function}}
g(); // expected-error {{no matching function for call to 'g'}}
g<<<0,0>>>(); // expected-error {{reference to __global__ function 'g' in __global__ function}}
}
__host__ __device__ void hostdevicef() {
DeviceFnPtr fp_d = d;
DeviceReturnTy ret_d = d();
DeviceFnPtr fp_cd = cd;
DeviceReturnTy ret_cd = cd();
#if !defined(__CUDA_ARCH__)
// expected-error@-5 {{reference to __device__ function 'd' in __host__ __device__ function}}
// expected-error@-5 {{reference to __device__ function 'd' in __host__ __device__ function}}
// expected-error@-5 {{reference to __device__ function 'cd' in __host__ __device__ function}}
// expected-error@-5 {{reference to __device__ function 'cd' in __host__ __device__ function}}
#endif
HostFnPtr fp_h = h;
HostReturnTy ret_h = h();
HostFnPtr fp_ch = ch;
HostReturnTy ret_ch = ch();
#if defined(__CUDA_ARCH__)
// expected-error@-5 {{reference to __host__ function 'h' in __host__ __device__ function}}
// expected-error@-5 {{reference to __host__ function 'h' in __host__ __device__ function}}
// expected-error@-5 {{reference to __host__ function 'ch' in __host__ __device__ function}}
// expected-error@-5 {{reference to __host__ function 'ch' in __host__ __device__ function}}
#endif
CurrentFnPtr fp_dh = dh;
CurrentReturnTy ret_dh = dh();
CurrentFnPtr fp_cdh = cdh;
CurrentReturnTy ret_cdh = cdh();
GlobalFnPtr fp_g = g;
#if defined(__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#endif
g();
#if defined (__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#else
// expected-error@-4 {{call to global function 'g' not configured}}
#endif
g<<<0,0>>>();
#if defined(__CUDA_ARCH__)
// expected-error@-2 {{reference to __global__ function 'g' in __host__ __device__ function}}
#endif
}
// Test for address of overloaded function resolution in the global context.
HostFnPtr fp_h = h;
HostFnPtr fp_ch = ch;
CurrentFnPtr fp_dh = dh;
CurrentFnPtr fp_cdh = cdh;
GlobalFnPtr fp_g = g;
// Test overloading of destructors
// Can't mix H and unattributed destructors
struct d_h {
~d_h() {} // expected-note {{previous definition is here}}
__host__ ~d_h() {} // expected-error {{destructor cannot be redeclared}}
};
// HD is OK
struct d_hd {
__host__ __device__ ~d_hd() {}
};
// Test overloading of member functions
struct m_h {
void operator delete(void *ptr); // expected-note {{previous declaration is here}}
__host__ void operator delete(void *ptr); // expected-error {{class member cannot be redeclared}}
};
// D/H overloading is OK
struct m_dh {
__device__ void operator delete(void *ptr);
__host__ void operator delete(void *ptr);
};
// HD by itself is OK
struct m_hd {
__device__ __host__ void operator delete(void *ptr);
};
struct m_hhd {
__host__ void operator delete(void *ptr) {} // expected-note {{previous declaration is here}}
__host__ __device__ void operator delete(void *ptr) {}
// expected-error@-1 {{__host__ __device__ function 'operator delete' cannot overload __host__ function 'operator delete'}}
};
struct m_hdh {
__host__ __device__ void operator delete(void *ptr) {} // expected-note {{previous declaration is here}}
__host__ void operator delete(void *ptr) {}
// expected-error@-1 {{__host__ function 'operator delete' cannot overload __host__ __device__ function 'operator delete'}}
};
struct m_dhd {
__device__ void operator delete(void *ptr) {} // expected-note {{previous declaration is here}}
__host__ __device__ void operator delete(void *ptr) {}
// expected-error@-1 {{__host__ __device__ function 'operator delete' cannot overload __device__ function 'operator delete'}}
};
struct m_hdd {
__host__ __device__ void operator delete(void *ptr) {} // expected-note {{previous declaration is here}}
__device__ void operator delete(void *ptr) {}
// expected-error@-1 {{__device__ function 'operator delete' cannot overload __host__ __device__ function 'operator delete'}}
};
// __global__ functions can't be overloaded based on attribute
// difference.
struct G {
friend void friend_of_g(G &arg); // expected-note {{previous declaration is here}}
private:
int x; // expected-note {{declared private here}}
};
__global__ void friend_of_g(G &arg) { int x = arg.x; }
// expected-error@-1 {{__global__ function 'friend_of_g' cannot overload __host__ function 'friend_of_g'}}
// expected-error@-2 {{'x' is a private member of 'G'}}
void friend_of_g(G &arg) { int x = arg.x; }
// HD functions are sometimes allowed to call H or D functions -- this
// is an artifact of the source-to-source splitting performed by nvcc
// that we need to mimic. During device mode compilation in nvcc, host
// functions aren't present at all, so don't participate in
// overloading. But in clang, H and D functions are present in both
// compilation modes. Clang normally uses the target attribute as a
// tiebreaker between overloads with otherwise identical priority, but
// in order to match nvcc's behavior, we sometimes need to wholly
// discard overloads that would not be present during compilation
// under nvcc.
template <typename T> TemplateReturnTy template_vs_function(T arg) {
return TemplateReturnTy();
}
__device__ DeviceReturnTy template_vs_function(float arg) {
return DeviceReturnTy();
}
// Here we expect to call the templated function during host compilation, even
// if -fcuda-disable-target-call-checks is passed, and even though C++ overload
// rules prefer the non-templated function.
__host__ __device__ void test_host_device_calls_template(void) {
#ifdef __CUDA_ARCH__
typedef DeviceReturnTy ExpectedReturnTy;
#else
typedef TemplateReturnTy ExpectedReturnTy;
#endif
ExpectedReturnTy ret1 = template_vs_function(1.0f);
ExpectedReturnTy ret2 = template_vs_function(2.0);
}
// Calls from __host__ and __device__ functions should always call the
// overloaded function that matches their mode.
__host__ void test_host_calls_template_fn() {
TemplateReturnTy ret1 = template_vs_function(1.0f);
TemplateReturnTy ret2 = template_vs_function(2.0);
}
__device__ void test_device_calls_template_fn() {
DeviceReturnTy ret1 = template_vs_function(1.0f);
DeviceReturnTy ret2 = template_vs_function(2.0);
}
// If we have a mix of HD and H-only or D-only candidates in the overload set,
// normal C++ overload resolution rules apply first.
template <typename T> TemplateReturnTy template_vs_hd_function(T arg)
{
return TemplateReturnTy();
}
__host__ __device__ HostDeviceReturnTy template_vs_hd_function(float arg) {
return HostDeviceReturnTy();
}
__host__ __device__ void test_host_device_calls_hd_template() {
#ifdef __CUDA_ARCH__
typedef HostDeviceReturnTy ExpectedReturnTy;
#else
typedef TemplateReturnTy ExpectedReturnTy;
#endif
HostDeviceReturnTy ret1 = template_vs_hd_function(1.0f);
ExpectedReturnTy ret2 = template_vs_hd_function(1);
}
__host__ void test_host_calls_hd_template() {
HostDeviceReturnTy ret1 = template_vs_hd_function(1.0f);
TemplateReturnTy ret2 = template_vs_hd_function(1);
}
__device__ void test_device_calls_hd_template() {
HostDeviceReturnTy ret1 = template_vs_hd_function(1.0f);
// Host-only function template is not callable with strict call checks,
// so for device side HD function will be the only choice.
HostDeviceReturnTy ret2 = template_vs_hd_function(1);
}
// Check that overloads still work the same way on both host and
// device side when the overload set contains only functions from one
// side of compilation.
__device__ DeviceReturnTy device_only_function(int arg) { return DeviceReturnTy(); }
__device__ DeviceReturnTy2 device_only_function(float arg) { return DeviceReturnTy2(); }
#ifndef __CUDA_ARCH__
// expected-note@-3 2{{'device_only_function' declared here}}
// expected-note@-3 2{{'device_only_function' declared here}}
#endif
__host__ HostReturnTy host_only_function(int arg) { return HostReturnTy(); }
__host__ HostReturnTy2 host_only_function(float arg) { return HostReturnTy2(); }
#ifdef __CUDA_ARCH__
// expected-note@-3 2{{'host_only_function' declared here}}
// expected-note@-3 2{{'host_only_function' declared here}}
#endif
__host__ __device__ void test_host_device_single_side_overloading() {
DeviceReturnTy ret1 = device_only_function(1);
DeviceReturnTy2 ret2 = device_only_function(1.0f);
#ifndef __CUDA_ARCH__
// expected-error@-3 {{reference to __device__ function 'device_only_function' in __host__ __device__ function}}
// expected-error@-3 {{reference to __device__ function 'device_only_function' in __host__ __device__ function}}
#endif
HostReturnTy ret3 = host_only_function(1);
HostReturnTy2 ret4 = host_only_function(1.0f);
#ifdef __CUDA_ARCH__
// expected-error@-3 {{reference to __host__ function 'host_only_function' in __host__ __device__ function}}
// expected-error@-3 {{reference to __host__ function 'host_only_function' in __host__ __device__ function}}
#endif
}
// wrong-sided overloading should not cause diagnostic unless it is emitted.
// This inline function is not emitted.
inline __host__ __device__ void test_host_device_wrong_side_overloading_inline_no_diag() {
DeviceReturnTy ret1 = device_only_function(1);
DeviceReturnTy2 ret2 = device_only_function(1.0f);
HostReturnTy ret3 = host_only_function(1);
HostReturnTy2 ret4 = host_only_function(1.0f);
}
// wrong-sided overloading should cause diagnostic if it is emitted.
// This inline function is emitted since it is called by an emitted function.
inline __host__ __device__ void test_host_device_wrong_side_overloading_inline_diag() {
DeviceReturnTy ret1 = device_only_function(1);
DeviceReturnTy2 ret2 = device_only_function(1.0f);
#ifndef __CUDA_ARCH__
// expected-error@-3 {{reference to __device__ function 'device_only_function' in __host__ __device__ function}}
// expected-error@-3 {{reference to __device__ function 'device_only_function' in __host__ __device__ function}}
#endif
HostReturnTy ret3 = host_only_function(1);
HostReturnTy2 ret4 = host_only_function(1.0f);
#ifdef __CUDA_ARCH__
// expected-error@-3 {{reference to __host__ function 'host_only_function' in __host__ __device__ function}}
// expected-error@-3 {{reference to __host__ function 'host_only_function' in __host__ __device__ function}}
#endif
}
__host__ __device__ void test_host_device_wrong_side_overloading_inline_diag_caller() {
test_host_device_wrong_side_overloading_inline_diag();
// expected-note@-1 {{called by 'test_host_device_wrong_side_overloading_inline_diag_caller'}}
}
// Verify that we allow overloading function templates.
template <typename T> __host__ T template_overload(const T &a) { return a; };
template <typename T> __device__ T template_overload(const T &a) { return a; };
__host__ void test_host_template_overload() {
template_overload(1); // OK. Attribute-based overloading picks __host__ variant.
}
__device__ void test_device_template_overload() {
template_overload(1); // OK. Attribute-based overloading picks __device__ variant.
}
// Two classes with `operator-` defined. One of them is device only.
struct C1;
struct C2;
__device__
int operator-(const C1 &x, const C1 &y);
int operator-(const C2 &x, const C2 &y);
template <typename T>
__host__ __device__ int constexpr_overload(const T &x, const T &y) {
return x - y;
}
// Verify that function overloading doesn't prune candidate wrongly.
int test_constexpr_overload(C2 &x, C2 &y) {
return constexpr_overload(x, y);
}
// Verify no ambiguity for new operator.
void *a = new int;
__device__ void *b = new int;
// expected-error@-1{{dynamic initialization is not supported for __device__, __constant__, and __shared__ variables.}}
// Verify no ambiguity for new operator.
template<typename _Tp> _Tp&& f();
template<typename _Tp, typename = decltype(new _Tp(f<_Tp>()))>
void __test();
void foo() {
__test<int>();
}
// Test resolving implicit host device candidate vs wrong-sided candidate.
// In device compilation, implicit host device caller choose implicit host
// device candidate and wrong-sided candidate with equal preference.
// Resolution result should not change with/without pragma.
namespace ImplicitHostDeviceVsWrongSided {
CorrectOverloadRetTy callee(double x);
#pragma clang force_cuda_host_device begin
IncorrectOverloadRetTy callee(int x);
inline CorrectOverloadRetTy implicit_hd_caller() {
return callee(1.0);
}
#pragma clang force_cuda_host_device end
}
// Test resolving implicit host device candidate vs same-sided candidate.
// In host compilation, implicit host device caller choose implicit host
// device candidate and same-sided candidate with equal preference.
// Resolution result should not change with/without pragma.
namespace ImplicitHostDeviceVsSameSide {
IncorrectOverloadRetTy callee(int x);
#pragma clang force_cuda_host_device begin
CorrectOverloadRetTy callee(double x);
inline CorrectOverloadRetTy implicit_hd_caller() {
return callee(1.0);
}
#pragma clang force_cuda_host_device end
}
// Test resolving explicit host device candidate vs. wrong-sided candidate.
// Explicit host device caller favors host device candidate against wrong-sided
// candidate.
namespace ExplicitHostDeviceVsWrongSided {
CorrectOverloadRetTy callee(double x);
__host__ __device__ IncorrectOverloadRetTy callee(int x);
inline __host__ __device__ CorrectOverloadRetTy explicit_hd_caller() {
return callee(1.0);
#if __CUDA_ARCH__
// expected-error@-2 {{no viable conversion from returned value of type 'IncorrectOverloadRetTy' to function return type 'CorrectOverloadRetTy'}}
#endif
}
}
// In the implicit host device function 'caller', the second 'callee' should be
// chosen since it has better match, even though it is an implicit host device
// function whereas the first 'callee' is a host function. A diagnostic will be
// emitted if the first 'callee' is chosen since deduced return type cannot be
// used before it is defined.
namespace ImplicitHostDeviceByConstExpr {
template <class a> a b;
auto callee(...);
template <class d> constexpr auto callee(d) -> decltype(0);
struct e {
template <class ad, class... f> static auto g(ad, f...) {
return h<e, decltype(b<f>)...>;
}
struct i {
template <class, class... f> static constexpr auto caller(f... k) {
return callee(k...);
}
};
template <class, class... f> static auto h() {
return i::caller<int, f...>;
}
};
class l {
l() {
e::g([] {}, this);
}
};
}
|
3c1ee78793052dcff2f5307453404e30e86e15a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../include/TimeSeriesPredictor.cuh"
TimeSeriesPredictor::TimeSeriesPredictor(std::vector<float> data, int numberOfNodes, int populationSize, int windowSize) : distribution(0.0, 1.0) {
this->data = data;
std::random_device rd;
mt = std::mt19937(rd());
this->numberOfNodes = numberOfNodes;
this->populationSize = populationSize;
this->windowSize = windowSize;
this->k = 5.0;
this->currentMean = 0;
}
TimeSeriesPredictor::~TimeSeriesPredictor() {
gpuErrchk(hipFree(this->fitnessGpu));
gpuErrchk(hipFree(this->dataWeightsGpu));
gpuErrchk(hipFree(this->mapWeightsGpu));
gpuErrchk(hipFree(this->dataGpu));
gpuErrchk(hipFree(this->mapInputGpu));
delete [] this->fitnessHost;
}
auto TimeSeriesPredictor::prepareGpuMemory() -> void {
gpuErrchk(hipMalloc((void**)&this->dataGpu, this->data.size() * sizeof(float)));
gpuErrchk(hipMalloc((void**)&this->fitnessGpu, this->populationSize * sizeof(float)));
gpuErrchk(hipMalloc((void**)&this->dataWeightsGpu, this->populationSize * this->windowSize * this->numberOfNodes * sizeof(float)));
gpuErrchk(hipMalloc((void**)&this->mapWeightsGpu, this->populationSize * this->numberOfNodes * this->numberOfNodes * sizeof(float)));
gpuErrchk(hipMalloc((void**)&this->mapInputGpu, this->populationSize * this->numberOfNodes * sizeof(float)));
this->fitnessHost = new float[this->populationSize];
gpuErrchk(hipMemcpy(this->dataGpu, this->data.data(), this->data.size() * sizeof(float), hipMemcpyHostToDevice));
}
auto TimeSeriesPredictor::train() -> std::vector<float> {
int generation = 0;
Chromosome bestCandidate, previousBestCandidate;
previousBestCandidate.fitness = 0.0f;
std::cout << "Generating initial population...\n";
this->generatePopulation();
this->prepareGpuMemory();
std::cout << "Training started...\n";
while(true) {
std::vector<Chromosome> nextGen;
this->launchCudaKernel();
bestCandidate = this->maxFitness(this->population);
std::cout << "-----GEN " << generation << " -------" << std::endl;
std::cout << "Best fitness: " << bestCandidate.fitness << std::endl;
std::cout << "Mean fitness: " << this->currentMean << std::endl;
if(generation == 100 || abs(1.0 - bestCandidate.fitness) < 1e-4) break;
while(nextGen.size() < this->populationSize) {
auto parents = this->tournamentSelection();
if(this->distribution(mt) < 0.5) {
auto children = this->crossover(parents[0], parents[1]);
for(auto child: children) {
nextGen.push_back(this->mutate(child));
}
} else {
nextGen.push_back(this->mutate(parents[0]));
}
}
this->population = nextGen;
previousBestCandidate = bestCandidate;
++generation;
}
// return this->maxFitness(this->population).genes;
return bestCandidate.genes;
}
auto TimeSeriesPredictor::printPopulation() -> void {
for(auto& chr : this->population) {
printf("singular fitness: %f \n", chr.fitness);
printf("Genes: \n");
for(auto& gene : chr.genes) {
printf(" %f ", gene);
}
printf("\n");
}
}
auto TimeSeriesPredictor::maxFitness(std::vector<Chromosome> population) -> Chromosome {
float maxFitness = -1.0;
Chromosome max;
for(auto chr: population) {
if(chr.fitness > maxFitness) {
maxFitness = chr.fitness;
max = chr;
}
}
return max;
}
auto TimeSeriesPredictor::crossover(Chromosome chr1, Chromosome chr2) -> std::vector<Chromosome> {
int w = this->windowSize;
int n = this->numberOfNodes;
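    // Single-point crossover on the flat gene vector: the span covering the
    // second half of the data weights plus the first half of the map weights
    // is swapped between the two parents via the temporary copy below.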
std::vector<float> temp(chr1.genes.begin() + w * n / 2, chr1.genes.begin() + w * n + n * n / 2);
memcpy(static_cast<void *>(chr1.genes.data() + (w * n) / 2), static_cast<void *>(chr2.genes.data() + (w * n) / 2), sizeof(float)*(w * n / 2 + n * n / 2));
memcpy(static_cast<void *>(chr2.genes.data() + (w * n) / 2), static_cast<void *>(temp.data()), sizeof(float)*(w * n / 2 + n * n / 2));
return std::vector<Chromosome> {chr1, chr2};
}
auto TimeSeriesPredictor::mutate(Chromosome chr) -> Chromosome {
for(int i = 0; i < chr.genes.size(); ++i) {
if(this->distribution(mt) < 0.1) {
chr.genes[i] = distribution(mt) * 2 - 1;
}
}
return chr;
}
auto TimeSeriesPredictor::randomGenes(int size) -> Chromosome {
Chromosome chr;
for(auto i = 0; i < size; ++i){
chr.genes.push_back(this->distribution(mt) * 2 - 1);
}
return chr;
}
auto TimeSeriesPredictor::generatePopulation() -> void {
int n = this->numberOfNodes;
for(int i = 0; i < this->populationSize; ++i) {
this->population.push_back(randomGenes(pow(n, 2) + n * this->windowSize));
}
}
auto TimeSeriesPredictor::tournamentSelection() -> std::vector<Chromosome> {
std::vector<Chromosome> result;
for(int i = 0; i < 2; ++i) {
auto tournamentPopulation = this->randomSampleFromPopulation(5);
result.push_back(this->maxFitness(tournamentPopulation));
}
return result;
}
auto TimeSeriesPredictor::randomSampleFromPopulation(int size) -> std::vector<Chromosome> {
std::vector<Chromosome> result;
for(int i = 0; i < size; ++i) {
int r = rand() % this->populationSize;
result.push_back(this->population[r]);
}
return result;
}
auto TimeSeriesPredictor::launchCudaKernel() -> void {
int w = this->windowSize;
int n = this->numberOfNodes;
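    // Each chromosome stores w*n data weights followed by n*n map weights;
    // the loop below copies both slices into the per-individual offsets of
    // the flat dataWeightsGpu / mapWeightsGpu device buffers.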
for(int i = 0; i < this->populationSize; ++i) {
float * weights = this->population[i].genes.data();
gpuErrchk(hipMemcpy(&this->dataWeightsGpu[i * w * n], weights, w * n * sizeof(float), hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(&this->mapWeightsGpu[i * n * n], &weights[w * n], n * n * sizeof(float), hipMemcpyHostToDevice));
}
gpuErrchk(hipMemset(this->mapInputGpu, 0, this->populationSize * n * sizeof(float)));
hipLaunchKernelGGL(( calculate_fitness), dim3(4), dim3(512), 0, 0,
this->dataWeightsGpu,
this->mapWeightsGpu,
this->dataGpu,
this->fitnessGpu,
this->mapInputGpu,
w,
n,
this->populationSize,
this->data.size()
);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(fitnessHost, fitnessGpu, this->populationSize * sizeof(float), hipMemcpyDeviceToHost));
auto sum = 0.0;
auto count = 0;
for(int i = 0; i < this->populationSize; ++i) {
sum += fitnessHost[i];
++count;
this->population[i].fitness = fitnessHost[i];
}
this->currentMean = sum / count;
}
| 3c1ee78793052dcff2f5307453404e30e86e15a8.cu | #include "../include/TimeSeriesPredictor.cuh"
TimeSeriesPredictor::TimeSeriesPredictor(std::vector<float> data, int numberOfNodes, int populationSize, int windowSize) : distribution(0.0, 1.0) {
this->data = data;
std::random_device rd;
mt = std::mt19937(rd());
this->numberOfNodes = numberOfNodes;
this->populationSize = populationSize;
this->windowSize = windowSize;
this->k = 5.0;
this->currentMean = 0;
}
TimeSeriesPredictor::~TimeSeriesPredictor() {
gpuErrchk(cudaFree(this->fitnessGpu));
gpuErrchk(cudaFree(this->dataWeightsGpu));
gpuErrchk(cudaFree(this->mapWeightsGpu));
gpuErrchk(cudaFree(this->dataGpu));
gpuErrchk(cudaFree(this->mapInputGpu));
delete [] this->fitnessHost;
}
auto TimeSeriesPredictor::prepareGpuMemory() -> void {
gpuErrchk(cudaMalloc((void**)&this->dataGpu, this->data.size() * sizeof(float)));
gpuErrchk(cudaMalloc((void**)&this->fitnessGpu, this->populationSize * sizeof(float)));
gpuErrchk(cudaMalloc((void**)&this->dataWeightsGpu, this->populationSize * this->windowSize * this->numberOfNodes * sizeof(float)));
gpuErrchk(cudaMalloc((void**)&this->mapWeightsGpu, this->populationSize * this->numberOfNodes * this->numberOfNodes * sizeof(float)));
gpuErrchk(cudaMalloc((void**)&this->mapInputGpu, this->populationSize * this->numberOfNodes * sizeof(float)));
this->fitnessHost = new float[this->populationSize];
gpuErrchk(cudaMemcpy(this->dataGpu, this->data.data(), this->data.size() * sizeof(float), cudaMemcpyHostToDevice));
}
auto TimeSeriesPredictor::train() -> std::vector<float> {
int generation = 0;
Chromosome bestCandidate, previousBestCandidate;
previousBestCandidate.fitness = 0.0f;
std::cout << "Generating initial population...\n";
this->generatePopulation();
this->prepareGpuMemory();
std::cout << "Training started...\n";
while(true) {
std::vector<Chromosome> nextGen;
this->launchCudaKernel();
bestCandidate = this->maxFitness(this->population);
std::cout << "-----GEN " << generation << " -------" << std::endl;
std::cout << "Best fitness: " << bestCandidate.fitness << std::endl;
std::cout << "Mean fitness: " << this->currentMean << std::endl;
if(generation == 100 || abs(1.0 - bestCandidate.fitness) < 1e-4) break;
while(nextGen.size() < this->populationSize) {
auto parents = this->tournamentSelection();
if(this->distribution(mt) < 0.5) {
auto children = this->crossover(parents[0], parents[1]);
for(auto child: children) {
nextGen.push_back(this->mutate(child));
}
} else {
nextGen.push_back(this->mutate(parents[0]));
}
}
this->population = nextGen;
previousBestCandidate = bestCandidate;
++generation;
}
// return this->maxFitness(this->population).genes;
return bestCandidate.genes;
}
auto TimeSeriesPredictor::printPopulation() -> void {
for(auto& chr : this->population) {
printf("singular fitness: %f \n", chr.fitness);
printf("Genes: \n");
for(auto& gene : chr.genes) {
printf(" %f ", gene);
}
printf("\n");
}
}
auto TimeSeriesPredictor::maxFitness(std::vector<Chromosome> population) -> Chromosome {
float maxFitness = -1.0;
Chromosome max;
for(auto chr: population) {
if(chr.fitness > maxFitness) {
maxFitness = chr.fitness;
max = chr;
}
}
return max;
}
auto TimeSeriesPredictor::crossover(Chromosome chr1, Chromosome chr2) -> std::vector<Chromosome> {
int w = this->windowSize;
int n = this->numberOfNodes;
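    // Single-point crossover on the flat gene vector: the span covering the
    // second half of the data weights plus the first half of the map weights
    // is swapped between the two parents via the temporary copy below.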
std::vector<float> temp(chr1.genes.begin() + w * n / 2, chr1.genes.begin() + w * n + n * n / 2);
memcpy(static_cast<void *>(chr1.genes.data() + (w * n) / 2), static_cast<void *>(chr2.genes.data() + (w * n) / 2), sizeof(float)*(w * n / 2 + n * n / 2));
memcpy(static_cast<void *>(chr2.genes.data() + (w * n) / 2), static_cast<void *>(temp.data()), sizeof(float)*(w * n / 2 + n * n / 2));
return std::vector<Chromosome> {chr1, chr2};
}
auto TimeSeriesPredictor::mutate(Chromosome chr) -> Chromosome {
for(int i = 0; i < chr.genes.size(); ++i) {
if(this->distribution(mt) < 0.1) {
chr.genes[i] = distribution(mt) * 2 - 1;
}
}
return chr;
}
auto TimeSeriesPredictor::randomGenes(int size) -> Chromosome {
Chromosome chr;
for(auto i = 0; i < size; ++i){
chr.genes.push_back(this->distribution(mt) * 2 - 1);
}
return chr;
}
auto TimeSeriesPredictor::generatePopulation() -> void {
int n = this->numberOfNodes;
for(int i = 0; i < this->populationSize; ++i) {
this->population.push_back(randomGenes(pow(n, 2) + n * this->windowSize));
}
}
auto TimeSeriesPredictor::tournamentSelection() -> std::vector<Chromosome> {
std::vector<Chromosome> result;
for(int i = 0; i < 2; ++i) {
auto tournamentPopulation = this->randomSampleFromPopulation(5);
result.push_back(this->maxFitness(tournamentPopulation));
}
return result;
}
auto TimeSeriesPredictor::randomSampleFromPopulation(int size) -> std::vector<Chromosome> {
std::vector<Chromosome> result;
for(int i = 0; i < size; ++i) {
int r = rand() % this->populationSize;
result.push_back(this->population[r]);
}
return result;
}
auto TimeSeriesPredictor::launchCudaKernel() -> void {
int w = this->windowSize;
int n = this->numberOfNodes;
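    // Each chromosome stores w*n data weights followed by n*n map weights;
    // the loop below copies both slices into the per-individual offsets of
    // the flat dataWeightsGpu / mapWeightsGpu device buffers.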
for(int i = 0; i < this->populationSize; ++i) {
float * weights = this->population[i].genes.data();
gpuErrchk(cudaMemcpy(&this->dataWeightsGpu[i * w * n], weights, w * n * sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(&this->mapWeightsGpu[i * n * n], &weights[w * n], n * n * sizeof(float), cudaMemcpyHostToDevice));
}
gpuErrchk(cudaMemset(this->mapInputGpu, 0, this->populationSize * n * sizeof(float)));
calculate_fitness<<<4, 512>>>(
this->dataWeightsGpu,
this->mapWeightsGpu,
this->dataGpu,
this->fitnessGpu,
this->mapInputGpu,
w,
n,
this->populationSize,
this->data.size()
);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(fitnessHost, fitnessGpu, this->populationSize * sizeof(float), cudaMemcpyDeviceToHost));
auto sum = 0.0;
auto count = 0;
for(int i = 0; i < this->populationSize; ++i) {
sum += fitnessHost[i];
++count;
this->population[i].fitness = fitnessHost[i];
}
this->currentMean = sum / count;
}
|
5183f6cfc27b05da9cd9053eac43df9bec823346.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "cuda_utility.cuh"
#include <cassert>
#ifndef _CUDA_UTIL_FUNC_
void allocateArray(void **devPtr, size_t size)
{
CudaSafeCall(hipMalloc(devPtr, size));
}
template < typename T >
void allocateUM(T *devPtr, size_t size)
{
CudaSafeCall(hipMallocManaged(devPtr, size));
}
void freeArray(void *devPtr)
{
CudaSafeCall(hipFree(devPtr));
}
void threadSync()
{
CudaSafeCall(hipDeviceSynchronize());
}
void copyD2H(void* dst, void* src, size_t memSize) {
CudaSafeCall(hipMemcpy(dst, src, memSize, hipMemcpyDeviceToHost));
}
void copyH2D(void* dst, void* src, size_t memSize) {
CudaSafeCall(hipMemcpy(dst, src, memSize, hipMemcpyHostToDevice));
}
#define copyH2S(symbol, src, memSize) \
CudaSafeCall(hipMemcpyToSymbol(symbol, src, memSize))
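// copyH2S forwards its arguments directly to hipMemcpyToSymbol; the first
// argument is expected to be a __constant__/__device__ symbol rather than a
// host pointer, which is presumably why this helper is a macro instead of a
// function like the wrappers above.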
#endif
| 5183f6cfc27b05da9cd9053eac43df9bec823346.cu | #include <cuda.h>
#include "cuda_utility.cuh"
#include <cassert>
#ifndef _CUDA_UTIL_FUNC_
void allocateArray(void **devPtr, size_t size)
{
CudaSafeCall(cudaMalloc(devPtr, size));
}
template < typename T >
void allocateUM(T *devPtr, size_t size)
{
CudaSafeCall(cudaMallocManaged(devPtr, size));
}
void freeArray(void *devPtr)
{
CudaSafeCall(cudaFree(devPtr));
}
void threadSync()
{
CudaSafeCall(cudaThreadSynchronize());
}
void copyD2H(void* dst, void* src, size_t memSize) {
CudaSafeCall(cudaMemcpy(dst, src, memSize, cudaMemcpyDeviceToHost));
}
void copyH2D(void* dst, void* src, size_t memSize) {
CudaSafeCall(cudaMemcpy(dst, src, memSize, cudaMemcpyHostToDevice));
}
#define copyH2S(symbol, src, memSize) \
CudaSafeCall(cudaMemcpyToSymbol(symbol, src, memSize))
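// copyH2S forwards its arguments directly to cudaMemcpyToSymbol; the first
// argument is expected to be a __constant__/__device__ symbol rather than a
// host pointer, which is presumably why this helper is a macro instead of a
// function like the wrappers above.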
#endif
|
ccbddac6807c270ba78f630f81b01214b0d482fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
printf("ola\n");
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
| ccbddac6807c270ba78f630f81b01214b0d482fb.cu | #include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
printf("ola\n");
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
20db87502bab07bee29c1a52bd9f558cab96c420.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-24
*/
#include "Rectify.cuh"
#include "../XDevice.h"
namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
hard rectify computation (Cuda kernel)
rectify : y = x if x >= 0
0 if x < 0
>> input - input tensor
>> output - output tensor
>> size - size of input/output
*/
template<class T>
__global__
void KernelRectify(T * x, T * y, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
T p = x[i];
if(p < (T)0.0)
p = (T)0.0;
y[i] = p;
}
}
/*
rectify function y = max(0, x)
>> x - input tensor
>> y - result
*/
void _CudaRectify(const XTensor * x, XTensor * y)
{
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if (x->dataType == DEFAULT_DTYPE) {
        hipLaunchKernelGGL((KernelRectify), dim3(gridSize[0]), dim3(blockSize[0]), 0, 0,
                           (DTYPE*)x->data, (DTYPE*)y->data, x->unitNum);
}
else if (x->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
        hipLaunchKernelGGL((KernelRectify), dim3(gridSize[0]), dim3(blockSize[0]), 0, 0,
                           (__half*)x->data, (__half*)y->data, x->unitNum);
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
/*
rectify backward computation of dE/dx (Cuda kernel)
dy/dx = 1 if x >= 0
0 otherwise
>> dedy - dE/dy
>> dedx - dE/dx
>> x - input of the function
>> size - size of output/input
*/
template<class T>
__global__
void KernelRectifyBackward(T * dedy, T * dedx, T * x, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
T s = x[i];
if(s >= (T)0.0)
dedx[i] = dedy[i];
else
dedx[i] = (T)0.0;
}
}
/*
backward computation (Cuda version)
dE/dx = dE/dy * dy/dx
rectify : y = s if s >= 0
0 if s < 0
and dy/ds = 1 if s >= 0
0 otherwise
>> y - output of the rectify function
>> x - input of the rectify function
>> dedy - dE/dy
>> dedx - dE/dx
*/
void _CudaRectifyBackward(XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx)
{
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
/* dE/ds = dE/dy * dy/ds */
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
        hipLaunchKernelGGL((KernelRectifyBackward), dim3(gridSize[0]), dim3(blockSize[0]), 0, 0,
(DTYPE*)dedy->data,
(DTYPE*)dedx->data,
(DTYPE*)x->data,
x->unitNum);
}
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
        hipLaunchKernelGGL((KernelRectifyBackward), dim3(gridSize[0]), dim3(blockSize[0]), 0, 0,
                           (__half*)dedy->data,
(__half*)dedx->data,
(__half*)x->data,
x->unitNum);
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
#endif
} // namespace nts(NiuTrans.Tensor) | 20db87502bab07bee29c1a52bd9f558cab96c420.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-04-24
*/
#include "Rectify.cuh"
#include "../XDevice.h"
namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
hard rectify computation (Cuda kernel)
rectify : y = x if x >= 0
0 if x < 0
>> input - input tensor
>> output - output tensor
>> size - size of input/output
*/
template<class T>
__global__
void KernelRectify(T * x, T * y, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
T p = x[i];
if(p < (T)0.0)
p = (T)0.0;
y[i] = p;
}
}
/*
rectify function y = max(0, x)
>> x - input tensor
>> y - result
*/
void _CudaRectify(const XTensor * x, XTensor * y)
{
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
if (x->dataType == DEFAULT_DTYPE) {
KernelRectify<<<dim3(gridSize[0]), dim3(blockSize[0])>>>
((DTYPE*)x->data, (DTYPE*)y->data, x->unitNum);
}
else if (x->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
KernelRectify<<<dim3(gridSize[0]), dim3(blockSize[0]) >> >
((__half*)x->data, (__half*)y->data, x->unitNum);
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
/*
rectify backward computation of dE/dx (Cuda kernel)
dy/dx = 1 if x >= 0
0 otherwise
>> dedy - dE/dy
>> dedx - dE/dx
>> x - input of the function
>> size - size of output/input
*/
template<class T>
__global__
void KernelRectifyBackward(T * dedy, T * dedx, T * x, int size)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
T s = x[i];
if(s >= (T)0.0)
dedx[i] = dedy[i];
else
dedx[i] = (T)0.0;
}
}
/*
backward computation (Cuda version)
dE/dx = dE/dy * dy/dx
rectify : y = s if s >= 0
0 if s < 0
and dy/ds = 1 if s >= 0
0 otherwise
>> y - output of the rectify function
>> x - input of the rectify function
>> dedy - dE/dy
>> dedx - dE/dx
*/
void _CudaRectifyBackward(XTensor * y, XTensor * x,
XTensor * dedy, XTensor * dedx)
{
int gridSize[3], blockSize[3];
GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
int devIDBackup;
ProtectCudaDev(x->devID, devIDBackup);
/* dE/ds = dE/dy * dy/ds */
if (x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE) {
KernelRectifyBackward<<<dim3(gridSize[0]),dim3(blockSize[0])>>>
((DTYPE*)dedy->data,
(DTYPE*)dedx->data,
(DTYPE*)x->data,
x->unitNum);
}
else if (x->dataType == X_FLOAT16 && y->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
KernelRectifyBackward<<<dim3(gridSize[0]), dim3(blockSize[0]) >> >
((__half*)dedy->data,
(__half*)dedx->data,
(__half*)x->data,
x->unitNum);
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else {
// TODO!!
ShowNTErrors("TODO!");
}
BacktoCudaDev(x->devID, devIDBackup);
}
#endif
} // namespace nts(NiuTrans.Tensor) |
1e5dae7103829343c1372c794139b0f53cbbe75f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "MD_ED_D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *S = NULL;
hipMalloc(&S, XSIZE*YSIZE);
float *T = NULL;
hipMalloc(&T, XSIZE*YSIZE);
int trainSize = XSIZE*YSIZE;
int window_size = XSIZE*YSIZE;
int dimensions = 1;
float *data_out = NULL;
hipMalloc(&data_out, XSIZE*YSIZE);
int task = 1;
int gm = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((MD_ED_D), dim3(gridBlock), dim3(threadBlock), 0, 0, S, T, trainSize, window_size, dimensions, data_out, task, gm);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((MD_ED_D), dim3(gridBlock), dim3(threadBlock), 0, 0, S, T, trainSize, window_size, dimensions, data_out, task, gm);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((MD_ED_D), dim3(gridBlock), dim3(threadBlock), 0, 0, S, T, trainSize, window_size, dimensions, data_out, task, gm);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
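// usecs is the total time of the 1000 timed launches above; the line below
// prints one "[total_microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]" record per
// block/matrix configuration.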
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1e5dae7103829343c1372c794139b0f53cbbe75f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "MD_ED_D.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *S = NULL;
cudaMalloc(&S, XSIZE*YSIZE);
float *T = NULL;
cudaMalloc(&T, XSIZE*YSIZE);
int trainSize = XSIZE*YSIZE;
int window_size = XSIZE*YSIZE;
int dimensions = 1;
float *data_out = NULL;
cudaMalloc(&data_out, XSIZE*YSIZE);
int task = 1;
int gm = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
MD_ED_D<<<gridBlock,threadBlock>>>(S,T,trainSize,window_size,dimensions,data_out,task,gm);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
MD_ED_D<<<gridBlock,threadBlock>>>(S,T,trainSize,window_size,dimensions,data_out,task,gm);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
MD_ED_D<<<gridBlock,threadBlock>>>(S,T,trainSize,window_size,dimensions,data_out,task,gm);
}
cudaDeviceSynchronize(); // kernel launches are asynchronous; wait for them to finish before stopping the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b2cd154fa6c64296ba571ba62daab4827d3c75d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/vecmath.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "internal_shared.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace mathfunc
{
//////////////////////////////////////////////////////////////////////////////////////
// Compare
template <typename T1, typename T2>
struct NotEqual
{
__device__ __forceinline__ uchar operator()(const T1& src1, const T2& src2)
{
return static_cast<uchar>(static_cast<int>(src1 != src2) * 255);
}
};
template <typename T1, typename T2>
inline void compare_ne(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream)
{
NotEqual<T1, T2> op;
transform(static_cast< DevMem2D_<T1> >(src1), static_cast< DevMem2D_<T2> >(src2), dst, op, stream);
}
void compare_ne_8uc4(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream)
{
compare_ne<uint, uint>(src1, src2, dst, stream);
}
void compare_ne_32f(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream)
{
compare_ne<float, float>(src1, src2, dst, stream);
}
//////////////////////////////////////////////////////////////////////////
// Unary bitwise logical matrix operations
enum { UN_OP_NOT };
template <typename T, int opid>
struct UnOp;
template <typename T>
struct UnOp<T, UN_OP_NOT>
{
static __device__ __forceinline__ T call(T v) { return ~v; }
};
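// Each thread processes one 32-bit word of the row as raw bytes; the last partial word of a row is handled byte by byte.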
template <int opid>
__global__ void bitwiseUnOpKernel(int rows, int width, const PtrStep src, PtrStep dst)
{
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;
const uchar* src_ptr = src.ptr(y) + x;
if (x + sizeof(uint) - 1 < width)
{
*(uint*)dst_ptr = UnOp<uint, opid>::call(*(uint*)src_ptr);
}
else
{
const uchar* src_end = src.ptr(y) + width;
while (src_ptr < src_end)
{
*dst_ptr++ = UnOp<uchar, opid>::call(*src_ptr++);
}
}
}
}
template <int opid>
void bitwiseUnOp(int rows, int width, const PtrStep src, PtrStep dst,
hipStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(width, threads.x * sizeof(uint)),
divUp(rows, threads.y));
hipLaunchKernelGGL(( bitwiseUnOpKernel<opid>), dim3(grid), dim3(threads), 0, 0, rows, width, src, dst);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, int opid>
__global__ void bitwiseUnOpKernel(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < cols && y < rows && mask.ptr(y)[x / cn])
{
T* dst_row = (T*)dst.ptr(y);
const T* src_row = (const T*)src.ptr(y);
dst_row[x] = UnOp<T, opid>::call(src_row[x]);
}
}
template <typename T, int opid>
void bitwiseUnOp(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
hipLaunchKernelGGL(( bitwiseUnOpKernel<T, opid>), dim3(grid), dim3(threads), 0, 0, rows, cols, cn, src, mask, dst);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void bitwiseNotCaller(int rows, int cols, int elem_size1, int cn,
const PtrStep src, PtrStep dst, hipStream_t stream)
{
bitwiseUnOp<UN_OP_NOT>(rows, cols * elem_size1 * cn, src, dst, stream);
}
template <typename T>
void bitwiseMaskNotCaller(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
bitwiseUnOp<T, UN_OP_NOT>(rows, cols * cn, cn, src, mask, dst, stream);
}
template void bitwiseMaskNotCaller<uchar>(int, int, int, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskNotCaller<ushort>(int, int, int, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskNotCaller<uint>(int, int, int, const PtrStep, const PtrStep, PtrStep, hipStream_t);
//////////////////////////////////////////////////////////////////////////
// Binary bitwise logical matrix operations
enum { BIN_OP_OR, BIN_OP_AND, BIN_OP_XOR };
template <typename T, int opid>
struct BinOp;
template <typename T>
struct BinOp<T, BIN_OP_OR>
{
static __device__ __forceinline__ T call(T a, T b) { return a | b; }
};
template <typename T>
struct BinOp<T, BIN_OP_AND>
{
static __device__ __forceinline__ T call(T a, T b) { return a & b; }
};
template <typename T>
struct BinOp<T, BIN_OP_XOR>
{
static __device__ __forceinline__ T call(T a, T b) { return a ^ b; }
};
template <int opid>
__global__ void bitwiseBinOpKernel(int rows, int width, const PtrStep src1,
const PtrStep src2, PtrStep dst)
{
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;
const uchar* src1_ptr = src1.ptr(y) + x;
const uchar* src2_ptr = src2.ptr(y) + x;
if (x + sizeof(uint) - 1 < width)
{
*(uint*)dst_ptr = BinOp<uint, opid>::call(*(uint*)src1_ptr, *(uint*)src2_ptr);
}
else
{
const uchar* src1_end = src1.ptr(y) + width;
while (src1_ptr < src1_end)
{
*dst_ptr++ = BinOp<uchar, opid>::call(*src1_ptr++, *src2_ptr++);
}
}
}
}
template <int opid>
void bitwiseBinOp(int rows, int width, const PtrStep src1, const PtrStep src2,
PtrStep dst, hipStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(width, threads.x * sizeof(uint)), divUp(rows, threads.y));
hipLaunchKernelGGL(( bitwiseBinOpKernel<opid>), dim3(grid), dim3(threads), 0, 0, rows, width, src1, src2, dst);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T, int opid>
__global__ void bitwiseBinOpKernel(
int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < cols && y < rows && mask.ptr(y)[x / cn])
{
T* dst_row = (T*)dst.ptr(y);
const T* src1_row = (const T*)src1.ptr(y);
const T* src2_row = (const T*)src2.ptr(y);
dst_row[x] = BinOp<T, opid>::call(src1_row[x], src2_row[x]);
}
}
template <typename T, int opid>
void bitwiseBinOp(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
hipLaunchKernelGGL(( bitwiseBinOpKernel<T, opid>), dim3(grid), dim3(threads), 0, 0, rows, cols, cn, src1, src2, mask, dst);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void bitwiseOrCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<BIN_OP_OR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
template <typename T>
void bitwiseMaskOrCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<T, BIN_OP_OR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskOrCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskOrCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskOrCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
void bitwiseAndCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<BIN_OP_AND>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
template <typename T>
void bitwiseMaskAndCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<T, BIN_OP_AND>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskAndCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskAndCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskAndCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
void bitwiseXorCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<BIN_OP_XOR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
template <typename T>
void bitwiseMaskXorCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, hipStream_t stream)
{
bitwiseBinOp<T, BIN_OP_XOR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskXorCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskXorCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
template void bitwiseMaskXorCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, hipStream_t);
//////////////////////////////////////////////////////////////////////////
// min/max
struct MinOp
{
template <typename T>
__device__ __forceinline__ T operator()(T a, T b)
{
return min(a, b);
}
__device__ __forceinline__ float operator()(float a, float b)
{
return fmin(a, b);
}
__device__ __forceinline__ double operator()(double a, double b)
{
return fmin(a, b);
}
};
struct MaxOp
{
template <typename T>
__device__ __forceinline__ T operator()(T a, T b)
{
return max(a, b);
}
__device__ __forceinline__ float operator()(float a, float b)
{
return fmax(a, b);
}
__device__ __forceinline__ double operator()(double a, double b)
{
return fmax(a, b);
}
};
template <typename T> struct ScalarMinOp
{
T s;
explicit ScalarMinOp(T s_) : s(s_) {}
__device__ __forceinline__ T operator()(T a)
{
return min(a, s);
}
};
template <> struct ScalarMinOp<float>
{
float s;
explicit ScalarMinOp(float s_) : s(s_) {}
__device__ __forceinline__ float operator()(float a)
{
return fmin(a, s);
}
};
template <> struct ScalarMinOp<double>
{
double s;
explicit ScalarMinOp(double s_) : s(s_) {}
__device__ __forceinline__ double operator()(double a)
{
return fmin(a, s);
}
};
template <typename T> struct ScalarMaxOp
{
T s;
explicit ScalarMaxOp(T s_) : s(s_) {}
__device__ __forceinline__ T operator()(T a)
{
return max(a, s);
}
};
template <> struct ScalarMaxOp<float>
{
float s;
explicit ScalarMaxOp(float s_) : s(s_) {}
__device__ __forceinline__ float operator()(float a)
{
return fmax(a, s);
}
};
template <> struct ScalarMaxOp<double>
{
double s;
explicit ScalarMaxOp(double s_) : s(s_) {}
__device__ __forceinline__ double operator()(double a)
{
return fmax(a, s);
}
};
template <typename T>
void min_gpu(const DevMem2D_<T>& src1, const DevMem2D_<T>& src2, const DevMem2D_<T>& dst, hipStream_t stream)
{
MinOp op;
transform(src1, src2, dst, op, stream);
}
template void min_gpu<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream);
template void min_gpu<schar >(const DevMem2D_<schar>& src1, const DevMem2D_<schar>& src2, const DevMem2D_<schar>& dst, hipStream_t stream);
template void min_gpu<ushort>(const DevMem2D_<ushort>& src1, const DevMem2D_<ushort>& src2, const DevMem2D_<ushort>& dst, hipStream_t stream);
template void min_gpu<short >(const DevMem2D_<short>& src1, const DevMem2D_<short>& src2, const DevMem2D_<short>& dst, hipStream_t stream);
template void min_gpu<int >(const DevMem2D_<int>& src1, const DevMem2D_<int>& src2, const DevMem2D_<int>& dst, hipStream_t stream);
template void min_gpu<float >(const DevMem2D_<float>& src1, const DevMem2D_<float>& src2, const DevMem2D_<float>& dst, hipStream_t stream);
template void min_gpu<double>(const DevMem2D_<double>& src1, const DevMem2D_<double>& src2, const DevMem2D_<double>& dst, hipStream_t stream);
template <typename T>
void max_gpu(const DevMem2D_<T>& src1, const DevMem2D_<T>& src2, const DevMem2D_<T>& dst, hipStream_t stream)
{
MaxOp op;
transform(src1, src2, dst, op, stream);
}
template void max_gpu<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, hipStream_t stream);
template void max_gpu<schar >(const DevMem2D_<schar>& src1, const DevMem2D_<schar>& src2, const DevMem2D_<schar>& dst, hipStream_t stream);
template void max_gpu<ushort>(const DevMem2D_<ushort>& src1, const DevMem2D_<ushort>& src2, const DevMem2D_<ushort>& dst, hipStream_t stream);
template void max_gpu<short >(const DevMem2D_<short>& src1, const DevMem2D_<short>& src2, const DevMem2D_<short>& dst, hipStream_t stream);
template void max_gpu<int >(const DevMem2D_<int>& src1, const DevMem2D_<int>& src2, const DevMem2D_<int>& dst, hipStream_t stream);
template void max_gpu<float >(const DevMem2D_<float>& src1, const DevMem2D_<float>& src2, const DevMem2D_<float>& dst, hipStream_t stream);
template void max_gpu<double>(const DevMem2D_<double>& src1, const DevMem2D_<double>& src2, const DevMem2D_<double>& dst, hipStream_t stream);
template <typename T>
void min_gpu(const DevMem2D_<T>& src1, T src2, const DevMem2D_<T>& dst, hipStream_t stream)
{
ScalarMinOp<T> op(src2);
transform(src1, dst, op, stream);
}
template void min_gpu<uchar >(const DevMem2D& src1, uchar src2, const DevMem2D& dst, hipStream_t stream);
template void min_gpu<schar >(const DevMem2D_<schar>& src1, schar src2, const DevMem2D_<schar>& dst, hipStream_t stream);
template void min_gpu<ushort>(const DevMem2D_<ushort>& src1, ushort src2, const DevMem2D_<ushort>& dst, hipStream_t stream);
template void min_gpu<short >(const DevMem2D_<short>& src1, short src2, const DevMem2D_<short>& dst, hipStream_t stream);
template void min_gpu<int >(const DevMem2D_<int>& src1, int src2, const DevMem2D_<int>& dst, hipStream_t stream);
template void min_gpu<float >(const DevMem2D_<float>& src1, float src2, const DevMem2D_<float>& dst, hipStream_t stream);
template void min_gpu<double>(const DevMem2D_<double>& src1, double src2, const DevMem2D_<double>& dst, hipStream_t stream);
template <typename T>
void max_gpu(const DevMem2D_<T>& src1, T src2, const DevMem2D_<T>& dst, hipStream_t stream)
{
ScalarMaxOp<T> op(src2);
transform(src1, dst, op, stream);
}
template void max_gpu<uchar >(const DevMem2D& src1, uchar src2, const DevMem2D& dst, hipStream_t stream);
template void max_gpu<schar >(const DevMem2D_<schar>& src1, schar src2, const DevMem2D_<schar>& dst, hipStream_t stream);
template void max_gpu<ushort>(const DevMem2D_<ushort>& src1, ushort src2, const DevMem2D_<ushort>& dst, hipStream_t stream);
template void max_gpu<short >(const DevMem2D_<short>& src1, short src2, const DevMem2D_<short>& dst, hipStream_t stream);
template void max_gpu<int >(const DevMem2D_<int>& src1, int src2, const DevMem2D_<int>& dst, hipStream_t stream);
template void max_gpu<float >(const DevMem2D_<float>& src1, float src2, const DevMem2D_<float>& dst, hipStream_t stream);
template void max_gpu<double>(const DevMem2D_<double>& src1, double src2, const DevMem2D_<double>& dst, hipStream_t stream);
//////////////////////////////////////////////////////////////////////////
// threshold
template <typename T> struct ThreshBinary
{
ThreshBinary(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? maxVal : 0;
}
private:
T thresh;
T maxVal;
};
template <typename T> struct ThreshBinaryInv
{
ThreshBinaryInv(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? 0 : maxVal;
}
private:
T thresh;
T maxVal;
};
template <typename T> struct ThreshTrunc
{
ThreshTrunc(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return min(src, thresh);
}
private:
T thresh;
};
template <> struct ThreshTrunc<float>
{
ThreshTrunc(float thresh_, float) : thresh(thresh_) {}
__device__ __forceinline__ float operator()(const float& src) const
{
return fmin(src, thresh);
}
private:
float thresh;
};
template <> struct ThreshTrunc<double>
{
ThreshTrunc(double thresh_, double) : thresh(thresh_) {}
__device__ __forceinline__ double operator()(const double& src) const
{
return fmin(src, thresh);
}
private:
double thresh;
};
template <typename T> struct ThreshToZero
{
public:
ThreshToZero(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? src : 0;
}
private:
T thresh;
};
template <typename T> struct ThreshToZeroInv
{
public:
ThreshToZeroInv(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? 0 : src;
}
private:
T thresh;
};
template <template <typename> class Op, typename T>
void threshold_caller(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, T thresh, T maxVal,
hipStream_t stream)
{
Op<T> op(thresh, maxVal);
transform(src, dst, op, stream);
}
template <typename T>
void threshold_gpu(const DevMem2D& src, const DevMem2D& dst, T thresh, T maxVal, int type,
hipStream_t stream)
{
typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, T thresh, T maxVal,
hipStream_t stream);
static const caller_t callers[] =
{
threshold_caller<ThreshBinary, T>,
threshold_caller<ThreshBinaryInv, T>,
threshold_caller<ThreshTrunc, T>,
threshold_caller<ThreshToZero, T>,
threshold_caller<ThreshToZeroInv, T>
};
callers[type]((DevMem2D_<T>)src, (DevMem2D_<T>)dst, thresh, maxVal, stream);
}
template void threshold_gpu<uchar>(const DevMem2D& src, const DevMem2D& dst, uchar thresh, uchar maxVal, int type, hipStream_t stream);
template void threshold_gpu<schar>(const DevMem2D& src, const DevMem2D& dst, schar thresh, schar maxVal, int type, hipStream_t stream);
template void threshold_gpu<ushort>(const DevMem2D& src, const DevMem2D& dst, ushort thresh, ushort maxVal, int type, hipStream_t stream);
template void threshold_gpu<short>(const DevMem2D& src, const DevMem2D& dst, short thresh, short maxVal, int type, hipStream_t stream);
template void threshold_gpu<int>(const DevMem2D& src, const DevMem2D& dst, int thresh, int maxVal, int type, hipStream_t stream);
template void threshold_gpu<float>(const DevMem2D& src, const DevMem2D& dst, float thresh, float maxVal, int type, hipStream_t stream);
template void threshold_gpu<double>(const DevMem2D& src, const DevMem2D& dst, double thresh, double maxVal, int type, hipStream_t stream);
}}}
| b2cd154fa6c64296ba571ba62daab4827d3c75d1.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/vecmath.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "internal_shared.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace mathfunc
{
//////////////////////////////////////////////////////////////////////////////////////
// Compare
template <typename T1, typename T2>
struct NotEqual
{
__device__ __forceinline__ uchar operator()(const T1& src1, const T2& src2)
{
return static_cast<uchar>(static_cast<int>(src1 != src2) * 255);
}
};
template <typename T1, typename T2>
inline void compare_ne(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
NotEqual<T1, T2> op;
transform(static_cast< DevMem2D_<T1> >(src1), static_cast< DevMem2D_<T2> >(src2), dst, op, stream);
}
void compare_ne_8uc4(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
compare_ne<uint, uint>(src1, src2, dst, stream);
}
void compare_ne_32f(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream)
{
compare_ne<float, float>(src1, src2, dst, stream);
}
//////////////////////////////////////////////////////////////////////////
// Unary bitwise logical matrix operations
enum { UN_OP_NOT };
template <typename T, int opid>
struct UnOp;
template <typename T>
struct UnOp<T, UN_OP_NOT>
{
static __device__ __forceinline__ T call(T v) { return ~v; }
};
template <int opid>
__global__ void bitwiseUnOpKernel(int rows, int width, const PtrStep src, PtrStep dst)
{
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;
const uchar* src_ptr = src.ptr(y) + x;
if (x + sizeof(uint) - 1 < width)
{
*(uint*)dst_ptr = UnOp<uint, opid>::call(*(uint*)src_ptr);
}
else
{
const uchar* src_end = src.ptr(y) + width;
while (src_ptr < src_end)
{
*dst_ptr++ = UnOp<uchar, opid>::call(*src_ptr++);
}
}
}
}
template <int opid>
void bitwiseUnOp(int rows, int width, const PtrStep src, PtrStep dst,
cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(width, threads.x * sizeof(uint)),
divUp(rows, threads.y));
bitwiseUnOpKernel<opid><<<grid, threads>>>(rows, width, src, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, int opid>
__global__ void bitwiseUnOpKernel(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < cols && y < rows && mask.ptr(y)[x / cn])
{
T* dst_row = (T*)dst.ptr(y);
const T* src_row = (const T*)src.ptr(y);
dst_row[x] = UnOp<T, opid>::call(src_row[x]);
}
}
template <typename T, int opid>
void bitwiseUnOp(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
bitwiseUnOpKernel<T, opid><<<grid, threads>>>(rows, cols, cn, src, mask, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void bitwiseNotCaller(int rows, int cols, int elem_size1, int cn,
const PtrStep src, PtrStep dst, cudaStream_t stream)
{
bitwiseUnOp<UN_OP_NOT>(rows, cols * elem_size1 * cn, src, dst, stream);
}
template <typename T>
void bitwiseMaskNotCaller(int rows, int cols, int cn, const PtrStep src,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
bitwiseUnOp<T, UN_OP_NOT>(rows, cols * cn, cn, src, mask, dst, stream);
}
template void bitwiseMaskNotCaller<uchar>(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskNotCaller<ushort>(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskNotCaller<uint>(int, int, int, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
//////////////////////////////////////////////////////////////////////////
// Binary bitwise logical matrix operations
enum { BIN_OP_OR, BIN_OP_AND, BIN_OP_XOR };
template <typename T, int opid>
struct BinOp;
template <typename T>
struct BinOp<T, BIN_OP_OR>
{
static __device__ __forceinline__ T call(T a, T b) { return a | b; }
};
template <typename T>
struct BinOp<T, BIN_OP_AND>
{
static __device__ __forceinline__ T call(T a, T b) { return a & b; }
};
template <typename T>
struct BinOp<T, BIN_OP_XOR>
{
static __device__ __forceinline__ T call(T a, T b) { return a ^ b; }
};
template <int opid>
__global__ void bitwiseBinOpKernel(int rows, int width, const PtrStep src1,
const PtrStep src2, PtrStep dst)
{
const int x = (blockDim.x * blockIdx.x + threadIdx.x) * 4;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (y < rows)
{
uchar* dst_ptr = dst.ptr(y) + x;
const uchar* src1_ptr = src1.ptr(y) + x;
const uchar* src2_ptr = src2.ptr(y) + x;
if (x + sizeof(uint) - 1 < width)
{
*(uint*)dst_ptr = BinOp<uint, opid>::call(*(uint*)src1_ptr, *(uint*)src2_ptr);
}
else
{
const uchar* src1_end = src1.ptr(y) + width;
while (src1_ptr < src1_end)
{
*dst_ptr++ = BinOp<uchar, opid>::call(*src1_ptr++, *src2_ptr++);
}
}
}
}
template <int opid>
void bitwiseBinOp(int rows, int width, const PtrStep src1, const PtrStep src2,
PtrStep dst, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(width, threads.x * sizeof(uint)), divUp(rows, threads.y));
bitwiseBinOpKernel<opid><<<grid, threads>>>(rows, width, src1, src2, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T, int opid>
__global__ void bitwiseBinOpKernel(
int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < cols && y < rows && mask.ptr(y)[x / cn])
{
T* dst_row = (T*)dst.ptr(y);
const T* src1_row = (const T*)src1.ptr(y);
const T* src2_row = (const T*)src2.ptr(y);
dst_row[x] = BinOp<T, opid>::call(src1_row[x], src2_row[x]);
}
}
template <typename T, int opid>
void bitwiseBinOp(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
dim3 threads(16, 16);
dim3 grid(divUp(cols, threads.x), divUp(rows, threads.y));
bitwiseBinOpKernel<T, opid><<<grid, threads>>>(rows, cols, cn, src1, src2, mask, dst);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void bitwiseOrCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<BIN_OP_OR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
template <typename T>
void bitwiseMaskOrCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<T, BIN_OP_OR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskOrCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskOrCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskOrCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
void bitwiseAndCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<BIN_OP_AND>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
template <typename T>
void bitwiseMaskAndCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<T, BIN_OP_AND>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskAndCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskAndCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskAndCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
void bitwiseXorCaller(int rows, int cols, int elem_size1, int cn, const PtrStep src1,
const PtrStep src2, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<BIN_OP_XOR>(rows, cols * elem_size1 * cn, src1, src2, dst, stream);
}
template <typename T>
void bitwiseMaskXorCaller(int rows, int cols, int cn, const PtrStep src1, const PtrStep src2,
const PtrStep mask, PtrStep dst, cudaStream_t stream)
{
bitwiseBinOp<T, BIN_OP_XOR>(rows, cols * cn, cn, src1, src2, mask, dst, stream);
}
template void bitwiseMaskXorCaller<uchar>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskXorCaller<ushort>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
template void bitwiseMaskXorCaller<uint>(int, int, int, const PtrStep, const PtrStep, const PtrStep, PtrStep, cudaStream_t);
//////////////////////////////////////////////////////////////////////////
// min/max
struct MinOp
{
template <typename T>
__device__ __forceinline__ T operator()(T a, T b)
{
return min(a, b);
}
__device__ __forceinline__ float operator()(float a, float b)
{
return fmin(a, b);
}
__device__ __forceinline__ double operator()(double a, double b)
{
return fmin(a, b);
}
};
struct MaxOp
{
template <typename T>
__device__ __forceinline__ T operator()(T a, T b)
{
return max(a, b);
}
__device__ __forceinline__ float operator()(float a, float b)
{
return fmax(a, b);
}
__device__ __forceinline__ double operator()(double a, double b)
{
return fmax(a, b);
}
};
template <typename T> struct ScalarMinOp
{
T s;
explicit ScalarMinOp(T s_) : s(s_) {}
__device__ __forceinline__ T operator()(T a)
{
return min(a, s);
}
};
template <> struct ScalarMinOp<float>
{
float s;
explicit ScalarMinOp(float s_) : s(s_) {}
__device__ __forceinline__ float operator()(float a)
{
return fmin(a, s);
}
};
template <> struct ScalarMinOp<double>
{
double s;
explicit ScalarMinOp(double s_) : s(s_) {}
__device__ __forceinline__ double operator()(double a)
{
return fmin(a, s);
}
};
template <typename T> struct ScalarMaxOp
{
T s;
explicit ScalarMaxOp(T s_) : s(s_) {}
__device__ __forceinline__ T operator()(T a)
{
return max(a, s);
}
};
template <> struct ScalarMaxOp<float>
{
float s;
explicit ScalarMaxOp(float s_) : s(s_) {}
__device__ __forceinline__ float operator()(float a)
{
return fmax(a, s);
}
};
template <> struct ScalarMaxOp<double>
{
double s;
explicit ScalarMaxOp(double s_) : s(s_) {}
__device__ __forceinline__ double operator()(double a)
{
return fmax(a, s);
}
};
template <typename T>
void min_gpu(const DevMem2D_<T>& src1, const DevMem2D_<T>& src2, const DevMem2D_<T>& dst, cudaStream_t stream)
{
MinOp op;
transform(src1, src2, dst, op, stream);
}
template void min_gpu<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void min_gpu<schar >(const DevMem2D_<schar>& src1, const DevMem2D_<schar>& src2, const DevMem2D_<schar>& dst, cudaStream_t stream);
template void min_gpu<ushort>(const DevMem2D_<ushort>& src1, const DevMem2D_<ushort>& src2, const DevMem2D_<ushort>& dst, cudaStream_t stream);
template void min_gpu<short >(const DevMem2D_<short>& src1, const DevMem2D_<short>& src2, const DevMem2D_<short>& dst, cudaStream_t stream);
template void min_gpu<int >(const DevMem2D_<int>& src1, const DevMem2D_<int>& src2, const DevMem2D_<int>& dst, cudaStream_t stream);
template void min_gpu<float >(const DevMem2D_<float>& src1, const DevMem2D_<float>& src2, const DevMem2D_<float>& dst, cudaStream_t stream);
template void min_gpu<double>(const DevMem2D_<double>& src1, const DevMem2D_<double>& src2, const DevMem2D_<double>& dst, cudaStream_t stream);
template <typename T>
void max_gpu(const DevMem2D_<T>& src1, const DevMem2D_<T>& src2, const DevMem2D_<T>& dst, cudaStream_t stream)
{
MaxOp op;
transform(src1, src2, dst, op, stream);
}
template void max_gpu<uchar >(const DevMem2D& src1, const DevMem2D& src2, const DevMem2D& dst, cudaStream_t stream);
template void max_gpu<schar >(const DevMem2D_<schar>& src1, const DevMem2D_<schar>& src2, const DevMem2D_<schar>& dst, cudaStream_t stream);
template void max_gpu<ushort>(const DevMem2D_<ushort>& src1, const DevMem2D_<ushort>& src2, const DevMem2D_<ushort>& dst, cudaStream_t stream);
template void max_gpu<short >(const DevMem2D_<short>& src1, const DevMem2D_<short>& src2, const DevMem2D_<short>& dst, cudaStream_t stream);
template void max_gpu<int >(const DevMem2D_<int>& src1, const DevMem2D_<int>& src2, const DevMem2D_<int>& dst, cudaStream_t stream);
template void max_gpu<float >(const DevMem2D_<float>& src1, const DevMem2D_<float>& src2, const DevMem2D_<float>& dst, cudaStream_t stream);
template void max_gpu<double>(const DevMem2D_<double>& src1, const DevMem2D_<double>& src2, const DevMem2D_<double>& dst, cudaStream_t stream);
template <typename T>
void min_gpu(const DevMem2D_<T>& src1, T src2, const DevMem2D_<T>& dst, cudaStream_t stream)
{
ScalarMinOp<T> op(src2);
transform(src1, dst, op, stream);
}
template void min_gpu<uchar >(const DevMem2D& src1, uchar src2, const DevMem2D& dst, cudaStream_t stream);
template void min_gpu<schar >(const DevMem2D_<schar>& src1, schar src2, const DevMem2D_<schar>& dst, cudaStream_t stream);
template void min_gpu<ushort>(const DevMem2D_<ushort>& src1, ushort src2, const DevMem2D_<ushort>& dst, cudaStream_t stream);
template void min_gpu<short >(const DevMem2D_<short>& src1, short src2, const DevMem2D_<short>& dst, cudaStream_t stream);
template void min_gpu<int >(const DevMem2D_<int>& src1, int src2, const DevMem2D_<int>& dst, cudaStream_t stream);
template void min_gpu<float >(const DevMem2D_<float>& src1, float src2, const DevMem2D_<float>& dst, cudaStream_t stream);
template void min_gpu<double>(const DevMem2D_<double>& src1, double src2, const DevMem2D_<double>& dst, cudaStream_t stream);
template <typename T>
void max_gpu(const DevMem2D_<T>& src1, T src2, const DevMem2D_<T>& dst, cudaStream_t stream)
{
ScalarMaxOp<T> op(src2);
transform(src1, dst, op, stream);
}
template void max_gpu<uchar >(const DevMem2D& src1, uchar src2, const DevMem2D& dst, cudaStream_t stream);
template void max_gpu<schar >(const DevMem2D_<schar>& src1, schar src2, const DevMem2D_<schar>& dst, cudaStream_t stream);
template void max_gpu<ushort>(const DevMem2D_<ushort>& src1, ushort src2, const DevMem2D_<ushort>& dst, cudaStream_t stream);
template void max_gpu<short >(const DevMem2D_<short>& src1, short src2, const DevMem2D_<short>& dst, cudaStream_t stream);
template void max_gpu<int >(const DevMem2D_<int>& src1, int src2, const DevMem2D_<int>& dst, cudaStream_t stream);
template void max_gpu<float >(const DevMem2D_<float>& src1, float src2, const DevMem2D_<float>& dst, cudaStream_t stream);
template void max_gpu<double>(const DevMem2D_<double>& src1, double src2, const DevMem2D_<double>& dst, cudaStream_t stream);
//////////////////////////////////////////////////////////////////////////
// threshold
template <typename T> struct ThreshBinary
{
ThreshBinary(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? maxVal : 0;
}
private:
T thresh;
T maxVal;
};
template <typename T> struct ThreshBinaryInv
{
ThreshBinaryInv(T thresh_, T maxVal_) : thresh(thresh_), maxVal(maxVal_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? 0 : maxVal;
}
private:
T thresh;
T maxVal;
};
template <typename T> struct ThreshTrunc
{
ThreshTrunc(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return min(src, thresh);
}
private:
T thresh;
};
template <> struct ThreshTrunc<float>
{
ThreshTrunc(float thresh_, float) : thresh(thresh_) {}
__device__ __forceinline__ float operator()(const float& src) const
{
return fmin(src, thresh);
}
private:
float thresh;
};
template <> struct ThreshTrunc<double>
{
ThreshTrunc(double thresh_, double) : thresh(thresh_) {}
__device__ __forceinline__ double operator()(const double& src) const
{
return fmin(src, thresh);
}
private:
double thresh;
};
template <typename T> struct ThreshToZero
{
public:
ThreshToZero(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? src : 0;
}
private:
T thresh;
};
template <typename T> struct ThreshToZeroInv
{
public:
ThreshToZeroInv(T thresh_, T) : thresh(thresh_) {}
__device__ __forceinline__ T operator()(const T& src) const
{
return src > thresh ? 0 : src;
}
private:
T thresh;
};
template <template <typename> class Op, typename T>
void threshold_caller(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, T thresh, T maxVal,
cudaStream_t stream)
{
Op<T> op(thresh, maxVal);
transform(src, dst, op, stream);
}
template <typename T>
void threshold_gpu(const DevMem2D& src, const DevMem2D& dst, T thresh, T maxVal, int type,
cudaStream_t stream)
{
typedef void (*caller_t)(const DevMem2D_<T>& src, const DevMem2D_<T>& dst, T thresh, T maxVal,
cudaStream_t stream);
static const caller_t callers[] =
{
threshold_caller<ThreshBinary, T>,
threshold_caller<ThreshBinaryInv, T>,
threshold_caller<ThreshTrunc, T>,
threshold_caller<ThreshToZero, T>,
threshold_caller<ThreshToZeroInv, T>
};
callers[type]((DevMem2D_<T>)src, (DevMem2D_<T>)dst, thresh, maxVal, stream);
}
template void threshold_gpu<uchar>(const DevMem2D& src, const DevMem2D& dst, uchar thresh, uchar maxVal, int type, cudaStream_t stream);
template void threshold_gpu<schar>(const DevMem2D& src, const DevMem2D& dst, schar thresh, schar maxVal, int type, cudaStream_t stream);
template void threshold_gpu<ushort>(const DevMem2D& src, const DevMem2D& dst, ushort thresh, ushort maxVal, int type, cudaStream_t stream);
template void threshold_gpu<short>(const DevMem2D& src, const DevMem2D& dst, short thresh, short maxVal, int type, cudaStream_t stream);
template void threshold_gpu<int>(const DevMem2D& src, const DevMem2D& dst, int thresh, int maxVal, int type, cudaStream_t stream);
template void threshold_gpu<float>(const DevMem2D& src, const DevMem2D& dst, float thresh, float maxVal, int type, cudaStream_t stream);
template void threshold_gpu<double>(const DevMem2D& src, const DevMem2D& dst, double thresh, double maxVal, int type, cudaStream_t stream);
}}}
|
f140104107ead948127ab1738b428e6951b724b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.hpp"
// Create an NxN Identity matrix A on the device
__global__ void identity_kernel(int N, double *A)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i < N && j < N) A[i + N * j] = (i == j)?1.0:0.0;
}
void identity_device(int N, double *A)
{
const int Nx = 8, Ny = 8;
dim3 threads_per_block(Nx, Ny);
dim3 blocks((N + Nx - 1)/Nx, (N + Ny - 1)/Ny);
hipLaunchKernelGGL(( identity_kernel), dim3(blocks), dim3(threads_per_block), 0, 0, N, A);
}
// Add b to the diagonal of NxN matrix A
__global__ void add_diagonal_kernel(int N, double b, double *A)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i < N && j < N && i == j) A[i + N * j] += b;
}
void add_diagonal(int N, double b, double *A)
{
const int Nx = 8, Ny = 8;
dim3 threads_per_block(Nx, Ny);
dim3 blocks((N + Nx - 1)/Nx, (N + Ny - 1)/Ny);
hipLaunchKernelGGL(( add_diagonal_kernel), dim3(blocks), dim3(threads_per_block), 0, 0, N, b, A);
}
// Sums 2*log(A[i][i]) over the diagonal of an NxN device matrix:
// a strided transform iterator (stride N+1 walks the diagonal) feeds hipcub::DeviceReduce
struct LogSq : public thrust::unary_function<double, double>
{
__host__ __device__ double operator()(double x) const { return 2.0 * log(x); }
};
void sum_log_diag(int N, double *A, double *result, double *work, size_t work_size)
{
auto transform_it = thrust::make_transform_iterator(A, LogSq());
auto sr = make_strided_range(transform_it, transform_it + N*N, N+1);
hipcub::DeviceReduce::Sum(work, work_size, sr.begin(), result, N);
}
// Implementation of trace using hipcub::DeviceReduce
void trace(int N, double *A, double *result, double *work, size_t work_size)
{
auto sr = make_strided_range(A, A + N*N, N+1);
hipcub::DeviceReduce::Sum(work, work_size, sr.begin(), result, N);
} | f140104107ead948127ab1738b428e6951b724b8.cu | #include "util.hpp"
// Create an NxN Identity matrix A on the device
__global__ void identity_kernel(int N, double *A)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i < N && j < N) A[i + N * j] = (i == j)?1.0:0.0;
}
void identity_device(int N, double *A)
{
const int Nx = 8, Ny = 8;
dim3 threads_per_block(Nx, Ny);
dim3 blocks((N + Nx - 1)/Nx, (N + Ny - 1)/Ny);
identity_kernel<<<blocks, threads_per_block>>>(N, A);
}
// Add b to the diagonal of NxN matrix A
__global__ void add_diagonal_kernel(int N, double b, double *A)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
int j = blockDim.y*blockIdx.y + threadIdx.y;
if (i < N && j < N && i == j) A[i + N * j] += b;
}
void add_diagonal(int N, double b, double *A)
{
const int Nx = 8, Ny = 8;
dim3 threads_per_block(Nx, Ny);
dim3 blocks((N + Nx - 1)/Nx, (N + Ny - 1)/Ny);
add_diagonal_kernel<<<blocks, threads_per_block>>>(N, b, A);
}
// Sums 2*log(A[i][i]) over the diagonal of an NxN device matrix:
// a strided transform iterator (stride N+1 walks the diagonal) feeds cub::DeviceReduce
struct LogSq : public thrust::unary_function<double, double>
{
__host__ __device__ double operator()(double x) const { return 2.0 * log(x); }
};
void sum_log_diag(int N, double *A, double *result, double *work, size_t work_size)
{
auto transform_it = thrust::make_transform_iterator(A, LogSq());
auto sr = make_strided_range(transform_it, transform_it + N*N, N+1);
cub::DeviceReduce::Sum(work, work_size, sr.begin(), result, N);
}
// Implementation of trace using cub::DeviceReduce
void trace(int N, double *A, double *result, double *work, size_t work_size)
{
auto sr = make_strided_range(A, A + N*N, N+1);
cub::DeviceReduce::Sum(work, work_size, sr.begin(), result, N);
} |
8bb4eab579e24e351758958b344cdffab9ec9131.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/pool.h"
#include "octnet/gpu/gpu.h"
#include "octnet/core/z_curve.h"
#include <thrust/execution_policy.h>
#include <cstdio>
#include <cstdlib>
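// Structure pass for octree_split_by_prob_gpu: copies the split bits of the input shallow octrees and additionally sets the split bit of every leaf above the finest level whose value in 'prob' reaches thr, so that leaf is subdivided one level further.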
__global__ void kernel_split_by_prob_struct(octree out, int n_blocks, const octree in, const octree prob, const ot_data_t thr) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
const ot_tree_t* itree = octree_get_tree(&in, grid_idx);
ot_tree_t* otree = octree_get_tree(&out, grid_idx);
// const ot_data_t* prob_data = prob.data_ptrs[grid_idx];
const ot_data_t* prob_data = octree_get_data(&prob, grid_idx);
if(!tree_isset_bit(itree, 0)) {
int data_idx = tree_data_idx(itree, 0, 1);
if(prob_data[data_idx] >= thr) {
tree_set_bit(otree, 0);
}
}
else {
tree_set_bit(otree, 0);
for(int bit_idx_l1 = 1; bit_idx_l1 < 9; ++bit_idx_l1) {
if(!tree_isset_bit(itree, bit_idx_l1)) {
int data_idx = tree_data_idx(itree, bit_idx_l1, 1);
if(prob_data[data_idx] >= thr) {
tree_set_bit(otree, bit_idx_l1);
}
}
else {
tree_set_bit(otree, bit_idx_l1);
for(int add_bit_idx_l2 = 0; add_bit_idx_l2 < 8; ++add_bit_idx_l2) {
int bit_idx_l2 = tree_child_bit_idx(bit_idx_l1) + add_bit_idx_l2;
if(!tree_isset_bit(itree, bit_idx_l2)) {
int data_idx = tree_data_idx(itree, bit_idx_l2, 1);
if(prob_data[data_idx] >= thr) {
tree_set_bit(otree, bit_idx_l2);
}
}
else {
tree_set_bit(otree, bit_idx_l2);
}
}
}
}
}
}
}
extern "C"
void octree_split_by_prob_gpu(const octree* in, const octree* prob, const ot_data_t thr, bool check, octree* out) {
if(prob->feature_size != 1) {
printf("[ERROR]: split_by_prob - prob feature size != 1 (is %d)\n", prob->feature_size);
exit(-1);
}
if(check && !octree_equal_trees_gpu(in, prob)) {
printf("[ERROR]: split_by_prob - tree structure of inputs do not match\n");
exit(-1);
}
//struct
octree_cpy_scalars(in, out);
octree_resize_as_gpu(in, out);
octree_clr_trees_gpu(out);
int n_blocks = octree_num_blocks(in);
hipLaunchKernelGGL(( kernel_split_by_prob_struct), dim3(GET_BLOCKS(n_blocks)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, n_blocks, *in, *prob, thr
);
CUDA_POST_KERNEL_CHECK;
octree_upd_n_leafs_gpu(out);
octree_resize_as_gpu(out, out);
octree_upd_prefix_leafs_gpu(out);
octree_cpy_sup_to_sub_gpu(in, out);
}
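// Splits every cell down to the finest level: all tree bits are set (~0), the leaf bookkeeping is rebuilt, and the parent data is copied into the new children.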
extern "C"
void octree_split_full_gpu(const octree* in, octree* out) {
octree_resize_as_gpu(in, out);
int n_blocks = octree_num_blocks(in);
ot_tree_t val = ~0;
thrust::fill_n(thrust::device, out->trees, n_blocks * N_TREE_INTS, val);
octree_upd_n_leafs_gpu(out);
octree_upd_prefix_leafs_gpu(out);
octree_resize_as_gpu(out, out);
octree_cpy_sup_to_sub_gpu(in, out);
}
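// For each input leaf above the finest level, looks up the matching cell in the half-resolution reconstruction 'rec', classifies it by whether its value lies in [rec_thr_from, rec_thr_to], and scans the cells adjacent to its six faces; if the classification changes across any face, the whole containing shallow octree block is marked for a full split.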
__global__ void kernel_split_reconstruction_surface_struct(octree out, int n_leafs, const octree in, const octree rec, const ot_data_t rec_thr_from, const ot_data_t rec_thr_to) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
int in_grid_idx = leaf_idx_to_grid_idx(&in, leaf_idx);
const ot_tree_t* in_tree = octree_get_tree(&in, in_grid_idx);
int in_data_idx = leaf_idx - in.prefix_leafs[in_grid_idx];
int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx);
//dense ind of input
int n, in_d,in_h,in_w;
int depth = octree_ind_to_dense_ind(&in, in_grid_idx, in_bit_idx, &n, &in_d,&in_h,&in_w);
if(depth == 3) {
continue;
}
//get ind of rec (halve resolution)
int ds = in_d/2;
int hs = in_h/2;
int ws = in_w/2;
int rec_gd = ds / 8;
int rec_gh = hs / 8;
int rec_gw = ws / 8;
int rec_bd = ds % 8;
int rec_bh = hs % 8;
int rec_bw = ws % 8;
int rec_grid_idx = octree_grid_idx(&rec, n, rec_gd,rec_gh,rec_gw);
const ot_tree_t* rec_tree = octree_get_tree(&rec, rec_grid_idx);
int rec_bit_idx = tree_bit_idx(rec_tree, rec_bd,rec_bh,rec_bw);
//determine leaf state
int data_idx = tree_data_idx(rec_tree, rec_bit_idx, rec.feature_size);
ot_data_t prob = octree_get_data(&rec, rec_grid_idx)[data_idx];
bool leaf_state = prob >= rec_thr_from && prob <= rec_thr_to;
bool other_state = leaf_state;
//check along faces if a different state exists
int width = width_from_depth(depth_from_bit_idx(rec_bit_idx));
// along d
int grid_idx, bit_idx;
for(int fd = 0; fd < 2; ++fd) {
int d = ds + (fd*(width+1)-1);
int h = hs;
int w = ws;
if(leaf_state == other_state && d >= 0 && h >= 0 && w >= 0 && d < 8 * rec.grid_depth && h < 8 * rec.grid_height && w < 8 * rec.grid_width) {
grid_idx = octree_grid_idx(&rec, n, d / 8, h / 8, w / 8);
const ot_tree_t* tree = octree_get_tree(&rec, grid_idx);
ot_data_t* data = octree_get_data(&rec, grid_idx);
int z = 0;
while(leaf_state == other_state && z < width * width) {
int e1 = z_curve_x(z);
int e2 = z_curve_y(z);
h = hs + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, rec.feature_size);
prob = data[data_idx];
other_state = prob >= rec_thr_from && prob <= rec_thr_to;
int data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt = IMIN(width * width - z, data_cnt * data_cnt);
z += data_cnt;
}
}
}
// along h
for(int fh = 0; fh < 2; ++fh) {
int h = hs + (fh*(width+1)-1);
int d = ds;
int w = ws;
if(leaf_state == other_state && d >= 0 && h >= 0 && w >= 0 && d < 8 * rec.grid_depth && h < 8 * rec.grid_height && w < 8 * rec.grid_width) {
grid_idx = octree_grid_idx(&rec, n, d / 8, h / 8, w / 8);
const ot_tree_t* tree = octree_get_tree(&rec, grid_idx);
ot_data_t* data = octree_get_data(&rec, grid_idx);
int z = 0;
while(leaf_state == other_state && z < width * width) {
int e1 = z_curve_x(z);
int e2 = z_curve_y(z);
d = ds + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, rec.feature_size);
prob = data[data_idx];
other_state = prob >= rec_thr_from && prob <= rec_thr_to;
int data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt = IMIN(width * width - z, data_cnt * data_cnt);
z += data_cnt;
}
}
}
// along w
for(int fw = 0; fw < 2; ++fw) {
int w = ws + (fw*(width+1)-1);
int d = ds;
int h = hs;
if(leaf_state == other_state && d >= 0 && h >= 0 && w >= 0 && d < 8 * rec.grid_depth && h < 8 * rec.grid_height && w < 8 * rec.grid_width) {
grid_idx = octree_grid_idx(&rec, n, d / 8, h / 8, w / 8);
const ot_tree_t* tree = octree_get_tree(&rec, grid_idx);
ot_data_t* data = octree_get_data(&rec, grid_idx);
int z = 0;
while(leaf_state == other_state && z < width * width) {
int e1 = z_curve_x(z);
int e2 = z_curve_y(z);
d = ds + e2;
h = hs + e1;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, rec.feature_size);
prob = data[data_idx];
other_state = prob >= rec_thr_from && prob <= rec_thr_to;
int data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt = IMIN(width * width - z, data_cnt * data_cnt);
z += data_cnt;
}
}
}
// if state change occured, then split leaf (for now full split - full split of shallow octree)
if(leaf_state != other_state) {
ot_tree_t* out_tree = octree_get_tree(&out, in_grid_idx);
for(int tree_idx = 0; tree_idx < N_TREE_INTS; ++tree_idx) {
out_tree[tree_idx] = ~0;
}
}
}
}
extern "C"
void octree_split_reconstruction_surface_gpu(const octree* in, const octree* rec, ot_data_t rec_thr_from, ot_data_t rec_thr_to, octree* out) {
if(rec->feature_size != 1) {
printf("[ERROR] split_reconstruction_surface - feature size of rec has to be 1\n");
exit(-1);
}
if(in->n != rec->n || in->grid_depth/2 != rec->grid_depth || in->grid_height/2 != rec->grid_height || in->grid_width/2 != rec->grid_width) {
printf("[ERROR] split_reconstruction_surface - shape of in and rec are not compatible\n");
exit(-1);
}
octree_resize_as_gpu(in, out);
octree_cpy_trees_gpu_gpu(in, out);
hipLaunchKernelGGL(( kernel_split_reconstruction_surface_struct), dim3(GET_BLOCKS(in->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, in->n_leafs, *in, *rec, rec_thr_from, rec_thr_to
);
CUDA_POST_KERNEL_CHECK;
octree_upd_n_leafs_gpu(out);
octree_resize_as_gpu(out, out);
octree_upd_prefix_leafs_gpu(out);
octree_cpy_sup_to_sub_gpu(in, out);
}
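// Backward pass of the split ops: grad_in takes the tree structure of 'in' and every cell receives the sum of the gradients of its corresponding (possibly finer) cells in grad_out.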
extern "C"
void octree_split_bwd_gpu(const octree* in, const octree* grad_out, octree* grad_in) {
octree_cpy_scalars(in, grad_in);
octree_resize_as_gpu(in, grad_in);
octree_cpy_trees_gpu_gpu(in, grad_in);
octree_cpy_prefix_leafs_gpu_gpu(in, grad_in);
octree_cpy_sub_to_sup_sum_gpu(grad_out, grad_in);
}
// __global__ void kernel_split_dense_reconstruction_surface_struct_surf(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_data_t rec_thr_from, ot_data_t rec_thr_to) {
// CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
// ot_tree_t* tree = octree_get_tree(&out, grid_idx);
// int n,gd,gh,gw;
// octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
// for(int idx = 0; idx < 8*8*8; ++idx) {
// int bw = idx % 8;
// int bh = ((idx - bw) / 8) % 8;
// int bd = idx / (8 * 8);
// int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
// int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
// int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
// int d = (gd * 8 + bd) / 2;
// int h = (gh * 8 + bh) / 2;
// int w = (gw * 8 + bw) / 2;
// int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
// bool occ_c = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
// bool differ = false;
// for(int off = 0; off < 3*3*3 && !differ; ++off) {
// if(off == 13) continue;
// int off_w = off % 3;
// int off_h = ((off - off_w) / 3) % 3;
// int off_d = off / (3*3);
// d = ( gd * 8 + bd + (off_d - 1) ) / 2;
// h = ( gh * 8 + bh + (off_h - 1) ) / 2;
// w = ( gw * 8 + bw + (off_w - 1) ) / 2;
// rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
// bool in_bounds = d>=0 && h>=0 && w>=0 && d<dense_depth && h<dense_height && w<dense_width;
// bool occ = in_bounds && reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
// differ = differ || (in_bounds && (occ_c != occ));
// }
// if(differ) {
// tree_set_bit(tree, 0);
// tree_set_bit(tree, bit_idx_l1);
// tree_set_bit(tree, bit_idx_l2);
// }
// }
// }
// }
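// Active variant of the surface-split structure kernel: unlike the commented-out
// neighborhood-comparing version above, it marks the path to a voxel for splitting
// whenever the corresponding half-resolution reconstruction value lies in
// [rec_thr_from, rec_thr_to].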
__global__ void kernel_split_dense_reconstruction_surface_struct_surf(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_data_t rec_thr_from, ot_data_t rec_thr_to) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&out, grid_idx);
int n,gd,gh,gw;
octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
for(int idx = 0; idx < 8*8*8; ++idx) {
int bw = idx % 8;
int bh = ((idx - bw) / 8) % 8;
int bd = idx / (8 * 8);
int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
int d = (gd * 8 + bd) / 2;
int h = (gh * 8 + bh) / 2;
int w = (gw * 8 + bw) / 2;
int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
bool differ = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
if(differ) {
tree_set_bit(tree, 0);
tree_set_bit(tree, bit_idx_l1);
tree_set_bit(tree, bit_idx_l2);
}
}
}
}
// __global__ void kernel_split_dense_reconstruction_surface_struct_oct(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_data_t rec_thr_from, ot_data_t rec_thr_to) {
// CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
// ot_tree_t* tree = octree_get_tree(&out, grid_idx);
// int n,gd,gh,gw;
// octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
// bool differ = false;
// for(int idx = 0; idx < 8*8*8 && !differ; ++idx) {
// int bw = idx % 8;
// int bh = ((idx - bw) / 8) % 8;
// int bd = idx / (8 * 8);
// int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
// int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
// int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
// int d = (gd * 8 + bd) / 2;
// int h = (gh * 8 + bh) / 2;
// int w = (gw * 8 + bw) / 2;
// int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
// bool occ_c = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
// for(int off = 0; off < 3*3*3 && !differ; ++off) {
// if(off == 13) continue;
// int off_w = off % 3;
// int off_h = ((off - off_w) / 3) % 3;
// int off_d = off / (3*3);
// d = ( gd * 8 + bd + (off_d - 1) ) / 2;
// h = ( gh * 8 + bh + (off_h - 1) ) / 2;
// w = ( gw * 8 + bw + (off_w - 1) ) / 2;
// rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
// bool in_bounds = d>=0 && h>=0 && w>=0 && d<dense_depth && h<dense_height && w<dense_width;
// bool occ = in_bounds && reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
// differ = differ || (in_bounds && (occ_c != occ));
// }
// }
// if(differ) {
// for(int tree_idx = 0; tree_idx < N_TREE_INTS; ++tree_idx) {
// tree[tree_idx] = ~0;
// }
// }
// }
// }
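// Active variant of the octant-split structure kernel: as soon as any voxel of a
// block maps to a reconstruction value in [rec_thr_from, rec_thr_to], the whole
// block is fully subdivided.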
__global__ void kernel_split_dense_reconstruction_surface_struct_oct(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_data_t rec_thr_from, ot_data_t rec_thr_to) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&out, grid_idx);
int n,gd,gh,gw;
octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
bool differ = false;
for(int idx = 0; idx < 8*8*8 && !differ; ++idx) {
int bw = idx % 8;
int bh = ((idx - bw) / 8) % 8;
int bd = idx / (8 * 8);
int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
int d = (gd * 8 + bd) / 2;
int h = (gh * 8 + bh) / 2;
int w = (gw * 8 + bw) / 2;
int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
differ = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
if(differ) {
for(int tree_idx = 0; tree_idx < N_TREE_INTS; ++tree_idx) {
tree[tree_idx] = ~0;
}
}
}
}
}
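// Pools the dense feature volume (given at half the octree resolution) into the new
// leaf cells by averaging over each cell; expects out.data to hold the grid index of
// every leaf, as written by octree_leaf_idx_to_grid_idx_gpu.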
__global__ void kernel_split_dense_reconstruction_surface_data(octree out, ot_size_t n_leafs, const ot_data_t* features, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
const int grid_idx = out.data[leaf_idx * out.feature_size];
const ot_tree_t* tree = octree_get_tree(&out, grid_idx);
const int data_idx = leaf_idx - out.prefix_leafs[grid_idx];
const int bit_idx = data_idx_to_bit_idx(tree, data_idx);
int n,d,h,w;
const int cell_depth = octree_ind_to_dense_ind(&out, grid_idx, bit_idx, &n, &d,&h,&w);
const int cell_width = width_from_depth(cell_depth);
const int cell_width3 = cell_width * cell_width * cell_width;
ot_data_t* out_data = octree_get_data(&out, grid_idx) + data_idx * out.feature_size;
for(int f = 0; f < feature_size; ++f) {
ot_data_t val = 0;
for(int idx = 0; idx < cell_width3; ++idx) {
int idx_w = idx % cell_width;
int idx_h = ((idx - idx_w) / cell_width) % cell_width;
int idx_d = idx / (cell_width*cell_width);
int dense_w = (w + idx_w) / 2;
int dense_h = (h + idx_h) / 2;
int dense_d = (d + idx_d) / 2;
val += features[(((n * feature_size + f) * dense_depth + dense_d) * dense_height + dense_h) * dense_width + dense_w];
}
val /= cell_width3;
out_data[f] = val;
}
}
}
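// Builds an octree from a dense reconstruction and feature volume. The grid is
// dense/4 blocks, i.e. the octree voxels live at twice the dense resolution.
// structure_type selects the split rule: 0 = full split, 1 = surface split,
// 2 = octant split. Features are then average-pooled into the leafs.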
extern "C"
void octree_split_dense_reconstruction_surface_gpu(const ot_data_t* features, const ot_data_t* reconstruction, int n, int dense_depth, int dense_height, int dense_width, int feature_size, ot_data_t rec_thr_from, ot_data_t rec_thr_to, int structure_type, octree* out) {
if(dense_depth % 4 != 0 || dense_height % 4 != 0 || dense_width % 4 != 0) {
printf("[ERROR] octree_split_dense_reconstruction_surface_gpu - dense dims has to be a factor of 4\n");
exit(-1);
}
octree_resize_gpu(n, dense_depth/4, dense_height/4, dense_width/4, feature_size, 0, out);
int n_blocks = octree_num_blocks(out);
// compute structure
if(structure_type == 0) {
// printf("[INFO] use full split\n");
ot_tree_t val = ~0;
thrust::fill_n(thrust::device, out->trees, n_blocks * N_TREE_INTS, val);
}
else if(structure_type == 1) {
// printf("[INFO] use surface split\n");
octree_clr_trees_gpu(out);
hipLaunchKernelGGL(( kernel_split_dense_reconstruction_surface_struct_surf), dim3(GET_BLOCKS(n_blocks)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, n_blocks, reconstruction, dense_depth, dense_height, dense_width, rec_thr_from, rec_thr_to
);
CUDA_POST_KERNEL_CHECK;
}
else if(structure_type == 2) {
// printf("[INFO] use octant split\n");
octree_clr_trees_gpu(out);
hipLaunchKernelGGL(( kernel_split_dense_reconstruction_surface_struct_oct), dim3(GET_BLOCKS(n_blocks)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, n_blocks, reconstruction, dense_depth, dense_height, dense_width, rec_thr_from, rec_thr_to
);
CUDA_POST_KERNEL_CHECK;
}
else {
printf("[ERROR] unknown structure_type in octree_split_dense_reconstruction_surface_gpu\n");
exit(-1);
}
octree_upd_n_leafs_gpu(out);
octree_upd_prefix_leafs_gpu(out);
octree_resize_as_gpu(out, out);
// copy features
octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data);
hipLaunchKernelGGL(( kernel_split_dense_reconstruction_surface_data), dim3(GET_BLOCKS(out->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, out->n_leafs, features, dense_depth, dense_height, dense_width, feature_size
);
CUDA_POST_KERNEL_CHECK;
}
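// Backward pass of the dense split: each dense voxel first clears its gradient and
// then accumulates the grad_out values of the octree leafs looked up at the 2x2x2
// offset positions (d,h,w)+{0,1}.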
__global__ void kernel_split_dense_reconstruction_surface_bwd(ot_data_t* grad_in, ot_size_t n_voxels, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size, octree const grad_out) {
CUDA_KERNEL_LOOP(vx_idx, n_voxels) {
int n = vx_idx / (dense_depth * dense_height * dense_width);
int w_lr = vx_idx % dense_width;
int h_lr = ((vx_idx - w_lr) / dense_width) % dense_height;
int d_lr = ((((vx_idx - w_lr) / dense_width) - h_lr) / dense_height) % dense_depth;
for(int f = 0; f < feature_size; ++f) {
int grad_in_idx = (((n * feature_size + f) * dense_depth + d_lr) * dense_height + h_lr) * dense_width + w_lr;
grad_in[grad_in_idx] = 0;
}
for(int up = 0; up < 2*2*2; ++up) {
int up_w = up % 2;
int w_hr = w_lr + up_w;
int h_hr = h_lr + ((up - up_w) / 2) % 2;
int d_hr = d_lr + up / (2 * 2);
int bd = d_hr % 8;
int bh = h_hr % 8;
int bw = w_hr % 8;
int gd = d_hr / 8;
int gh = h_hr / 8;
int gw = w_hr / 8;
int grid_idx = octree_grid_idx(&grad_out, n, gd, gh, gw);
const ot_tree_t* tree = octree_get_tree(&grad_out, grid_idx);
int bit_idx = tree_bit_idx(tree, bd, bh, bw);
int data_idx = tree_data_idx(tree, bit_idx, grad_out.feature_size);
const ot_data_t* grad_out_data = octree_get_data(&grad_out, grid_idx) + data_idx;
for(int f = 0; f < feature_size; ++f) {
int grad_in_idx = (((n * feature_size + f) * dense_depth + d_lr) * dense_height + h_lr) * dense_width + w_lr;
grad_in[grad_in_idx] += grad_out_data[f];
}
}
}
}
extern "C"
void octree_split_dense_reconstruction_surface_bwd_gpu(const octree* grad_out, ot_data_t* grad_in) {
int dense_depth = 4 * grad_out->grid_depth;
int dense_height = 4 * grad_out->grid_height;
int dense_width = 4 * grad_out->grid_width;
int n_voxels = grad_out->n * dense_depth * dense_height * dense_width;
hipLaunchKernelGGL(( kernel_split_dense_reconstruction_surface_bwd), dim3(GET_BLOCKS(n_voxels)), dim3(CUDA_NUM_THREADS), 0, 0,
grad_in, n_voxels, dense_depth, dense_height, dense_width, grad_out->feature_size, *grad_out
);
CUDA_POST_KERNEL_CHECK;
}
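// Full-resolution structure kernel: a voxel is split if, within a (2*band+1)^3
// neighborhood, there is an occupied reconstruction voxel (value in
// [rec_thr_from, rec_thr_to]) whose own 3x3x3 neighborhood changes occupancy state,
// i.e. the voxel lies within `band` of the reconstructed surface.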
__global__ void kernel_octree_split_dense_reconstruction_surface_fres_struct(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size, float rec_thr_from, float rec_thr_to, int band) {
int band_width = band * 2 + 1;
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&out, grid_idx);
int n,gd,gh,gw;
octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
for(int idx = 0; idx < 8*8*8; ++idx) {
bool differ = false;
int bw = idx % 8;
int bh = ((idx - bw) / 8) % 8;
int bd = idx / (8 * 8);
int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
int dc = (gd * 8 + bd);
int hc = (gh * 8 + bh);
int wc = (gw * 8 + bw);
for(int off = 0; off < band_width*band_width*band_width && !differ; ++off) {
int off_w = off % band_width;
int off_h = ((off - off_w) / band_width) % band_width;
int off_d = off / (band_width*band_width);
int doff = dc + (off_d - band);
int hoff = hc + (off_h - band);
int woff = wc + (off_w - band);
bool in_bounds = doff >= 0 && hoff >= 0 && woff >= 0 && doff < dense_depth && hoff < dense_height && woff < dense_width;
if(!in_bounds) continue;
int rec_idx = ((n * dense_depth + doff) * dense_height + hoff) * dense_width + woff;
bool occ_c = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
if(!occ_c) continue;
for(int nb = 0; nb < 3*3*3 && !differ; ++nb) {
if(nb == 13) continue;
int nb_w = nb % 3;
int nb_h = ((nb - nb_w) / 3) % 3;
int nb_d = nb / (3*3);
int d = doff + nb_d - 1;
int h = hoff + nb_h - 1;
int w = woff + nb_w - 1;
rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
bool in_bounds = d >= 0 && h >= 0 && w >= 0 && d < dense_depth && h < dense_height && w < dense_width;
bool occ = in_bounds && reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
differ = differ || (in_bounds && (occ_c != occ));
}
}
if(differ) {
tree_set_bit(tree, 0);
tree_set_bit(tree, bit_idx_l1);
tree_set_bit(tree, bit_idx_l2);
}
}
}
}
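// Full-resolution data kernel: averages the dense features over each leaf cell
// (octree and dense voxels coincide here, no downsampling).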
__global__ void kernel_octree_split_dense_reconstruction_surface_fres_data(octree out, ot_size_t n_leafs, const ot_data_t* features, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
// const int grid_idx = out.data[leaf_idx * out->feature_size];
const int grid_idx = leaf_idx_to_grid_idx(&out, leaf_idx);
const ot_tree_t* tree = octree_get_tree(&out, grid_idx);
const int data_idx = leaf_idx - out.prefix_leafs[grid_idx];
const int bit_idx = data_idx_to_bit_idx(tree, data_idx);
int n,d,h,w;
const int cell_depth = octree_ind_to_dense_ind(&out, grid_idx, bit_idx, &n, &d,&h,&w);
const int cell_width = width_from_depth(cell_depth);
const int cell_width3 = cell_width * cell_width * cell_width;
ot_data_t* out_data = octree_get_data(&out, grid_idx) + data_idx * out.feature_size;
for(int f = 0; f < feature_size; ++f) {
ot_data_t val = 0;
for(int idx = 0; idx < cell_width3; ++idx) {
int idx_w = idx % cell_width;
int idx_h = ((idx - idx_w) / cell_width) % cell_width;
int idx_d = idx / (cell_width*cell_width);
int dense_w = (w + idx_w);
int dense_h = (h + idx_h);
int dense_d = (d + idx_d);
ot_data_t feat = features[(((n * feature_size + f) * dense_depth + dense_d) * dense_height + dense_h) * dense_width + dense_w];
val += feat;
}
val /= cell_width3;
out_data[f] = val;
}
}
}
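// Full-resolution entry point: dense dims must be divisible by 8 and the grid is
// dense/8 blocks, so octree voxels and dense voxels coincide.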
extern "C"
void octree_split_dense_reconstruction_surface_fres_gpu(const ot_data_t* features, const ot_data_t* reconstruction, int n, int dense_depth, int dense_height, int dense_width, int feature_size, ot_data_t rec_thr_from, ot_data_t rec_thr_to, int band, octree* out) {
if(dense_depth % 8 != 0 || dense_height % 8 != 0 || dense_width % 8 != 0) {
printf("[ERROR] octrecpue_split_dense_reconstruction_surface_fres_gpu - dense dims has to be a factor of 8\n");
exit(-1);
}
octree_resize_gpu(n, dense_depth/8, dense_height/8, dense_width/8, feature_size, 0, out);
octree_clr_trees_gpu(out);
int n_blocks = octree_num_blocks(out);
hipLaunchKernelGGL(( kernel_octree_split_dense_reconstruction_surface_fres_struct), dim3(GET_BLOCKS(n_blocks)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, n_blocks, reconstruction, dense_depth, dense_height, dense_width, feature_size, rec_thr_from, rec_thr_to, band
);
CUDA_POST_KERNEL_CHECK;
octree_upd_n_leafs_gpu(out);
octree_upd_prefix_leafs_gpu(out);
octree_resize_as_gpu(out, out);
hipLaunchKernelGGL(( kernel_octree_split_dense_reconstruction_surface_fres_data), dim3(GET_BLOCKS(out->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, out->n_leafs, features, dense_depth, dense_height, dense_width, feature_size
);
CUDA_POST_KERNEL_CHECK;
}
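// Full-resolution backward pass: every dense voxel copies the gradient of the octree
// leaf that contains it (zero-initialized first, then accumulated).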
__global__ void kernel_octree_split_dense_reconstruction_surface_fres_bwd(ot_data_t* grad_in, ot_size_t n_voxels, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size, octree const grad_out) {
CUDA_KERNEL_LOOP(vx_idx, n_voxels) {
int n = vx_idx / (dense_depth * dense_height * dense_width);
int w = vx_idx % dense_width;
int h = ((vx_idx - w) / dense_width) % dense_height;
int d = ((((vx_idx - w) / dense_width) - h) / dense_height) % dense_depth;
for(int f = 0; f < feature_size; ++f) {
int grad_in_idx = (((n * feature_size + f) * dense_depth + d) * dense_height + h) * dense_width + w;
grad_in[grad_in_idx] = 0;
}
int bd = d % 8;
int bh = h % 8;
int bw = w % 8;
int gd = d / 8;
int gh = h / 8;
int gw = w / 8;
int grid_idx = octree_grid_idx(&grad_out, n, gd, gh, gw);
const ot_tree_t* tree = octree_get_tree(&grad_out, grid_idx);
int bit_idx = tree_bit_idx(tree, bd, bh, bw);
int data_idx = tree_data_idx(tree, bit_idx, feature_size);
const ot_data_t* grad_out_data = octree_get_data(&grad_out, grid_idx) + data_idx;
for(int f = 0; f < feature_size; ++f) {
int grad_in_idx = (((n * feature_size + f) * dense_depth + d) * dense_height + h) * dense_width + w;
grad_in[grad_in_idx] += grad_out_data[f];
}
}
}
extern "C"
void octree_split_dense_reconstruction_surface_fres_bwd_gpu(const octree* grad_out, ot_data_t* grad_in) {
int dense_depth = 8 * grad_out->grid_depth;
int dense_height = 8 * grad_out->grid_height;
int dense_width = 8 * grad_out->grid_width;
int feature_size = grad_out->feature_size;
int n_voxels = grad_out->n * dense_depth * dense_height * dense_width;
hipLaunchKernelGGL(( kernel_octree_split_dense_reconstruction_surface_fres_bwd), dim3(GET_BLOCKS(n_voxels)), dim3(CUDA_NUM_THREADS), 0, 0,
grad_in, n_voxels, dense_depth, dense_height, dense_width, feature_size, *grad_out
);
CUDA_POST_KERNEL_CHECK;
}
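// Structure kernel for TSDF inputs: clears the tree and splits a voxel's path if the
// half-resolution reconstruction changes sign within a (2*band+1)^3 neighborhood,
// i.e. close to the zero crossing of the TSDF.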
__global__ void kernel_split_tsdf(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, int band) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&out, grid_idx);
for(int tree_idx = 0; tree_idx < N_TREE_INTS; ++tree_idx) {
tree[tree_idx] = 0;
}
const int band_width = 2*band+1;
const int band_width_center = (band_width * band_width * band_width - 1) / 2;
int n,gd,gh,gw;
octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
for(int idx = 0; idx < 8*8*8; ++idx) {
int bw = idx % 8;
int bh = ((idx - bw) / 8) % 8;
int bd = idx / (8 * 8);
int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
int d = (gd * 8 + bd) / 2;
int h = (gh * 8 + bh) / 2;
int w = (gw * 8 + bw) / 2;
int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
bool pos = reconstruction[rec_idx] >= 0;
bool differ = false;
for(int off = 0; off < band_width*band_width*band_width && !differ; ++off) {
if(off == band_width_center) continue;
int off_w = off % band_width;
int off_h = ((off - off_w) / band_width) % band_width;
int off_d = off / (band_width*band_width);
d = ( gd * 8 + bd + (off_d - band) ) / 2;
h = ( gh * 8 + bh + (off_h - band) ) / 2;
w = ( gw * 8 + bw + (off_w - band) ) / 2;
rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
bool in_bounds = d>=0 && h>=0 && w>=0 && d<dense_depth && h<dense_height && w<dense_width;
bool pos_nb = in_bounds && reconstruction[rec_idx] >= 0;
differ = differ || (in_bounds && (pos != pos_nb));
}
if(differ) {
// if(grid_idx == 1) printf("%d, %d, %d,%d,%d, %d,%d,%d\n", grid_idx, idx, bd,bh,bw, bit_idx_l1,bit_idx_l2,bit_idx_l3);
tree_set_bit(tree, 0);
tree_set_bit(tree, bit_idx_l1);
tree_set_bit(tree, bit_idx_l2);
}
}
}
}
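// ORs the guide octree's tree bits into the output trees, presumably so that the
// resulting structure is at least as finely subdivided as the guide.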
__global__ void kernel_split_tsdf_or_trees(ot_tree_t* out_trees, const int n_tree_ints, const ot_tree_t* guide_trees) {
CUDA_KERNEL_LOOP(idx, n_tree_ints) {
out_trees[idx] |= guide_trees[idx];
}
}
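// Builds an octree from a dense TSDF: splits near sign changes (band-limited),
// optionally unions the structure with a guide octree, and finally average-pools the
// dense features into the leafs.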
extern "C"
void octree_split_tsdf_gpu(const ot_data_t* features, const ot_data_t* reconstruction, const octree* guide, int n, int dense_depth, int dense_height, int dense_width, int feature_size, int band, octree* out) {
if(dense_depth % 4 != 0 || dense_height % 4 != 0 || dense_width % 4 != 0) {
printf("[ERROR] octree_split_tsdf_gpu - dense dims has to be a factor of 4\n");
exit(-1);
}
if(guide != 0 && (guide->n != n || 4*guide->grid_depth != dense_depth || 4*guide->grid_height != dense_height || 4*guide->grid_width != dense_width)) {
printf("[ERROR] octree_split_tsdf_gpu - dense dims not compatible with guide\n");
exit(-1);
}
octree_resize_gpu(n, dense_depth/4, dense_height/4, dense_width/4, feature_size, 0, out);
int n_blocks = octree_num_blocks(out);
// octree_clr_trees_gpu(out);
hipLaunchKernelGGL(( kernel_split_tsdf), dim3(GET_BLOCKS(n_blocks)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, n_blocks, reconstruction, dense_depth, dense_height, dense_width, band
);
CUDA_POST_KERNEL_CHECK;
// octree_upd_n_leafs_gpu(out);
// printf("[INFO] after split_tsdf=%d\n", out->n_leafs);
if(guide != 0) {
int n_tree_ints = N_TREE_INTS * n_blocks;
hipLaunchKernelGGL(( kernel_split_tsdf_or_trees), dim3(GET_BLOCKS(n_tree_ints)), dim3(CUDA_NUM_THREADS), 0, 0,
out->trees, n_tree_ints, guide->trees
);
CUDA_POST_KERNEL_CHECK;
// octree_upd_n_leafs_gpu(out);
// printf("[INFO] after guide=%d\n", out->n_leafs);
}
octree_upd_n_leafs_gpu(out);
octree_upd_prefix_leafs_gpu(out);
octree_resize_as_gpu(out, out);
// copy features
octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data);
hipLaunchKernelGGL(( kernel_split_dense_reconstruction_surface_data), dim3(GET_BLOCKS(out->n_leafs)), dim3(CUDA_NUM_THREADS), 0, 0,
*out, out->n_leafs, features, dense_depth, dense_height, dense_width, feature_size
);
CUDA_POST_KERNEL_CHECK;
}
| 8bb4eab579e24e351758958b344cdffab9ec9131.cu | // Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/pool.h"
#include "octnet/gpu/gpu.h"
#include "octnet/core/z_curve.h"
#include <thrust/execution_policy.h>
#include <cstdio>
#include <cstdlib>
__global__ void kernel_split_by_prob_struct(octree out, int n_blocks, const octree in, const octree prob, const ot_data_t thr) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
const ot_tree_t* itree = octree_get_tree(&in, grid_idx);
ot_tree_t* otree = octree_get_tree(&out, grid_idx);
// const ot_data_t* prob_data = prob.data_ptrs[grid_idx];
const ot_data_t* prob_data = octree_get_data(&prob, grid_idx);
if(!tree_isset_bit(itree, 0)) {
int data_idx = tree_data_idx(itree, 0, 1);
if(prob_data[data_idx] >= thr) {
tree_set_bit(otree, 0);
}
}
else {
tree_set_bit(otree, 0);
for(int bit_idx_l1 = 1; bit_idx_l1 < 9; ++bit_idx_l1) {
if(!tree_isset_bit(itree, bit_idx_l1)) {
int data_idx = tree_data_idx(itree, bit_idx_l1, 1);
if(prob_data[data_idx] >= thr) {
tree_set_bit(otree, bit_idx_l1);
}
}
else {
tree_set_bit(otree, bit_idx_l1);
for(int add_bit_idx_l2 = 0; add_bit_idx_l2 < 8; ++add_bit_idx_l2) {
int bit_idx_l2 = tree_child_bit_idx(bit_idx_l1) + add_bit_idx_l2;
if(!tree_isset_bit(itree, bit_idx_l2)) {
int data_idx = tree_data_idx(itree, bit_idx_l2, 1);
if(prob_data[data_idx] >= thr) {
tree_set_bit(otree, bit_idx_l2);
}
}
else {
tree_set_bit(otree, bit_idx_l2);
}
}
}
}
}
}
}
extern "C"
void octree_split_by_prob_gpu(const octree* in, const octree* prob, const ot_data_t thr, bool check, octree* out) {
if(prob->feature_size != 1) {
printf("[ERROR]: split_by_prob - prob feature size != 1 (is %d)\n", prob->feature_size);
exit(-1);
}
if(check && !octree_equal_trees_gpu(in, prob)) {
printf("[ERROR]: split_by_prob - tree structure of inputs do not match\n");
exit(-1);
}
//struct
octree_cpy_scalars(in, out);
octree_resize_as_gpu(in, out);
octree_clr_trees_gpu(out);
int n_blocks = octree_num_blocks(in);
kernel_split_by_prob_struct<<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>(
*out, n_blocks, *in, *prob, thr
);
CUDA_POST_KERNEL_CHECK;
octree_upd_n_leafs_gpu(out);
octree_resize_as_gpu(out, out);
octree_upd_prefix_leafs_gpu(out);
octree_cpy_sup_to_sub_gpu(in, out);
}
extern "C"
void octree_split_full_gpu(const octree* in, octree* out) {
octree_resize_as_gpu(in, out);
int n_blocks = octree_num_blocks(in);
ot_tree_t val = ~0;
thrust::fill_n(thrust::device, out->trees, n_blocks * N_TREE_INTS, val);
octree_upd_n_leafs_gpu(out);
octree_upd_prefix_leafs_gpu(out);
octree_resize_as_gpu(out, out);
octree_cpy_sup_to_sub_gpu(in, out);
}
__global__ void kernel_split_reconstruction_surface_struct(octree out, int n_leafs, const octree in, const octree rec, const ot_data_t rec_thr_from, const ot_data_t rec_thr_to) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
int in_grid_idx = leaf_idx_to_grid_idx(&in, leaf_idx);
const ot_tree_t* in_tree = octree_get_tree(&in, in_grid_idx);
int in_data_idx = leaf_idx - in.prefix_leafs[in_grid_idx];
int in_bit_idx = data_idx_to_bit_idx(in_tree, in_data_idx);
//dense ind of input
int n, in_d,in_h,in_w;
int depth = octree_ind_to_dense_ind(&in, in_grid_idx, in_bit_idx, &n, &in_d,&in_h,&in_w);
if(depth == 3) {
continue;
}
//get ind of rec (half resolution)
int ds = in_d/2;
int hs = in_h/2;
int ws = in_w/2;
int rec_gd = ds / 8;
int rec_gh = hs / 8;
int rec_gw = ws / 8;
int rec_bd = ds % 8;
int rec_bh = hs % 8;
int rec_bw = ws % 8;
int rec_grid_idx = octree_grid_idx(&rec, n, rec_gd,rec_gh,rec_gw);
const ot_tree_t* rec_tree = octree_get_tree(&rec, rec_grid_idx);
int rec_bit_idx = tree_bit_idx(rec_tree, rec_bd,rec_bh,rec_bw);
//determine leaf state
int data_idx = tree_data_idx(rec_tree, rec_bit_idx, rec.feature_size);
ot_data_t prob = octree_get_data(&rec, rec_grid_idx)[data_idx];
bool leaf_state = prob >= rec_thr_from && prob <= rec_thr_to;
bool other_state = leaf_state;
//check along faces if a different state exists
int width = width_from_depth(depth_from_bit_idx(rec_bit_idx));
// along d
int grid_idx, bit_idx;
for(int fd = 0; fd < 2; ++fd) {
int d = ds + (fd*(width+1)-1);
int h = hs;
int w = ws;
if(leaf_state == other_state && d >= 0 && h >= 0 && w >= 0 && d < 8 * rec.grid_depth && h < 8 * rec.grid_height && w < 8 * rec.grid_width) {
grid_idx = octree_grid_idx(&rec, n, d / 8, h / 8, w / 8);
const ot_tree_t* tree = octree_get_tree(&rec, grid_idx);
ot_data_t* data = octree_get_data(&rec, grid_idx);
int z = 0;
while(leaf_state == other_state && z < width * width) {
int e1 = z_curve_x(z);
int e2 = z_curve_y(z);
h = hs + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, rec.feature_size);
prob = data[data_idx];
other_state = prob >= rec_thr_from && prob <= rec_thr_to;
int data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt = IMIN(width * width - z, data_cnt * data_cnt);
z += data_cnt;
}
}
}
// along h
for(int fh = 0; fh < 2; ++fh) {
int h = hs + (fh*(width+1)-1);
int d = ds;
int w = ws;
if(leaf_state == other_state && d >= 0 && h >= 0 && w >= 0 && d < 8 * rec.grid_depth && h < 8 * rec.grid_height && w < 8 * rec.grid_width) {
grid_idx = octree_grid_idx(&rec, n, d / 8, h / 8, w / 8);
const ot_tree_t* tree = octree_get_tree(&rec, grid_idx);
ot_data_t* data = octree_get_data(&rec, grid_idx);
int z = 0;
while(leaf_state == other_state && z < width * width) {
int e1 = z_curve_x(z);
int e2 = z_curve_y(z);
d = ds + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, rec.feature_size);
prob = data[data_idx];
other_state = prob >= rec_thr_from && prob <= rec_thr_to;
int data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt = IMIN(width * width - z, data_cnt * data_cnt);
z += data_cnt;
}
}
}
// along w
for(int fw = 0; fw < 2; ++fw) {
int w = ws + (fw*(width+1)-1);
int d = ds;
int h = hs;
if(leaf_state == other_state && d >= 0 && h >= 0 && w >= 0 && d < 8 * rec.grid_depth && h < 8 * rec.grid_height && w < 8 * rec.grid_width) {
grid_idx = octree_grid_idx(&rec, n, d / 8, h / 8, w / 8);
const ot_tree_t* tree = octree_get_tree(&rec, grid_idx);
ot_data_t* data = octree_get_data(&rec, grid_idx);
int z = 0;
while(leaf_state == other_state && z < width * width) {
int e1 = z_curve_x(z);
int e2 = z_curve_y(z);
d = ds + e2;
h = hs + e1;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, rec.feature_size);
prob = data[data_idx];
other_state = prob >= rec_thr_from && prob <= rec_thr_to;
int data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt = IMIN(width * width - z, data_cnt * data_cnt);
z += data_cnt;
}
}
}
// if a state change occurred, then split leaf (for now full split - full split of shallow octree)
if(leaf_state != other_state) {
ot_tree_t* out_tree = octree_get_tree(&out, in_grid_idx);
for(int tree_idx = 0; tree_idx < N_TREE_INTS; ++tree_idx) {
out_tree[tree_idx] = ~0;
}
}
}
}
extern "C"
void octree_split_reconstruction_surface_gpu(const octree* in, const octree* rec, ot_data_t rec_thr_from, ot_data_t rec_thr_to, octree* out) {
if(rec->feature_size != 1) {
printf("[ERROR] split_reconstruction_surface - feature size of rec has to be 1\n");
exit(-1);
}
if(in->n != rec->n || in->grid_depth/2 != rec->grid_depth || in->grid_height/2 != rec->grid_height || in->grid_width/2 != rec->grid_width) {
printf("[ERROR] split_reconstruction_surface - shape of in and rec are not compatible\n");
exit(-1);
}
octree_resize_as_gpu(in, out);
octree_cpy_trees_gpu_gpu(in, out);
kernel_split_reconstruction_surface_struct<<<GET_BLOCKS(in->n_leafs), CUDA_NUM_THREADS>>>(
*out, in->n_leafs, *in, *rec, rec_thr_from, rec_thr_to
);
CUDA_POST_KERNEL_CHECK;
octree_upd_n_leafs_gpu(out);
octree_resize_as_gpu(out, out);
octree_upd_prefix_leafs_gpu(out);
octree_cpy_sup_to_sub_gpu(in, out);
}
extern "C"
void octree_split_bwd_gpu(const octree* in, const octree* grad_out, octree* grad_in) {
octree_cpy_scalars(in, grad_in);
octree_resize_as_gpu(in, grad_in);
octree_cpy_trees_gpu_gpu(in, grad_in);
octree_cpy_prefix_leafs_gpu_gpu(in, grad_in);
octree_cpy_sub_to_sup_sum_gpu(grad_out, grad_in);
}
// __global__ void kernel_split_dense_reconstruction_surface_struct_surf(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_data_t rec_thr_from, ot_data_t rec_thr_to) {
// CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
// ot_tree_t* tree = octree_get_tree(&out, grid_idx);
// int n,gd,gh,gw;
// octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
// for(int idx = 0; idx < 8*8*8; ++idx) {
// int bw = idx % 8;
// int bh = ((idx - bw) / 8) % 8;
// int bd = idx / (8 * 8);
// int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
// int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
// int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
// int d = (gd * 8 + bd) / 2;
// int h = (gh * 8 + bh) / 2;
// int w = (gw * 8 + bw) / 2;
// int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
// bool occ_c = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
// bool differ = false;
// for(int off = 0; off < 3*3*3 && !differ; ++off) {
// if(off == 13) continue;
// int off_w = off % 3;
// int off_h = ((off - off_w) / 3) % 3;
// int off_d = off / (3*3);
// d = ( gd * 8 + bd + (off_d - 1) ) / 2;
// h = ( gh * 8 + bh + (off_h - 1) ) / 2;
// w = ( gw * 8 + bw + (off_w - 1) ) / 2;
// rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
// bool in_bounds = d>=0 && h>=0 && w>=0 && d<dense_depth && h<dense_height && w<dense_width;
// bool occ = in_bounds && reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
// differ = differ || (in_bounds && (occ_c != occ));
// }
// if(differ) {
// tree_set_bit(tree, 0);
// tree_set_bit(tree, bit_idx_l1);
// tree_set_bit(tree, bit_idx_l2);
// }
// }
// }
// }
__global__ void kernel_split_dense_reconstruction_surface_struct_surf(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_data_t rec_thr_from, ot_data_t rec_thr_to) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&out, grid_idx);
int n,gd,gh,gw;
octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
for(int idx = 0; idx < 8*8*8; ++idx) {
int bw = idx % 8;
int bh = ((idx - bw) / 8) % 8;
int bd = idx / (8 * 8);
int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
int d = (gd * 8 + bd) / 2;
int h = (gh * 8 + bh) / 2;
int w = (gw * 8 + bw) / 2;
int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
bool differ = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
if(differ) {
tree_set_bit(tree, 0);
tree_set_bit(tree, bit_idx_l1);
tree_set_bit(tree, bit_idx_l2);
}
}
}
}
// __global__ void kernel_split_dense_reconstruction_surface_struct_oct(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_data_t rec_thr_from, ot_data_t rec_thr_to) {
// CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
// ot_tree_t* tree = octree_get_tree(&out, grid_idx);
// int n,gd,gh,gw;
// octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
// bool differ = false;
// for(int idx = 0; idx < 8*8*8 && !differ; ++idx) {
// int bw = idx % 8;
// int bh = ((idx - bw) / 8) % 8;
// int bd = idx / (8 * 8);
// int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
// int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
// int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
// int d = (gd * 8 + bd) / 2;
// int h = (gh * 8 + bh) / 2;
// int w = (gw * 8 + bw) / 2;
// int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
// bool occ_c = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
// for(int off = 0; off < 3*3*3 && !differ; ++off) {
// if(off == 13) continue;
// int off_w = off % 3;
// int off_h = ((off - off_w) / 3) % 3;
// int off_d = off / (3*3);
// d = ( gd * 8 + bd + (off_d - 1) ) / 2;
// h = ( gh * 8 + bh + (off_h - 1) ) / 2;
// w = ( gw * 8 + bw + (off_w - 1) ) / 2;
// rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
// bool in_bounds = d>=0 && h>=0 && w>=0 && d<dense_depth && h<dense_height && w<dense_width;
// bool occ = in_bounds && reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
// differ = differ || (in_bounds && (occ_c != occ));
// }
// }
// if(differ) {
// for(int tree_idx = 0; tree_idx < N_TREE_INTS; ++tree_idx) {
// tree[tree_idx] = ~0;
// }
// }
// }
// }
__global__ void kernel_split_dense_reconstruction_surface_struct_oct(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_data_t rec_thr_from, ot_data_t rec_thr_to) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&out, grid_idx);
int n,gd,gh,gw;
octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
bool differ = false;
for(int idx = 0; idx < 8*8*8 && !differ; ++idx) {
int bw = idx % 8;
int bh = ((idx - bw) / 8) % 8;
int bd = idx / (8 * 8);
int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
int d = (gd * 8 + bd) / 2;
int h = (gh * 8 + bh) / 2;
int w = (gw * 8 + bw) / 2;
int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
differ = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
if(differ) {
for(int tree_idx = 0; tree_idx < N_TREE_INTS; ++tree_idx) {
tree[tree_idx] = ~0;
}
}
}
}
}
__global__ void kernel_split_dense_reconstruction_surface_data(octree out, ot_size_t n_leafs, const ot_data_t* features, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
const int grid_idx = out.data[leaf_idx * out.feature_size];
const ot_tree_t* tree = octree_get_tree(&out, grid_idx);
const int data_idx = leaf_idx - out.prefix_leafs[grid_idx];
const int bit_idx = data_idx_to_bit_idx(tree, data_idx);
int n,d,h,w;
const int cell_depth = octree_ind_to_dense_ind(&out, grid_idx, bit_idx, &n, &d,&h,&w);
const int cell_width = width_from_depth(cell_depth);
const int cell_width3 = cell_width * cell_width * cell_width;
ot_data_t* out_data = octree_get_data(&out, grid_idx) + data_idx * out.feature_size;
for(int f = 0; f < feature_size; ++f) {
ot_data_t val = 0;
for(int idx = 0; idx < cell_width3; ++idx) {
int idx_w = idx % cell_width;
int idx_h = ((idx - idx_w) / cell_width) % cell_width;
int idx_d = idx / (cell_width*cell_width);
int dense_w = (w + idx_w) / 2;
int dense_h = (h + idx_h) / 2;
int dense_d = (d + idx_d) / 2;
val += features[(((n * feature_size + f) * dense_depth + dense_d) * dense_height + dense_h) * dense_width + dense_w];
}
val /= cell_width3;
out_data[f] = val;
}
}
}
extern "C"
void octree_split_dense_reconstruction_surface_gpu(const ot_data_t* features, const ot_data_t* reconstruction, int n, int dense_depth, int dense_height, int dense_width, int feature_size, ot_data_t rec_thr_from, ot_data_t rec_thr_to, int structure_type, octree* out) {
if(dense_depth % 4 != 0 || dense_height % 4 != 0 || dense_width % 4 != 0) {
printf("[ERROR] octree_split_dense_reconstruction_surface_gpu - dense dims has to be a factor of 4\n");
exit(-1);
}
octree_resize_gpu(n, dense_depth/4, dense_height/4, dense_width/4, feature_size, 0, out);
int n_blocks = octree_num_blocks(out);
// compute structure
if(structure_type == 0) {
// printf("[INFO] use full split\n");
ot_tree_t val = ~0;
thrust::fill_n(thrust::device, out->trees, n_blocks * N_TREE_INTS, val);
}
else if(structure_type == 1) {
// printf("[INFO] use surface split\n");
octree_clr_trees_gpu(out);
kernel_split_dense_reconstruction_surface_struct_surf<<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>(
*out, n_blocks, reconstruction, dense_depth, dense_height, dense_width, rec_thr_from, rec_thr_to
);
CUDA_POST_KERNEL_CHECK;
}
else if(structure_type == 2) {
// printf("[INFO] use octant split\n");
octree_clr_trees_gpu(out);
kernel_split_dense_reconstruction_surface_struct_oct<<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>(
*out, n_blocks, reconstruction, dense_depth, dense_height, dense_width, rec_thr_from, rec_thr_to
);
CUDA_POST_KERNEL_CHECK;
}
else {
printf("[ERROR] unknown structure_type in octree_split_dense_reconstruction_surface_gpu\n");
exit(-1);
}
octree_upd_n_leafs_gpu(out);
octree_upd_prefix_leafs_gpu(out);
octree_resize_as_gpu(out, out);
// copy features
octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data);
kernel_split_dense_reconstruction_surface_data<<<GET_BLOCKS(out->n_leafs), CUDA_NUM_THREADS>>>(
*out, out->n_leafs, features, dense_depth, dense_height, dense_width, feature_size
);
CUDA_POST_KERNEL_CHECK;
}
__global__ void kernel_split_dense_reconstruction_surface_bwd(ot_data_t* grad_in, ot_size_t n_voxels, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size, octree const grad_out) {
CUDA_KERNEL_LOOP(vx_idx, n_voxels) {
int n = vx_idx / (dense_depth * dense_height * dense_width);
int w_lr = vx_idx % dense_width;
int h_lr = ((vx_idx - w_lr) / dense_width) % dense_height;
int d_lr = ((((vx_idx - w_lr) / dense_width) - h_lr) / dense_height) % dense_depth;
for(int f = 0; f < feature_size; ++f) {
int grad_in_idx = (((n * feature_size + f) * dense_depth + d_lr) * dense_height + h_lr) * dense_width + w_lr;
grad_in[grad_in_idx] = 0;
}
for(int up = 0; up < 2*2*2; ++up) {
int up_w = up % 2;
int w_hr = w_lr + up_w;
int h_hr = h_lr + ((up - up_w) / 2) % 2;
int d_hr = d_lr + up / (2 * 2);
int bd = d_hr % 8;
int bh = h_hr % 8;
int bw = w_hr % 8;
int gd = d_hr / 8;
int gh = h_hr / 8;
int gw = w_hr / 8;
int grid_idx = octree_grid_idx(&grad_out, n, gd, gh, gw);
const ot_tree_t* tree = octree_get_tree(&grad_out, grid_idx);
int bit_idx = tree_bit_idx(tree, bd, bh, bw);
int data_idx = tree_data_idx(tree, bit_idx, grad_out.feature_size);
const ot_data_t* grad_out_data = octree_get_data(&grad_out, grid_idx) + data_idx;
for(int f = 0; f < feature_size; ++f) {
int grad_in_idx = (((n * feature_size + f) * dense_depth + d_lr) * dense_height + h_lr) * dense_width + w_lr;
grad_in[grad_in_idx] += grad_out_data[f];
}
}
}
}
extern "C"
void octree_split_dense_reconstruction_surface_bwd_gpu(const octree* grad_out, ot_data_t* grad_in) {
int dense_depth = 4 * grad_out->grid_depth;
int dense_height = 4 * grad_out->grid_height;
int dense_width = 4 * grad_out->grid_width;
int n_voxels = grad_out->n * dense_depth * dense_height * dense_width;
kernel_split_dense_reconstruction_surface_bwd<<<GET_BLOCKS(n_voxels), CUDA_NUM_THREADS>>>(
grad_in, n_voxels, dense_depth, dense_height, dense_width, grad_out->feature_size, *grad_out
);
CUDA_POST_KERNEL_CHECK;
}
__global__ void kernel_octree_split_dense_reconstruction_surface_fres_struct(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size, float rec_thr_from, float rec_thr_to, int band) {
int band_width = band * 2 + 1;
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&out, grid_idx);
int n,gd,gh,gw;
octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
for(int idx = 0; idx < 8*8*8; ++idx) {
bool differ = false;
int bw = idx % 8;
int bh = ((idx - bw) / 8) % 8;
int bd = idx / (8 * 8);
int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
int dc = (gd * 8 + bd);
int hc = (gh * 8 + bh);
int wc = (gw * 8 + bw);
for(int off = 0; off < band_width*band_width*band_width && !differ; ++off) {
int off_w = off % band_width;
int off_h = ((off - off_w) / band_width) % band_width;
int off_d = off / (band_width*band_width);
int doff = dc + (off_d - band);
int hoff = hc + (off_h - band);
int woff = wc + (off_w - band);
bool in_bounds = doff >= 0 && hoff >= 0 && woff >= 0 && doff < dense_depth && hoff < dense_height && woff < dense_width;
if(!in_bounds) continue;
int rec_idx = ((n * dense_depth + doff) * dense_height + hoff) * dense_width + woff;
bool occ_c = reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
if(!occ_c) continue;
for(int nb = 0; nb < 3*3*3 && !differ; ++nb) {
if(nb == 13) continue;
int nb_w = nb % 3;
int nb_h = ((nb - nb_w) / 3) % 3;
int nb_d = nb / (3*3);
int d = doff + nb_d - 1;
int h = hoff + nb_h - 1;
int w = woff + nb_w - 1;
rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
bool in_bounds = d >= 0 && h >= 0 && w >= 0 && d < dense_depth && h < dense_height && w < dense_width;
bool occ = in_bounds && reconstruction[rec_idx] >= rec_thr_from && reconstruction[rec_idx] <= rec_thr_to;
differ = differ || (in_bounds && (occ_c != occ));
}
}
if(differ) {
tree_set_bit(tree, 0);
tree_set_bit(tree, bit_idx_l1);
tree_set_bit(tree, bit_idx_l2);
}
}
}
}
__global__ void kernel_octree_split_dense_reconstruction_surface_fres_data(octree out, ot_size_t n_leafs, const ot_data_t* features, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size) {
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
// const int grid_idx = out.data[leaf_idx * out->feature_size];
const int grid_idx = leaf_idx_to_grid_idx(&out, leaf_idx);
const ot_tree_t* tree = octree_get_tree(&out, grid_idx);
const int data_idx = leaf_idx - out.prefix_leafs[grid_idx];
const int bit_idx = data_idx_to_bit_idx(tree, data_idx);
int n,d,h,w;
const int cell_depth = octree_ind_to_dense_ind(&out, grid_idx, bit_idx, &n, &d,&h,&w);
const int cell_width = width_from_depth(cell_depth);
const int cell_width3 = cell_width * cell_width * cell_width;
ot_data_t* out_data = octree_get_data(&out, grid_idx) + data_idx * out.feature_size;
for(int f = 0; f < feature_size; ++f) {
ot_data_t val = 0;
for(int idx = 0; idx < cell_width3; ++idx) {
int idx_w = idx % cell_width;
int idx_h = ((idx - idx_w) / cell_width) % cell_width;
int idx_d = idx / (cell_width*cell_width);
int dense_w = (w + idx_w);
int dense_h = (h + idx_h);
int dense_d = (d + idx_d);
ot_data_t feat = features[(((n * feature_size + f) * dense_depth + dense_d) * dense_height + dense_h) * dense_width + dense_w];
val += feat;
}
val /= cell_width3;
out_data[f] = val;
}
}
}
extern "C"
void octree_split_dense_reconstruction_surface_fres_gpu(const ot_data_t* features, const ot_data_t* reconstruction, int n, int dense_depth, int dense_height, int dense_width, int feature_size, ot_data_t rec_thr_from, ot_data_t rec_thr_to, int band, octree* out) {
if(dense_depth % 8 != 0 || dense_height % 8 != 0 || dense_width % 8 != 0) {
printf("[ERROR] octrecpue_split_dense_reconstruction_surface_fres_gpu - dense dims has to be a factor of 8\n");
exit(-1);
}
octree_resize_gpu(n, dense_depth/8, dense_height/8, dense_width/8, feature_size, 0, out);
octree_clr_trees_gpu(out);
int n_blocks = octree_num_blocks(out);
kernel_octree_split_dense_reconstruction_surface_fres_struct<<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>(
*out, n_blocks, reconstruction, dense_depth, dense_height, dense_width, feature_size, rec_thr_from, rec_thr_to, band
);
CUDA_POST_KERNEL_CHECK;
octree_upd_n_leafs_gpu(out);
octree_upd_prefix_leafs_gpu(out);
octree_resize_as_gpu(out, out);
kernel_octree_split_dense_reconstruction_surface_fres_data<<<GET_BLOCKS(out->n_leafs), CUDA_NUM_THREADS>>>(
*out, out->n_leafs, features, dense_depth, dense_height, dense_width, feature_size
);
CUDA_POST_KERNEL_CHECK;
}
__global__ void kernel_octree_split_dense_reconstruction_surface_fres_bwd(ot_data_t* grad_in, ot_size_t n_voxels, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, ot_size_t feature_size, octree const grad_out) {
CUDA_KERNEL_LOOP(vx_idx, n_voxels) {
int n = vx_idx / (dense_depth * dense_height * dense_width);
int w = vx_idx % dense_width;
int h = ((vx_idx - w) / dense_width) % dense_height;
int d = ((((vx_idx - w) / dense_width) - h) / dense_height) % dense_depth;
for(int f = 0; f < feature_size; ++f) {
int grad_in_idx = (((n * feature_size + f) * dense_depth + d) * dense_height + h) * dense_width + w;
grad_in[grad_in_idx] = 0;
}
int bd = d % 8;
int bh = h % 8;
int bw = w % 8;
int gd = d / 8;
int gh = h / 8;
int gw = w / 8;
int grid_idx = octree_grid_idx(&grad_out, n, gd, gh, gw);
const ot_tree_t* tree = octree_get_tree(&grad_out, grid_idx);
int bit_idx = tree_bit_idx(tree, bd, bh, bw);
int data_idx = tree_data_idx(tree, bit_idx, feature_size);
const ot_data_t* grad_out_data = octree_get_data(&grad_out, grid_idx) + data_idx;
for(int f = 0; f < feature_size; ++f) {
int grad_in_idx = (((n * feature_size + f) * dense_depth + d) * dense_height + h) * dense_width + w;
grad_in[grad_in_idx] += grad_out_data[f];
}
}
}
extern "C"
void octree_split_dense_reconstruction_surface_fres_bwd_gpu(const octree* grad_out, ot_data_t* grad_in) {
int dense_depth = 8 * grad_out->grid_depth;
int dense_height = 8 * grad_out->grid_height;
int dense_width = 8 * grad_out->grid_width;
int feature_size = grad_out->feature_size;
int n_voxels = grad_out->n * dense_depth * dense_height * dense_width;
kernel_octree_split_dense_reconstruction_surface_fres_bwd<<<GET_BLOCKS(n_voxels), CUDA_NUM_THREADS>>>(
grad_in, n_voxels, dense_depth, dense_height, dense_width, feature_size, *grad_out
);
CUDA_POST_KERNEL_CHECK;
}
__global__ void kernel_split_tsdf(octree out, ot_size_t n_blocks, const ot_data_t* reconstruction, ot_size_t dense_depth, ot_size_t dense_height, ot_size_t dense_width, int band) {
CUDA_KERNEL_LOOP(grid_idx, n_blocks) {
ot_tree_t* tree = octree_get_tree(&out, grid_idx);
for(int tree_idx = 0; tree_idx < N_TREE_INTS; ++tree_idx) {
tree[tree_idx] = 0;
}
const int band_width = 2*band+1;
const int band_width_center = (band_width * band_width * band_width - 1) / 2;
int n,gd,gh,gw;
octree_split_grid_idx(&out, grid_idx, &n,&gd,&gh,&gw);
for(int idx = 0; idx < 8*8*8; ++idx) {
int bw = idx % 8;
int bh = ((idx - bw) / 8) % 8;
int bd = idx / (8 * 8);
int bit_idx_l3 = tree_bit_idx_(bd,bh,bw);
int bit_idx_l2 = tree_parent_bit_idx(bit_idx_l3);
int bit_idx_l1 = tree_parent_bit_idx(bit_idx_l2);
int d = (gd * 8 + bd) / 2;
int h = (gh * 8 + bh) / 2;
int w = (gw * 8 + bw) / 2;
int rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
bool pos = reconstruction[rec_idx] >= 0;
bool differ = false;
for(int off = 0; off < band_width*band_width*band_width && !differ; ++off) {
if(off == band_width_center) continue;
int off_w = off % band_width;
int off_h = ((off - off_w) / band_width) % band_width;
int off_d = off / (band_width*band_width);
d = ( gd * 8 + bd + (off_d - band) ) / 2;
h = ( gh * 8 + bh + (off_h - band) ) / 2;
w = ( gw * 8 + bw + (off_w - band) ) / 2;
rec_idx = ((n * dense_depth + d) * dense_height + h) * dense_width + w;
bool in_bounds = d>=0 && h>=0 && w>=0 && d<dense_depth && h<dense_height && w<dense_width;
bool pos_nb = in_bounds && reconstruction[rec_idx] >= 0;
differ = differ || (in_bounds && (pos != pos_nb));
}
if(differ) {
// if(grid_idx == 1) printf("%d, %d, %d,%d,%d, %d,%d,%d\n", grid_idx, idx, bd,bh,bw, bit_idx_l1,bit_idx_l2,bit_idx_l3);
tree_set_bit(tree, 0);
tree_set_bit(tree, bit_idx_l1);
tree_set_bit(tree, bit_idx_l2);
}
}
}
}
__global__ void kernel_split_tsdf_or_trees(ot_tree_t* out_trees, const int n_tree_ints, const ot_tree_t* guide_trees) {
CUDA_KERNEL_LOOP(idx, n_tree_ints) {
out_trees[idx] |= guide_trees[idx];
}
}
extern "C"
void octree_split_tsdf_gpu(const ot_data_t* features, const ot_data_t* reconstruction, const octree* guide, int n, int dense_depth, int dense_height, int dense_width, int feature_size, int band, octree* out) {
if(dense_depth % 4 != 0 || dense_height % 4 != 0 || dense_width % 4 != 0) {
printf("[ERROR] octree_split_tsdf_gpu - dense dims has to be a factor of 4\n");
exit(-1);
}
if(guide != 0 && (guide->n != n || 4*guide->grid_depth != dense_depth || 4*guide->grid_height != dense_height || 4*guide->grid_width != dense_width)) {
printf("[ERROR] octree_split_tsdf_gpu - dense dims not compatible with guide\n");
exit(-1);
}
octree_resize_gpu(n, dense_depth/4, dense_height/4, dense_width/4, feature_size, 0, out);
int n_blocks = octree_num_blocks(out);
// octree_clr_trees_gpu(out);
kernel_split_tsdf<<<GET_BLOCKS(n_blocks), CUDA_NUM_THREADS>>>(
*out, n_blocks, reconstruction, dense_depth, dense_height, dense_width, band
);
CUDA_POST_KERNEL_CHECK;
// octree_upd_n_leafs_gpu(out);
// printf("[INFO] after split_tsdf=%d\n", out->n_leafs);
if(guide != 0) {
int n_tree_ints = N_TREE_INTS * n_blocks;
kernel_split_tsdf_or_trees<<<GET_BLOCKS(n_tree_ints), CUDA_NUM_THREADS>>>(
out->trees, n_tree_ints, guide->trees
);
CUDA_POST_KERNEL_CHECK;
// octree_upd_n_leafs_gpu(out);
// printf("[INFO] after guide=%d\n", out->n_leafs);
}
octree_upd_n_leafs_gpu(out);
octree_upd_prefix_leafs_gpu(out);
octree_resize_as_gpu(out, out);
// copy features
octree_leaf_idx_to_grid_idx_gpu(out, out->feature_size, out->data_capacity, out->data);
kernel_split_dense_reconstruction_surface_data<<<GET_BLOCKS(out->n_leafs), CUDA_NUM_THREADS>>>(
*out, out->n_leafs, features, dense_depth, dense_height, dense_width, feature_size
);
CUDA_POST_KERNEL_CHECK;
}
|
f5230b4dd3344e6d3e8e7a66f60e3065a6c993aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pair_logit.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
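// Pointwise PairLogit target: for each pair (x, y), p = sigmoid(point[x] - point[y])
// is computed (clamped away from 0 and 1); w*(1-p) is scattered with opposite signs
// into the first derivatives, w*p*(1-p) into the second derivatives of both
// documents, and w*(diff - log(1 + exp(diff))) is accumulated into the function value.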
template <int BLOCK_SIZE>
__global__ void PairLogitPointwiseTargetImpl(const float* point,
const uint2* pairs, const float* pairWeights,
const ui32* writeMap,
ui32 pairCount, int pairShift,
float* functionValue,
float* der,
float* der2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float tmpScores[BLOCK_SIZE];
uint2 pair;
if (i < pairCount) {
pair = __ldg(pairs + i);
} else {
pair.x = pairShift;
pair.y = pairShift;
}
pair.x -= pairShift;
pair.y -= pairShift;
const float w = pairWeights && (i < pairCount) ? pairWeights[i] : 1.0f;
const float diff = i < pairCount ? __ldg(point + pair.x) - __ldg(point + pair.y) : 0;
const float expDiff = __expf(diff);
const float p = max(min(isfinite(expDiff) ? expDiff / (1.0f + expDiff) : 1.0f, 1.0f - 1e-40f), 1e-40f);
const float direction = (1.0f - p);
const ui32 firstDst = writeMap ? writeMap[pair.x] : pair.x;
const ui32 secondDst = writeMap ? writeMap[pair.y] : pair.y;
if (der && i < pairCount) {
atomicAdd(der + firstDst, w * direction);
atomicAdd(der + secondDst, -w * direction);
}
if (der2 && i < pairCount) {
const float scale = p * (1.0f - p);
atomicAdd(der2 + firstDst, w * scale);
atomicAdd(der2 + secondDst, w * scale);
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expDiff) ? __logf(1.0f + expDiff) : expDiff;
tmpScores[threadIdx.x] = (i < pairCount) ? w * (diff - logExpValPlusOne) : 0;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
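// Accumulates, per document, the total weight of all pairs the document takes part in.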
__global__ void MakePairWeightsImpl(const uint2* pairs, const float* pairWeights, ui32 pairCount,
float* weights) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = __ldg(pairs + i);
const float w = pairWeights ? pairWeights[i] : 1.0f;
atomicAdd(weights + pair.x, w);
atomicAdd(weights + pair.y, w);
}
}
void MakePairWeights(const uint2* pairs, const float* pairWeights, ui32 pairCount,
float* weights, TCudaStream stream) {
const int blockSize = 512;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( MakePairWeightsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, pairs, pairWeights, pairCount, weights);
}
void PairLogitPointwiseTarget(const float* point,
const uint2* pairs, const float* pairWeights,
const ui32* writeMap,
ui32 pairCount, int pairShift,
float* functionValue,
float* der,
float* der2,
ui32 docCount,
TCudaStream stream) {
const int blockSize = 1024;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (der) {
FillBuffer(der, 0.0f, docCount, stream);
}
if (der2) {
FillBuffer(der2, 0.0f, docCount, stream);
}
if (numBlocks)
{
PairLogitPointwiseTargetImpl<blockSize> << <numBlocks, blockSize, 0, stream >> > (point, pairs, pairWeights, writeMap, pairCount, pairShift, functionValue, der, der2);
}
}
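// Pairwise PairLogit kernel: computes the same sigmoid-based derivative per pair,
// scatters it into the point derivatives (optionally remapped through
// scatterDerIndices) and stores the per-pair second derivative in pairsDer2.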
template <int BLOCK_SIZE>
__global__ void PairLogitPairwiseImpl(const float* point,
const uint2* pairs,
const float* pairWeights,
ui32 pairCount,
const ui32* scatterDerIndices,
float* functionValue,
float* pointDer,
float* pairsDer2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
uint2 pair;
if (i < pairCount) {
pair = __ldg(pairs + i);
}
const float w = pairWeights && (i < pairCount) ? pairWeights[i] : 1.0f;
const float diff = i < pairCount ? __ldg(point + pair.x) - __ldg(point + pair.y) : 0;
const float expDiff = __expf(diff);
const float p = max(min(isfinite(expDiff + 1.0f) ? expDiff / (1.0f + expDiff) : 1.0f, 1.0f - 1e-40f), 1e-40f);
const float direction = w * (1.0f - p);
const float pairDer2 = w * p * (1.0f - p);
if (i < pairCount) {
const ui32 pairx = scatterDerIndices == nullptr ? pair.x : scatterDerIndices[pair.x];
const ui32 pairy = scatterDerIndices == nullptr ? pair.y : scatterDerIndices[pair.y];
atomicAdd(pointDer + pairx, direction);
atomicAdd(pointDer + pairy, -direction);
if (pairsDer2) {
pairsDer2[i] = pairDer2;
}
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expDiff + 1.0f) ? __logf(1.0f + expDiff) : expDiff;
__shared__ float scores[BLOCK_SIZE];
scores[threadIdx.x] = (i < pairCount) ? w * (diff - logExpValPlusOne) : 0;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, scores, BLOCK_SIZE);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
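// Host wrapper for the pairwise kernel: initializes the point derivatives (and the
// optional function value buffer) and launches the kernel when there is at least one pair.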
void PairLogitPairwise(const float* point,
const uint2* pairs,
const float* pairWeights,
const ui32* scatterDerIndices,
float* value,
float* pointDer,
ui32 docCount,
float* pairDer2,
ui32 pairCount,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
FillBuffer(pointDer, 0.0f, docCount, stream);
if (value != nullptr) {
FillBuffer(value, 1.0f, 1, stream);
}
if (numBlocks) {
PairLogitPairwiseImpl<blockSize> << <numBlocks, blockSize, 0, stream >> > (point, pairs, pairWeights, pairCount, scatterDerIndices, value, pointDer, pairDer2);
}
}
}
| f5230b4dd3344e6d3e8e7a66f60e3065a6c993aa.cu | #include "pair_logit.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
namespace NKernel {
template <int BLOCK_SIZE>
__global__ void PairLogitPointwiseTargetImpl(const float* point,
const uint2* pairs, const float* pairWeights,
const ui32* writeMap,
ui32 pairCount, int pairShift,
float* functionValue,
float* der,
float* der2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float tmpScores[BLOCK_SIZE];
uint2 pair;
if (i < pairCount) {
pair = __ldg(pairs + i);
} else {
pair.x = pairShift;
pair.y = pairShift;
}
pair.x -= pairShift;
pair.y -= pairShift;
const float w = pairWeights && (i < pairCount) ? pairWeights[i] : 1.0f;
const float diff = i < pairCount ? __ldg(point + pair.x) - __ldg(point + pair.y) : 0;
const float expDiff = __expf(diff);
const float p = max(min(isfinite(expDiff) ? expDiff / (1.0f + expDiff) : 1.0f, 1.0f - 1e-40f), 1e-40f);
const float direction = (1.0f - p);
const ui32 firstDst = writeMap ? writeMap[pair.x] : pair.x;
const ui32 secondDst = writeMap ? writeMap[pair.y] : pair.y;
if (der && i < pairCount) {
atomicAdd(der + firstDst, w * direction);
atomicAdd(der + secondDst, -w * direction);
}
if (der2 && i < pairCount) {
const float scale = p * (1.0f - p);
atomicAdd(der2 + firstDst, w * scale);
atomicAdd(der2 + secondDst, w * scale);
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expDiff) ? __logf(1.0f + expDiff) : expDiff;
tmpScores[threadIdx.x] = (i < pairCount) ? w * (diff - logExpValPlusOne) : 0;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, tmpScores, BLOCK_SIZE);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
__global__ void MakePairWeightsImpl(const uint2* pairs, const float* pairWeights, ui32 pairCount,
float* weights) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = __ldg(pairs + i);
const float w = pairWeights ? pairWeights[i] : 1.0f;
atomicAdd(weights + pair.x, w);
atomicAdd(weights + pair.y, w);
}
}
void MakePairWeights(const uint2* pairs, const float* pairWeights, ui32 pairCount,
float* weights, TCudaStream stream) {
const int blockSize = 512;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
MakePairWeightsImpl<<<numBlocks, blockSize, 0, stream>>>(pairs, pairWeights, pairCount, weights);
}
void PairLogitPointwiseTarget(const float* point,
const uint2* pairs, const float* pairWeights,
const ui32* writeMap,
ui32 pairCount, int pairShift,
float* functionValue,
float* der,
float* der2,
ui32 docCount,
TCudaStream stream) {
const int blockSize = 1024;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (functionValue) {
FillBuffer(functionValue, 0.0f, 1, stream);
}
if (der) {
FillBuffer(der, 0.0f, docCount, stream);
}
if (der2) {
FillBuffer(der2, 0.0f, docCount, stream);
}
if (numBlocks)
{
PairLogitPointwiseTargetImpl<blockSize> << <numBlocks, blockSize, 0, stream >> > (point, pairs, pairWeights, writeMap, pairCount, pairShift, functionValue, der, der2);
}
}
template <int BLOCK_SIZE>
__global__ void PairLogitPairwiseImpl(const float* point,
const uint2* pairs,
const float* pairWeights,
ui32 pairCount,
const ui32* scatterDerIndices,
float* functionValue,
float* pointDer,
float* pairsDer2) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
uint2 pair;
if (i < pairCount) {
pair = __ldg(pairs + i);
}
const float w = pairWeights && (i < pairCount) ? pairWeights[i] : 1.0f;
const float diff = i < pairCount ? __ldg(point + pair.x) - __ldg(point + pair.y) : 0;
const float expDiff = __expf(diff);
const float p = max(min(isfinite(expDiff + 1.0f) ? expDiff / (1.0f + expDiff) : 1.0f, 1.0f - 1e-40f), 1e-40f);
const float direction = w * (1.0f - p);
const float pairDer2 = w * p * (1.0f - p);
if (i < pairCount) {
const ui32 pairx = scatterDerIndices == nullptr ? pair.x : scatterDerIndices[pair.x];
const ui32 pairy = scatterDerIndices == nullptr ? pair.y : scatterDerIndices[pair.y];
atomicAdd(pointDer + pairx, direction);
atomicAdd(pointDer + pairy, -direction);
if (pairsDer2) {
pairsDer2[i] = pairDer2;
}
}
if (functionValue) {
const float logExpValPlusOne = isfinite(expDiff + 1.0f) ? __logf(1.0f + expDiff) : expDiff;
__shared__ float scores[BLOCK_SIZE];
scores[threadIdx.x] = (i < pairCount) ? w * (diff - logExpValPlusOne) : 0;
__syncthreads();
float val = FastInBlockReduce<float>(threadIdx.x, scores, BLOCK_SIZE);
if (threadIdx.x == 0) {
atomicAdd(functionValue, val);
}
}
}
void PairLogitPairwise(const float* point,
const uint2* pairs,
const float* pairWeights,
const ui32* scatterDerIndices,
float* value,
float* pointDer,
ui32 docCount,
float* pairDer2,
ui32 pairCount,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
FillBuffer(pointDer, 0.0f, docCount, stream);
if (value != nullptr) {
FillBuffer(value, 1.0f, 1, stream);
}
if (numBlocks) {
PairLogitPairwiseImpl<blockSize> << <numBlocks, blockSize, 0, stream >> > (point, pairs, pairWeights, pairCount, scatterDerIndices, value, pointDer, pairDer2);
}
}
}
|
346e03340382afc2d62ea049285fccca0f81bcdf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <stdio.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// get x, y, index
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int idx = numCols*y + x;
// check if its in bounds
if ( x >= numCols ||
y >= numRows )
{
return;
}
float sum = 0.0f;
float cur_channel_val = 0.0f;
float cur_filter_val = 0.0f;
for (int r = -filterWidth/2; r <= filterWidth/2; ++r) {
for (int c = -filterWidth/2; c <= filterWidth/2; ++c) {
int cur_x = min( max(x + c,0), numCols-1 );
int cur_y = min( max(y + r,0), numRows-1 );
int cur_idx = numCols*cur_y + cur_x;
cur_channel_val = static_cast<float>(inputChannel[cur_idx]);
cur_filter_val = filter[(r + filterWidth/2) * filterWidth + c + filterWidth/2];
sum += cur_filter_val * cur_channel_val;
}
}
outputChannel[idx] = sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// get x, y, index
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int idx = numCols*y + x;
// check if its in bounds
if ( x >= numCols ||
y >= numRows )
{
return;
}
// get 3 channels
redChannel[idx] = inputImageRGBA[idx].x;
greenChannel[idx] = inputImageRGBA[idx].y;
blueChannel[idx] = inputImageRGBA[idx].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth*filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof( float) * filterWidth*filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
int blockWidth = 32; // thread size per block: blockWidth*blockWidth
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(blockWidth, blockWidth, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
int blocksY = (numRows)/blockWidth +1;
int blocksX = (numCols)/blockWidth +1;
const dim3 gridSize( blocksX, blocksY, 1); //TODO
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| 346e03340382afc2d62ea049285fccca0f81bcdf.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
#include <stdio.h>
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// get x, y, index
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int idx = numCols*y + x;
// check if its in bounds
if ( x >= numCols ||
y >= numRows )
{
return;
}
float sum = 0.0f;
float cur_channel_val = 0.0f;
float cur_filter_val = 0.0f;
for (int r = -filterWidth/2; r <= filterWidth/2; ++r) {
for (int c = -filterWidth/2; c <= filterWidth/2; ++c) {
int cur_x = min( max(x + c,0), numCols-1 );
int cur_y = min( max(y + r,0), numRows-1 );
int cur_idx = numCols*cur_y + cur_x;
cur_channel_val = static_cast<float>(inputChannel[cur_idx]);
cur_filter_val = filter[(r + filterWidth/2) * filterWidth + c + filterWidth/2];
sum += cur_filter_val * cur_channel_val;
}
}
outputChannel[idx] = sum;
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// get x, y, index
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int idx = numCols*y + x;
// check if its in bounds
if ( x >= numCols ||
y >= numRows )
{
return;
}
// get 3 channels
redChannel[idx] = inputImageRGBA[idx].x;
greenChannel[idx] = inputImageRGBA[idx].y;
blueChannel[idx] = inputImageRGBA[idx].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth*filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof( float) * filterWidth*filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
int blockWidth = 32; // thread size per block: blockWidth*blockWidth
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(blockWidth, blockWidth, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
int blocksY = (numRows)/blockWidth +1;
int blocksX = (numCols)/blockWidth +1;
const dim3 gridSize( blocksX, blocksY, 1); //TODO
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
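The worked example in the homework comments above (the cross-shaped filter of 0.2 weights over the 3x3 window around the pixel with value 6, which blurs to 3.2) can be checked with a small host-side reference that uses the same clamped indexing as the gaussian_blur kernel. This C++ sketch is illustrative only and not part of the assignment scaffolding; the helper name blurPixelReference is an assumption made for the example.

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>

// Host reference for one output pixel: overlay the filterWidth x filterWidth
// weights centered on (x, y), clamp neighbor coordinates to the image borders,
// and accumulate the weighted sum in floating point (steps 2 and 3 of the
// diagram in the homework comments).
float blurPixelReference(const unsigned char* channel, int numRows, int numCols,
                         const float* filter, int filterWidth, int x, int y) {
    float sum = 0.0f;
    for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r) {
        for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c) {
            const int curX = std::min(std::max(x + c, 0), numCols - 1);
            const int curY = std::min(std::max(y + r, 0), numRows - 1);
            const float w = filter[(r + filterWidth / 2) * filterWidth + (c + filterWidth / 2)];
            sum += w * static_cast<float>(channel[curY * numCols + curX]);
        }
    }
    return sum;
}

int main() {
    // The 5x6 image and the 3x3 filter from the comments; the pixel at
    // column 2, row 2 (value 6) should blur to 3.2.
    const unsigned char image[30] = {
        1, 2, 5, 2, 0, 3,
        3, 2, 5, 1, 6, 0,
        4, 3, 6, 2, 1, 4,
        0, 4, 0, 3, 4, 2,
        9, 6, 5, 0, 3, 9};
    const float filter[9] = {0.0f, 0.2f, 0.0f,
                             0.2f, 0.2f, 0.2f,
                             0.0f, 0.2f, 0.0f};
    const float blurred = blurPixelReference(image, 5, 6, filter, 3, /*x=*/2, /*y=*/2);
    std::printf("blurred pixel = %.1f\n", blurred);  // expected 3.2
    assert(std::fabs(blurred - 3.2f) < 1e-4f);
    return 0;
}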