hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
5b11d9feab5b989e4032b4d2ee5af819b13cd0bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
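//As a sanity check, the three weights sum to 1.0, so a pure white pixel
//(255, 255, 255) maps to an intensity of 255, while a pure green pixel
//(0, 255, 0) maps to roughly .587f * 255 = 150.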
#include "utils.h"
__global__
void rgb2grey_kernel(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
// Border protection
if(index_x >= numCols || index_y >= numRows)
return;
//Original data organized as a 1-dimensional array
int idx = index_y * numCols + index_x;
uchar4 rgba = rgbaImage[idx];
float channel_sum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[idx] = channel_sum;
}
void rgba_to_greyscale(uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage,
size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
int m = 32;
const dim3 blockSize(m, m, 1);
//Order in dim3 : dx, dy, dz
const dim3 gridSize( numCols / m + 1, numRows / m + 1, 1);
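//numCols / m + 1 rounds the grid up so every pixel gets a thread (one spare
//block per dimension when the size is an exact multiple of m); the kernel's
//border check discards the out-of-range threads.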
std::cout << "numRows: " << numRows << std::endl;
hipLaunchKernelGGL(( rgb2grey_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 5b11d9feab5b989e4032b4d2ee5af819b13cd0bc.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
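//As a sanity check, the three weights sum to 1.0, so a pure white pixel
//(255, 255, 255) maps to an intensity of 255, while a pure green pixel
//(0, 255, 0) maps to roughly .587f * 255 = 150.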
#include "utils.h"
__global__
void rgb2grey_kernel(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
// Border protection
if(index_x >= numCols || index_y >= numRows)
return;
//Original data organized as a 1-dimensional array
int idx = index_y * numCols + index_x;
uchar4 rgba = rgbaImage[idx];
float channel_sum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[idx] = channel_sum;
}
void rgba_to_greyscale(uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage,
size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
int m = 32;
const dim3 blockSize(m, m, 1);
//Order in dim3 : dx, dy, dz
const dim3 gridSize( numCols / m + 1, numRows / m + 1, 1);
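//numCols / m + 1 rounds the grid up so every pixel gets a thread (one spare
//block per dimension when the size is an exact multiple of m); the kernel's
//border check discards the out-of-range threads.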
std::cout << "numRows: " << numRows << std::endl;
rgb2grey_kernel<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
7a3c6f44aaa8f03d85f910a5a0a4dc41ee21d6c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void addArray( float *d_a, float *d_b, float *d_c, int size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= size)
{
return;
}
d_c[i] = d_a[i] + d_b[i];
} | 7a3c6f44aaa8f03d85f910a5a0a4dc41ee21d6c1.cu | #include "includes.h"
__global__ void addArray( float *d_a, float *d_b, float *d_c, int size)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= size)
{
return;
}
d_c[i] = d_a[i] + d_b[i];
} |
5cd081cff77fdb4a486444296356088a2c49dea8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include <thrust/device_vector.h>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
namespace caffe {
template <typename Dtype>
void SoftmaxLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 1) << "Softmax Layer takes a single blob as input.";
CHECK_EQ(top->size(), 1) << "Softmax Layer takes a single blob as output.";
(*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
sum_multiplier_.Reshape(1, bottom[0]->channels(), bottom[0]->height(), bottom[0]->width());
Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
for (int i = 0; i < sum_multiplier_.count(); ++i) {
multiplier_data[i] = 1.;
}
scale_.Reshape(bottom[0]->num(), 1, 1, 1);
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
Dtype* scale_data = scale_.mutable_cpu_data();
int num = bottom[0]->num();
int dim = bottom[0]->count() / bottom[0]->num();
memcpy(top_data, bottom_data, sizeof(Dtype) * bottom[0]->count());
// we need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
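// Subtracting the per-row max leaves the result unchanged, since
// exp(x_i - m) / sum_j exp(x_j - m) == exp(x_i) / sum_j exp(x_j), but it keeps
// the arguments to exp() non-positive and avoids overflow.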
for (int i = 0; i < num; ++i) {
scale_data[i] = bottom_data[i*dim];
for (int j = 0; j < dim; ++j) {
scale_data[i] = max(scale_data[i], bottom_data[i * dim + j]);
}
}
// subtraction
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
scale_data, sum_multiplier_.cpu_data(), 1., top_data);
// Perform exponentiation
caffe_exp<Dtype>(num * dim, top_data, top_data);
// sum after exp
caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
sum_multiplier_.cpu_data(), 0., scale_data);
// Do division
for (int i = 0; i < num; ++i) {
caffe_scal<Dtype>(dim, Dtype(1.) / scale_data[i], top_data + i * dim);
}
}
template <typename Dtype>
__global__ void kernel_get_max(const int num, const int dim,
const Dtype* data, Dtype* out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num) {
Dtype maxval = -FLT_MAX;
for (int i = 0; i < dim; ++i) {
maxval = max(data[index * dim + i], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_softmax_div(const int num, const int dim,
const Dtype* scale, Dtype* data) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num * dim) {
int n = index / dim;
data[index] /= scale[n];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int num, const Dtype* data, Dtype* out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int num = bottom[0]->num();
int dim = bottom[0]->count() / bottom[0]->num();
CUDA_CHECK(hipMemcpy(top_data, bottom_data,
sizeof(Dtype) * bottom[0]->count(), hipMemcpyDeviceToDevice));
// we need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// Compute max
hipLaunchKernelGGL(( kernel_get_max<Dtype>), dim3(CAFFE_GET_BLOCKS(num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num, dim, bottom_data, scale_data);
// subtraction
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
scale_data, sum_multiplier_.gpu_data(), 1., top_data);
// Perform exponentiation
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(num * dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num * dim, top_data, top_data);
// sum after exp
caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
sum_multiplier_.gpu_data(), 0., scale_data);
// Do division
hipLaunchKernelGGL(( kernel_softmax_div<Dtype>), dim3(CAFFE_GET_BLOCKS(num * dim)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num, dim, scale_data, top_data);
}
template <typename Dtype>
Dtype SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* top_data = top[0]->cpu_data();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
Dtype* scale_data = scale_.mutable_cpu_data();
int num = top[0]->num();
int dim = top[0]->count() / top[0]->num();
memcpy(bottom_diff, top_diff, sizeof(Dtype) * top[0]->count());
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff
for (int i = 0; i < num; ++i) {
scale_data[i] = caffe_cpu_dot<Dtype>(dim, top_diff + i * dim,
top_data + i * dim);
}
// subtraction
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
scale_data, sum_multiplier_.cpu_data(), 1., bottom_diff);
// elementwise multiplication
caffe_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
return Dtype(0);
}
template <typename Dtype>
Dtype SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
bool propagate_down, vector<Blob<Dtype>*>* bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
int num = top[0]->num();
int dim = top[0]->count() / top[0]->num();
CUDA_CHECK(hipMemcpy(bottom_diff, top_diff,
sizeof(Dtype) * top[0]->count(), hipMemcpyDeviceToDevice));
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff
// cuda dot returns the result to cpu, so we temporarily change the pointer
// mode
CUBLAS_CHECK(hipblasSetPointerMode(Caffe::cublas_handle(),
HIPBLAS_POINTER_MODE_DEVICE));
Dtype* scale_data = scale_.mutable_gpu_data();
for (int i = 0; i < num; ++i) {
caffe_gpu_dot<Dtype>(dim, top_diff + i * dim,
top_data + i * dim, scale_data + i);
}
CUBLAS_CHECK(hipblasSetPointerMode(Caffe::cublas_handle(),
HIPBLAS_POINTER_MODE_HOST));
// subtraction
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
scale_.gpu_data(), sum_multiplier_.gpu_data(), 1., bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
return Dtype(0);
}
INSTANTIATE_CLASS(SoftmaxLayer);
} // namespace caffe
| 5cd081cff77fdb4a486444296356088a2c49dea8.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include <thrust/device_vector.h>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
using std::max;
namespace caffe {
template <typename Dtype>
void SoftmaxLayer<Dtype>::SetUp(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
CHECK_EQ(bottom.size(), 1) << "Softmax Layer takes a single blob as input.";
CHECK_EQ(top->size(), 1) << "Softmax Layer takes a single blob as output.";
(*top)[0]->Reshape(bottom[0]->num(), bottom[0]->channels(),
bottom[0]->height(), bottom[0]->width());
sum_multiplier_.Reshape(1, bottom[0]->channels(), bottom[0]->height(), bottom[0]->width());
Dtype* multiplier_data = sum_multiplier_.mutable_cpu_data();
for (int i = 0; i < sum_multiplier_.count(); ++i) {
multiplier_data[i] = 1.;
}
scale_.Reshape(bottom[0]->num(), 1, 1, 1);
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = (*top)[0]->mutable_cpu_data();
Dtype* scale_data = scale_.mutable_cpu_data();
int num = bottom[0]->num();
int dim = bottom[0]->count() / bottom[0]->num();
memcpy(top_data, bottom_data, sizeof(Dtype) * bottom[0]->count());
// we need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
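// Subtracting the per-row max leaves the result unchanged, since
// exp(x_i - m) / sum_j exp(x_j - m) == exp(x_i) / sum_j exp(x_j), but it keeps
// the arguments to exp() non-positive and avoids overflow.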
for (int i = 0; i < num; ++i) {
scale_data[i] = bottom_data[i*dim];
for (int j = 0; j < dim; ++j) {
scale_data[i] = max(scale_data[i], bottom_data[i * dim + j]);
}
}
// subtraction
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
scale_data, sum_multiplier_.cpu_data(), 1., top_data);
// Perform exponentiation
caffe_exp<Dtype>(num * dim, top_data, top_data);
// sum after exp
caffe_cpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
sum_multiplier_.cpu_data(), 0., scale_data);
// Do division
for (int i = 0; i < num; ++i) {
caffe_scal<Dtype>(dim, Dtype(1.) / scale_data[i], top_data + i * dim);
}
}
template <typename Dtype>
__global__ void kernel_get_max(const int num, const int dim,
const Dtype* data, Dtype* out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num) {
Dtype maxval = -FLT_MAX;
for (int i = 0; i < dim; ++i) {
maxval = max(data[index * dim + i], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_softmax_div(const int num, const int dim,
const Dtype* scale, Dtype* data) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num * dim) {
int n = index / dim;
data[index] /= scale[n];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int num, const Dtype* data, Dtype* out) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < num) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int num = bottom[0]->num();
int dim = bottom[0]->count() / bottom[0]->num();
CUDA_CHECK(cudaMemcpy(top_data, bottom_data,
sizeof(Dtype) * bottom[0]->count(), cudaMemcpyDeviceToDevice));
// we need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// Compute max
kernel_get_max<Dtype><<<CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS>>>(
num, dim, bottom_data, scale_data);
// subtraction
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
scale_data, sum_multiplier_.gpu_data(), 1., top_data);
// Perform exponentiation
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(num * dim), CAFFE_CUDA_NUM_THREADS>>>(
num * dim, top_data, top_data);
// sum after exp
caffe_gpu_gemv<Dtype>(CblasNoTrans, num, dim, 1., top_data,
sum_multiplier_.gpu_data(), 0., scale_data);
// Do division
kernel_softmax_div<Dtype><<<CAFFE_GET_BLOCKS(num * dim), CAFFE_CUDA_NUM_THREADS>>>(
num, dim, scale_data, top_data);
}
template <typename Dtype>
Dtype SoftmaxLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
bool propagate_down,
vector<Blob<Dtype>*>* bottom) {
const Dtype* top_diff = top[0]->cpu_diff();
const Dtype* top_data = top[0]->cpu_data();
Dtype* bottom_diff = (*bottom)[0]->mutable_cpu_diff();
Dtype* scale_data = scale_.mutable_cpu_data();
int num = top[0]->num();
int dim = top[0]->count() / top[0]->num();
memcpy(bottom_diff, top_diff, sizeof(Dtype) * top[0]->count());
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff
for (int i = 0; i < num; ++i) {
scale_data[i] = caffe_cpu_dot<Dtype>(dim, top_diff + i * dim,
top_data + i * dim);
}
// subtraction
caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
scale_data, sum_multiplier_.cpu_data(), 1., bottom_diff);
// elementwise multiplication
caffe_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
return Dtype(0);
}
template <typename Dtype>
Dtype SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
bool propagate_down, vector<Blob<Dtype>*>* bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
int num = top[0]->num();
int dim = top[0]->count() / top[0]->num();
CUDA_CHECK(cudaMemcpy(bottom_diff, top_diff,
sizeof(Dtype) * top[0]->count(), cudaMemcpyDeviceToDevice));
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff
// cuda dot returns the result to cpu, so we temporarily change the pointer
// mode
CUBLAS_CHECK(cublasSetPointerMode(Caffe::cublas_handle(),
CUBLAS_POINTER_MODE_DEVICE));
Dtype* scale_data = scale_.mutable_gpu_data();
for (int i = 0; i < num; ++i) {
caffe_gpu_dot<Dtype>(dim, top_diff + i * dim,
top_data + i * dim, scale_data + i);
}
CUBLAS_CHECK(cublasSetPointerMode(Caffe::cublas_handle(),
CUBLAS_POINTER_MODE_HOST));
// subtraction
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, dim, 1, -1.,
scale_.gpu_data(), sum_multiplier_.gpu_data(), 1., bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
return Dtype(0);
}
INSTANTIATE_CLASS(SoftmaxLayer);
} // namespace caffe
|
6a13df01acbe76957b0235fa496077e445ce61ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.hu"
#include <hip/hip_cooperative_groups.h>
#define CUDART_PI_F 3.141592654f
__device__ __forceinline__ DATA_TYPE mul(DATA_TYPE a, DATA_TYPE b)
{
return make_float2(a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x);
}
__device__ __forceinline__ DATA_TYPE twiddle(DATA_TYPE a, size_t n, size_t block, size_t row)
{
// todo: static
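// Multiplies a by the unit complex number exp(-2*pi*i*block*row/n), i.e. the
// standard Cooley-Tukey twiddle factor W_n^(block*row).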
float f = (-2 * CUDART_PI_F * block * row) / n;
return mul(a, make_float2(cosf(f), sinf(f)));
}
__device__ __forceinline__ void sFFT(DATA_TYPE out[], DATA_TYPE a[], size_t n)
{
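// Direct O(n^2) evaluation of the DFT, out[k] = sum_j a[j] * exp(-2*pi*i*j*k/n);
// only used for the small 8- and 16-point sub-transforms in this file.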
for (int i = 0; i < n; i++) {
float real = 0.0;
float imag = 0.0;
float pow = 2 * CUDART_PI_F * i / (float)n;
for (int j = 0; j < n; j++) {
float powh = fmodf(j * pow, 2 * CUDART_PI_F);
DATA_TYPE d = a[j];
real += d.x * cosf(powh) + d.y * sinf(powh);
imag += - d.x * sinf(powh) + d.y * cosf(powh);
}
out[i] = make_float2(real, imag);
}
}
__global__ void kernel1_2(DATA_TYPE *output, DATA_TYPE *data, int kernel_id)
{
DATA_TYPE sample[8];
DATA_TYPE out[8];
__shared__ DATA_TYPE block[512];
for (int i = 0; i < 8; i++)
sample[i] = data[(blockIdx.x + threadIdx.y * 128 + i * 128 * 8) * 8 + threadIdx.x];
// 1. 8-point fft
sFFT(out, sample, 8);
// 2. transpose through shared memory
{
for (int i = 0; i < 8; i++)
block[(threadIdx.y * blockDim.x + i) * 8 + threadIdx.x] = out[i];
__syncthreads();
for (int i = 0; i < 8; i++)
sample[i] = block[i * blockDim.x * 8 + threadIdx.y * 8 + threadIdx.x];
}
// 3. twiddle
for (int i = 0; i < 8; i++)
sample[i] = twiddle(sample[i], 64, i, threadIdx.y);
// 4. 8-point fft
sFFT(out, sample, 8);
if (kernel_id == 1) {
for (int i = 0; i < 8; i++) {
size_t id = (blockIdx.x + threadIdx.y * 128 + i * 128 * 8);
size_t row = id / 2;
size_t rem = blockIdx.x % 2;
output[(((row % 64) * 64 + row / 64) * 2 + rem) * 8 + threadIdx.x] =
twiddle(out[i], 64 * 64, row % 64, (row / 64) % 64);
}
}
else {
for (int i = 0; i < 8; i++) {
size_t id = (blockIdx.x + threadIdx.y * 128 + i * 128 * 8);
size_t row = id / 2;
size_t rem = blockIdx.x % 2;
output[id * 8 + threadIdx.x] =
twiddle(out[i], 64 * 64 * 16, row, rem * 8 + threadIdx.x);
}
}
}
__global__ void kernel3(DATA_TYPE *output, DATA_TYPE *data)
{
size_t pos = (blockIdx.x * blockDim.x + threadIdx.x) * 16;
DATA_TYPE sample[16];
DATA_TYPE out[16];
for (int i = 0; i < 16; i++)
sample[i] = data[pos + i];
// 1. 16-point fft
sFFT(out, sample, 16);
for (int i = 0; i < 16; i++) {
output[pos + i] = out[i];
}
}
void fft(DATA_TYPE *output, DATA_TYPE *data)
{
dim3 blockDim1(8, 8, 1);
dim3 blockDim3(32, 1, 1);
dim3 gridDim(128);
hipLaunchKernelGGL(( kernel1_2), dim3(gridDim), dim3(blockDim1), 0, 0, output, data, 1);
hipLaunchKernelGGL(( kernel1_2), dim3(gridDim), dim3(blockDim1), 0, 0, data, output, 2);
hipLaunchKernelGGL(( kernel3), dim3(gridDim), dim3(blockDim3), 0, 0, output, data);
}
std::vector<float> benchmark(DATA_TYPE *output,
DATA_TYPE *data,
hipEvent_t start, hipEvent_t stop)
{
DATA_TYPE *dev_output, *dev_middle, *dev_data, *middle, *middle2;
std::vector<float> time(2);
/*
Setup
*/
cudaCheckReturn(hipHostMalloc(&middle, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(hipHostMalloc(&middle2, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(hipMalloc(&dev_data, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(hipMalloc(&dev_middle, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(hipMalloc(&dev_output, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(hipMemcpy(dev_data, data, DATA_SIZE * sizeof(DATA_TYPE),
hipMemcpyHostToDevice));
hipfftHandle plan;
cufftCheckReturn(hipfftCreate(&plan));
long long len = DATA_SIZE;
size_t ws = 0;
cufftCheckReturn(
cufftXtMakePlanMany(
plan, 1, &len,
NULL, 1, 1, HIP_C_32F,
NULL, 1, 1, HIP_C_32F,
1, &ws, HIP_C_32F));
/*
FFT
*/
cudaCheckReturn(hipDeviceSynchronize());
cudaCheckReturn(hipEventRecord(start));
// cufftCheckReturn(cufftXtExec(plan, dev_data, dev_middle, HIPFFT_FORWARD));
fft(dev_middle, dev_data);
cudaCheckReturn(hipEventRecord(stop));
cudaCheckReturn(hipEventSynchronize(stop));
cudaCheckKernel();
cudaCheckReturn(hipEventElapsedTime(&time[0], start, stop));
/*
Scaling
*/
cudaCheckReturn(hipMemcpy(middle, dev_middle, DATA_SIZE * sizeof(DATA_TYPE),
hipMemcpyDeviceToHost));
for (size_t i = 0; i < DATA_SIZE; i++) {
float2 m = middle[i];
m.x /= DATA_SIZE;
m.y /= DATA_SIZE;
middle2[i] = m;
}
for (size_t i = 0; i < 4096; i++) {
for (size_t j = 0; j < 16; j++) {
middle[j * 4096 + i] = middle2[i * 16 + j];
}
}
cudaCheckReturn(hipMemcpy(dev_middle, middle, DATA_SIZE * sizeof(DATA_TYPE),
hipMemcpyHostToDevice));
/*
IFFT
*/
cudaCheckReturn(hipDeviceSynchronize());
cudaCheckReturn(hipEventRecord(start));
cufftCheckReturn(cufftXtExec(plan, dev_middle, dev_output, HIPFFT_BACKWARD));
cudaCheckReturn(hipEventRecord(stop));
cudaCheckReturn(hipEventSynchronize(stop));
cudaCheckKernel();
cudaCheckReturn(hipEventElapsedTime(&time[1], start, stop));
/*
Close
*/
cufftCheckReturn(hipfftDestroy(plan));
cudaCheckReturn(hipMemcpy(output, dev_output, DATA_SIZE * sizeof(DATA_TYPE),
hipMemcpyDeviceToHost));
cudaCheckReturn(hipHostFree(middle));
cudaCheckReturn(hipHostFree(middle2));
cudaCheckReturn(hipFree(dev_output));
cudaCheckReturn(hipFree(dev_middle));
cudaCheckReturn(hipFree(dev_data));
return time;
}
| 6a13df01acbe76957b0235fa496077e445ce61ab.cu | #include "common.hu"
#include <cooperative_groups.h>
#define CUDART_PI_F 3.141592654f
__device__ __forceinline__ DATA_TYPE mul(DATA_TYPE a, DATA_TYPE b)
{
return make_float2(a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x);
}
__device__ __forceinline__ DATA_TYPE twiddle(DATA_TYPE a, size_t n, size_t block, size_t row)
{
// todo: static
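// Multiplies a by the unit complex number exp(-2*pi*i*block*row/n), i.e. the
// standard Cooley-Tukey twiddle factor W_n^(block*row).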
float f = (-2 * CUDART_PI_F * block * row) / n;
return mul(a, make_float2(cosf(f), sinf(f)));
}
__device__ __forceinline__ void sFFT(DATA_TYPE out[], DATA_TYPE a[], size_t n)
{
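// Direct O(n^2) evaluation of the DFT, out[k] = sum_j a[j] * exp(-2*pi*i*j*k/n);
// only used for the small 8- and 16-point sub-transforms in this file.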
for (int i = 0; i < n; i++) {
float real = 0.0;
float imag = 0.0;
float pow = 2 * CUDART_PI_F * i / (float)n;
for (int j = 0; j < n; j++) {
float powh = fmodf(j * pow, 2 * CUDART_PI_F);
DATA_TYPE d = a[j];
real += d.x * cosf(powh) + d.y * sinf(powh);
imag += - d.x * sinf(powh) + d.y * cosf(powh);
}
out[i] = make_float2(real, imag);
}
}
__global__ void kernel1_2(DATA_TYPE *output, DATA_TYPE *data, int kernel_id)
{
DATA_TYPE sample[8];
DATA_TYPE out[8];
__shared__ DATA_TYPE block[512];
for (int i = 0; i < 8; i++)
sample[i] = data[(blockIdx.x + threadIdx.y * 128 + i * 128 * 8) * 8 + threadIdx.x];
// 1. 8-point fft
sFFT(out, sample, 8);
// 2. transpose through shared memory
{
for (int i = 0; i < 8; i++)
block[(threadIdx.y * blockDim.x + i) * 8 + threadIdx.x] = out[i];
__syncthreads();
for (int i = 0; i < 8; i++)
sample[i] = block[i * blockDim.x * 8 + threadIdx.y * 8 + threadIdx.x];
}
// 3. twiddle
for (int i = 0; i < 8; i++)
sample[i] = twiddle(sample[i], 64, i, threadIdx.y);
// 4. 8-point fft
sFFT(out, sample, 8);
if (kernel_id == 1) {
for (int i = 0; i < 8; i++) {
size_t id = (blockIdx.x + threadIdx.y * 128 + i * 128 * 8);
size_t row = id / 2;
size_t rem = blockIdx.x % 2;
output[(((row % 64) * 64 + row / 64) * 2 + rem) * 8 + threadIdx.x] =
twiddle(out[i], 64 * 64, row % 64, (row / 64) % 64);
}
}
else {
for (int i = 0; i < 8; i++) {
size_t id = (blockIdx.x + threadIdx.y * 128 + i * 128 * 8);
size_t row = id / 2;
size_t rem = blockIdx.x % 2;
output[id * 8 + threadIdx.x] =
twiddle(out[i], 64 * 64 * 16, row, rem * 8 + threadIdx.x);
}
}
}
__global__ void kernel3(DATA_TYPE *output, DATA_TYPE *data)
{
size_t pos = (blockIdx.x * blockDim.x + threadIdx.x) * 16;
DATA_TYPE sample[16];
DATA_TYPE out[16];
for (int i = 0; i < 16; i++)
sample[i] = data[pos + i];
// 1. 16-point fft
sFFT(out, sample, 16);
for (int i = 0; i < 16; i++) {
output[pos + i] = out[i];
}
}
void fft(DATA_TYPE *output, DATA_TYPE *data)
{
dim3 blockDim1(8, 8, 1);
dim3 blockDim3(32, 1, 1);
dim3 gridDim(128);
kernel1_2<<<gridDim, blockDim1>>>(output, data, 1);
kernel1_2<<<gridDim, blockDim1>>>(data, output, 2);
kernel3<<<gridDim, blockDim3>>>(output, data);
}
std::vector<float> benchmark(DATA_TYPE *output,
DATA_TYPE *data,
cudaEvent_t start, cudaEvent_t stop)
{
DATA_TYPE *dev_output, *dev_middle, *dev_data, *middle, *middle2;
std::vector<float> time(2);
/*
Setup
*/
cudaCheckReturn(cudaMallocHost(&middle, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMallocHost(&middle2, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMalloc(&dev_data, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMalloc(&dev_middle, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMalloc(&dev_output, DATA_SIZE * sizeof(DATA_TYPE)));
cudaCheckReturn(cudaMemcpy(dev_data, data, DATA_SIZE * sizeof(DATA_TYPE),
cudaMemcpyHostToDevice));
cufftHandle plan;
cufftCheckReturn(cufftCreate(&plan));
long long len = DATA_SIZE;
size_t ws = 0;
cufftCheckReturn(
cufftXtMakePlanMany(
plan, 1, &len,
NULL, 1, 1, CUDA_C_32F,
NULL, 1, 1, CUDA_C_32F,
1, &ws, CUDA_C_32F));
/*
FFT
*/
cudaCheckReturn(cudaDeviceSynchronize());
cudaCheckReturn(cudaEventRecord(start));
// cufftCheckReturn(cufftXtExec(plan, dev_data, dev_middle, CUFFT_FORWARD));
fft(dev_middle, dev_data);
cudaCheckReturn(cudaEventRecord(stop));
cudaCheckReturn(cudaEventSynchronize(stop));
cudaCheckKernel();
cudaCheckReturn(cudaEventElapsedTime(&time[0], start, stop));
/*
Scaling
*/
cudaCheckReturn(cudaMemcpy(middle, dev_middle, DATA_SIZE * sizeof(DATA_TYPE),
cudaMemcpyDeviceToHost));
for (size_t i = 0; i < DATA_SIZE; i++) {
float2 m = middle[i];
m.x /= DATA_SIZE;
m.y /= DATA_SIZE;
middle2[i] = m;
}
for (size_t i = 0; i < 4096; i++) {
for (size_t j = 0; j < 16; j++) {
middle[j * 4096 + i] = middle2[i * 16 + j];
}
}
cudaCheckReturn(cudaMemcpy(dev_middle, middle, DATA_SIZE * sizeof(DATA_TYPE),
cudaMemcpyHostToDevice));
/*
IFFT
*/
cudaCheckReturn(cudaDeviceSynchronize());
cudaCheckReturn(cudaEventRecord(start));
cufftCheckReturn(cufftXtExec(plan, dev_middle, dev_output, CUFFT_INVERSE));
cudaCheckReturn(cudaEventRecord(stop));
cudaCheckReturn(cudaEventSynchronize(stop));
cudaCheckKernel();
cudaCheckReturn(cudaEventElapsedTime(&time[1], start, stop));
/*
Close
*/
cufftCheckReturn(cufftDestroy(plan));
cudaCheckReturn(cudaMemcpy(output, dev_output, DATA_SIZE * sizeof(DATA_TYPE),
cudaMemcpyDeviceToHost));
cudaCheckReturn(cudaFreeHost(middle));
cudaCheckReturn(cudaFreeHost(middle2));
cudaCheckReturn(cudaFree(dev_output));
cudaCheckReturn(cudaFree(dev_middle));
cudaCheckReturn(cudaFree(dev_data));
return time;
}
|
def2fb454616555f9c9ccd039aa4e39e662099c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "print_debug.hpp"
#include <stdio.h>
#ifdef DEBUG
#define cudaCheckError(ans) cudaAssert((ans), __FILE__, __LINE__);
inline void cudaAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess) {
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
__device__ void print_cell(int x, int y, cellData* cell) {
#ifdef DEBUG
printf("Cell %i,%i: h:%f wvol:%f wheght: %f sed:%f",
x, y, cell->height, cell->water_vol, cell->water_height, cell->sediment);
/*printf("sediment cap: %f, new erosion: %f new deposition: %f\n",sediment_capacity,new_erosion,new_deposition);*/
/*printf("water height_dir: %f %f %f %f\n",*/
/*water_height_dir.x, water_height_dir.y, water_height_dir.w, water_height_dir.z);*/
/*printf("water flux_dir: %f %f %f %f\nsediment_mov_dir %f %f %f %f\n", water_flux.x, water_flux.y, water_flux.w, water_flux.z,*/
/*sediment_movement_dir.x, sediment_movement_dir.y, sediment_movement_dir.w, sediment_movement_dir.z);*/
/*printf("height %f -> %f\n", total_map[index].height, new_total_map[index].height);*/
#endif
}
__device__ void debug_erode_dump(int print_ind, int mapDim, int globalMapDim, cellData* map, cellData* cell, cellData* newcell,
float sediment_capacity, float cell_erosion, float new_deposition,
float4 water_height_dir, float4 delta_h_dir, float4 v_dir,
float4 water_flux, float4 water_flux_norm, float4 total_water_flux, float4 sediment_flux)
{
#ifdef DEBUG
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int index = x + globalMapDim * y;
int left = max(0, x - 1);
int right = min(x + 1, mapDim - 1);
int up = max(0, y - 1);
int down = min(y + 1, mapDim - 1);
if (index == print_ind) {
printf("\n");
print_cell(x, y, cell);
print_cell(x, y, newcell);
printf("\ncell water_height: %f\n", cell->water_height);
printf("water_height_dir: %f %f %f %f\n", water_height_dir.x, water_height_dir.y, water_height_dir.w, water_height_dir.z);
printf("slope dir: %f %f %f %f\n", delta_h_dir.x, delta_h_dir.y, delta_h_dir.w, delta_h_dir.z);
printf("v_dir: %f %f %f %f\n", v_dir.x, v_dir.y, v_dir.w, v_dir.z);
printf("sediment cap: %f, new erosion: %f new deposition: %f\n", sediment_capacity, cell_erosion, new_deposition);
/*printf("height change: %f -> %f : %f\n", oldheight, newheight, newheight-oldheight);*/
printf("slope dir: %f %f %f %f\n", delta_h_dir.x, delta_h_dir.y, delta_h_dir.w, delta_h_dir.z);
printf("water height_dir: %f %f %f %f\n",
water_height_dir.x, water_height_dir.y, water_height_dir.w, water_height_dir.z);
printf("total water flux %f\n", total_water_flux);
printf("water flux_dir: %f %f %f %f\nsediment_flux_dir %f %f %f %f\n", water_flux.x, water_flux.y, water_flux.w, water_flux.z,
sediment_flux.x, sediment_flux.y, sediment_flux.w, sediment_flux.z);
printf("norm water flux: %f %f %f %f\n",
water_flux_norm.x, water_flux_norm.y, water_flux_norm.w, water_flux_norm.z);
printf("\nold water vol: %f -> new water vol %f\n", cell->water_vol*WATER_LOSS, sum(water_flux) + newcell->water_vol);
print_cell(x, y, cell);
print_cell(x - 1, y, &map[left + y * mapDim]);
print_cell(x + 1, y, &map[right + y * mapDim]);
print_cell(x, y - 1, &map[x + up * mapDim]);
print_cell(x, y + 1, &map[x + down * mapDim]);
}
#endif
}
void debug_print_terrain(cellData* map, cellData* old_map, int map_dim) {
#ifdef DEBUG
for (int x = 0; x < map_dim; x++) {
for (int y = 0; y < map_dim; y++) {
if (map[x + map_dim * y].height > old_map[x + y * map_dim].height) {
printf("\033[37;1m%5.2f \033[0m", map[x + map_dim * y].height);
}
else if (map[x + map_dim * y].height < old_map[x + y * map_dim].height) {
printf("\033[30;1m%5.2f \033[0m", map[x + map_dim * y].height);
}
else
printf("%5.2f ", map[x + map_dim * y].height);
}
printf("\n");
}
printf("\033[0m\n");
#endif
}
void debug_print_water(cellData* map, cellData* old_map, int map_dim) {
#ifdef DEBUG
for (int x = 0; x < map_dim; x++) {
for (int y = 0; y < map_dim; y++) {
int index = x + map_dim * y;
if (map[index].water_vol > 0.005)
printf("\033[36m%5.2f \033[0m", map[index].water_height);
else
printf("%5.2f ", map[index].water_height);
}
printf("\n");
}
printf("\033[0m\n");
#endif
}
void debug_print_sediment(cellData* map, cellData* old_map, int map_dim) {
#ifdef DEBUG
for (int x = 0; x < map_dim; x++) {
for (int y = 0; y < map_dim; y++) {
int index = x + map_dim * y;
if (map[index].sediment >= .005) {
printf("\033[33m%5.2f \033[0m", map[index].sediment);
}
else {
printf("%5.2f ", map[index].sediment);
}
}
printf("\n");
}
printf("\033[0m\n");
#endif
}
__device__ void debug_compare_maps(cellData* total_map, cellData* new_total_map, int globalMapDim) {
#ifdef DEBUG
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int index = x + globalMapDim * y;
if (index == 0) {
/*printf("\n\ncell %i %i\n", threadIdx.x, threadIdx.y);*/
float old_volume = 0;
float old_heightsum = 0;
float old_sedsum = 0;
float old_water_vol = 0;
float new_water_volsum = 0;
float new_volume = 0;
float new_heightsum = 0;
float new_sedsum = 0;
for (int j = 0; j<blockDim.y; j++) {
for (int i = 0; i<blockDim.x; i++) {
old_heightsum += total_map[i + j * blockDim.x].height;
old_sedsum += total_map[i + j * blockDim.x].sediment;
old_water_vol += total_map[i + j * blockDim.x].water_vol;
new_heightsum += new_total_map[i + j * blockDim.x].height;
new_sedsum += new_total_map[i + j * blockDim.x].sediment;
new_water_volsum += new_total_map[i + j * blockDim.x].water_vol;
}
}
old_volume = old_heightsum + old_sedsum;
new_volume = new_heightsum + new_sedsum;
printf("old height: %f new height: %f\n", old_heightsum, new_heightsum);
printf("old sed: %f new sed: %f\n", old_sedsum, new_sedsum);
printf("old volume: %f new volume: %f diff:%f\n", old_volume, new_volume, new_volume - old_volume);
printf("old water volume: %f new water volume: %f diff:%f\n", old_water_vol, new_water_volsum, new_water_volsum - old_water_vol);
}
#endif
}
void printMap(float *map, int dim) {
#ifdef DEBUG
for (int x = 0; x < dim; x++) {
for (int y = 0; y < dim; y++) {
printf("%4i ", int(map[x + dim * y]));
}
printf("\n");
}
printf("\n");
#endif
} | def2fb454616555f9c9ccd039aa4e39e662099c8.cu | #include "print_debug.hpp"
#include <stdio.h>
#ifdef DEBUG
#define cudaCheckError(ans) cudaAssert((ans), __FILE__, __LINE__);
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s at %s:%d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
#define cudaCheckError(ans) ans
#endif
__device__ void print_cell(int x, int y, cellData* cell) {
#ifdef DEBUG
printf("Cell %i,%i: h:%f wvol:%f wheght: %f sed:%f",
x, y, cell->height, cell->water_vol, cell->water_height, cell->sediment);
/*printf("sediment cap: %f, new erosion: %f new deposition: %f\n",sediment_capacity,new_erosion,new_deposition);*/
/*printf("water height_dir: %f %f %f %f\n",*/
/*water_height_dir.x, water_height_dir.y, water_height_dir.w, water_height_dir.z);*/
/*printf("water flux_dir: %f %f %f %f\nsediment_mov_dir %f %f %f %f\n", water_flux.x, water_flux.y, water_flux.w, water_flux.z,*/
/*sediment_movement_dir.x, sediment_movement_dir.y, sediment_movement_dir.w, sediment_movement_dir.z);*/
/*printf("height %f -> %f\n", total_map[index].height, new_total_map[index].height);*/
#endif
}
__device__ void debug_erode_dump(int print_ind, int mapDim, int globalMapDim, cellData* map, cellData* cell, cellData* newcell,
float sediment_capacity, float cell_erosion, float new_deposition,
float4 water_height_dir, float4 delta_h_dir, float4 v_dir,
float4 water_flux, float4 water_flux_norm, float4 total_water_flux, float4 sediment_flux)
{
#ifdef DEBUG
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int index = x + globalMapDim * y;
int left = max(0, x - 1);
int right = min(x + 1, mapDim - 1);
int up = max(0, y - 1);
int down = min(y + 1, mapDim - 1);
if (index == print_ind) {
printf("\n");
print_cell(x, y, cell);
print_cell(x, y, newcell);
printf("\ncell water_height: %f\n", cell->water_height);
printf("water_height_dir: %f %f %f %f\n", water_height_dir.x, water_height_dir.y, water_height_dir.w, water_height_dir.z);
printf("slope dir: %f %f %f %f\n", delta_h_dir.x, delta_h_dir.y, delta_h_dir.w, delta_h_dir.z);
printf("v_dir: %f %f %f %f\n", v_dir.x, v_dir.y, v_dir.w, v_dir.z);
printf("sediment cap: %f, new erosion: %f new deposition: %f\n", sediment_capacity, cell_erosion, new_deposition);
/*printf("height change: %f -> %f : %f\n", oldheight, newheight, newheight-oldheight);*/
printf("slope dir: %f %f %f %f\n", delta_h_dir.x, delta_h_dir.y, delta_h_dir.w, delta_h_dir.z);
printf("water height_dir: %f %f %f %f\n",
water_height_dir.x, water_height_dir.y, water_height_dir.w, water_height_dir.z);
printf("total water flux %f\n", total_water_flux);
printf("water flux_dir: %f %f %f %f\nsediment_flux_dir %f %f %f %f\n", water_flux.x, water_flux.y, water_flux.w, water_flux.z,
sediment_flux.x, sediment_flux.y, sediment_flux.w, sediment_flux.z);
printf("norm water flux: %f %f %f %f\n",
water_flux_norm.x, water_flux_norm.y, water_flux_norm.w, water_flux_norm.z);
printf("\nold water vol: %f -> new water vol %f\n", cell->water_vol*WATER_LOSS, sum(water_flux) + newcell->water_vol);
print_cell(x, y, cell);
print_cell(x - 1, y, &map[left + y * mapDim]);
print_cell(x + 1, y, &map[right + y * mapDim]);
print_cell(x, y - 1, &map[x + up * mapDim]);
print_cell(x, y + 1, &map[x + down * mapDim]);
}
#endif
}
void debug_print_terrain(cellData* map, cellData* old_map, int map_dim) {
#ifdef DEBUG
for (int x = 0; x < map_dim; x++) {
for (int y = 0; y < map_dim; y++) {
if (map[x + map_dim * y].height > old_map[x + y * map_dim].height) {
printf("\033[37;1m%5.2f \033[0m", map[x + map_dim * y].height);
}
else if (map[x + map_dim * y].height < old_map[x + y * map_dim].height) {
printf("\033[30;1m%5.2f \033[0m", map[x + map_dim * y].height);
}
else
printf("%5.2f ", map[x + map_dim * y].height);
}
printf("\n");
}
printf("\033[0m\n");
#endif
}
void debug_print_water(cellData* map, cellData* old_map, int map_dim) {
#ifdef DEBUG
for (int x = 0; x < map_dim; x++) {
for (int y = 0; y < map_dim; y++) {
int index = x + map_dim * y;
if (map[index].water_vol > 0.005)
printf("\033[36m%5.2f \033[0m", map[index].water_height);
else
printf("%5.2f ", map[index].water_height);
}
printf("\n");
}
printf("\033[0m\n");
#endif
}
void debug_print_sediment(cellData* map, cellData* old_map, int map_dim) {
#ifdef DEBUG
for (int x = 0; x < map_dim; x++) {
for (int y = 0; y < map_dim; y++) {
int index = x + map_dim * y;
if (map[index].sediment >= .005) {
printf("\033[33m%5.2f \033[0m", map[index].sediment);
}
else {
printf("%5.2f ", map[index].sediment);
}
}
printf("\n");
}
printf("\033[0m\n");
#endif
}
__device__ void debug_compare_maps(cellData* total_map, cellData* new_total_map, int globalMapDim) {
#ifdef DEBUG
int x = blockDim.x*blockIdx.x + threadIdx.x;
int y = blockDim.y*blockIdx.y + threadIdx.y;
int index = x + globalMapDim * y;
if (index == 0) {
/*printf("\n\ncell %i %i\n", threadIdx.x, threadIdx.y);*/
float old_volume = 0;
float old_heightsum = 0;
float old_sedsum = 0;
float old_water_vol = 0;
float new_water_volsum = 0;
float new_volume = 0;
float new_heightsum = 0;
float new_sedsum = 0;
for (int j = 0; j<blockDim.y; j++) {
for (int i = 0; i<blockDim.x; i++) {
old_heightsum += total_map[i + j * blockDim.x].height;
old_sedsum += total_map[i + j * blockDim.x].sediment;
old_water_vol += total_map[i + j * blockDim.x].water_vol;
new_heightsum += new_total_map[i + j * blockDim.x].height;
new_sedsum += new_total_map[i + j * blockDim.x].sediment;
new_water_volsum += new_total_map[i + j * blockDim.x].water_vol;
}
}
old_volume = old_heightsum + old_sedsum;
new_volume = new_heightsum + new_sedsum;
printf("old height: %f new height: %f\n", old_heightsum, new_heightsum);
printf("old sed: %f new sed: %f\n", old_sedsum, new_sedsum);
printf("old volume: %f new volume: %f diff:%f\n", old_volume, new_volume, new_volume - old_volume);
printf("old water volume: %f new water volume: %f diff:%f\n", old_water_vol, new_water_volsum, new_water_volsum - old_water_vol);
}
#endif
}
void printMap(float *map, int dim) {
#ifdef DEBUG
for (int x = 0; x < dim; x++) {
for (int y = 0; y < dim; y++) {
printf("%4i ", int(map[x + dim * y]));
}
printf("\n");
}
printf("\n");
#endif
} |
3f84810741518f82a4a7bee8cf6c28d1d95474e2.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _HEADER_H_
#define _HEADER_H_
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <limits.h> //rand
#include <assert.h>
#include <math.h> //ceil
#include "cutil.h"
/////////////////////////////////////////////////////////////////////////defines
#ifndef mymax
#define mymax(a,b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef mymin
#define mymin(a,b) (((a) < (b)) ? (a) : (b))
#endif
#define SAFE_FREE(p) {if(p) {free(p); (p)=NULL;} };
#define SAFE_CUDA_FREE(p) {if(p) {CUDA_SAFE_CALL(hipFree(p)); (p)=NULL;} };
#define IntCeilDiv(a, b) ( (int)ceilf((a) / float(b)) )
///////////////////////////////////////general define
//debug?
#define _bDebug 0
#ifdef BINARY_SEARCH_HASH
#define _bSortPart 1 //sort each partition, so probe will bisearch rather than scan the part
#else
#define _bSortPart 0 //sort each partition, so probe will bisearch rather than scan the part
#endif
#define _maxPartLen (_bDebug? 2: 512) //max partition length. Limited by shared memory size (4k to be safe): sizeof(Rec) * maxPartLen <= 4k
#define HASH(v) (_bDebug ? ((unsigned int) v) : ((unsigned int)( (v >> 7) ^ (v >> 13) ^ (v >>21) ^ (v) )) )
#endif
| 3f84810741518f82a4a7bee8cf6c28d1d95474e2.cu |
#ifndef _HEADER_H_
#define _HEADER_H_
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <limits.h> //rand
#include <assert.h>
#include <math.h> //ceil
#include "cutil.h"
/////////////////////////////////////////////////////////////////////////defines
#ifndef mymax
#define mymax(a,b) (((a) > (b)) ? (a) : (b))
#endif
#ifndef mymin
#define mymin(a,b) (((a) < (b)) ? (a) : (b))
#endif
#define SAFE_FREE(p) {if(p) {free(p); (p)=NULL;} };
#define SAFE_CUDA_FREE(p) {if(p) {CUDA_SAFE_CALL(cudaFree(p)); (p)=NULL;} };
#define IntCeilDiv(a, b) ( (int)ceilf((a) / float(b)) )
///////////////////////////////////////general define
//debug?
#define _bDebug 0
#ifdef BINARY_SEARCH_HASH
#define _bSortPart 1 //sort each partition, so probe will bisearch rather than scan the part
#else
#define _bSortPart 0 //sort each partition, so probe will bisearch rather than scan the part
#endif
#define _maxPartLen (_bDebug? 2: 512) //max partition length. Limited by shared memory size (4k to be safe): sizeof(Rec) * maxPartLen <= 4k
#define HASH(v) (_bDebug ? ((unsigned int) v) : ((unsigned int)( (v >> 7) ^ (v >> 13) ^ (v >>21) ^ (v) )) )
#endif
|
cf9a6562063b7a1f70058776bcdf70e6dbb52c26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define NVFUSER_DEFINE_MAGIC_ZERO \
__shared__ int nvfuser_zero_s; \
if (threadIdx.x == 0) \
nvfuser_zero_s = 0; \
__syncthreads(); \
atomicMin(&nvfuser_zero_s, threadIdx.x); \
int nvfuser_zero = nvfuser_zero_s;
#define NVFUSER_UPDATE_MAGIC_ZERO \
do { \
nvfuser_zero <<= 1; \
} while (0);
#ifdef __NVCC__
#include <assert.h>
#endif // __NVCC__
__device__ constexpr int ceilDiv(int a, int b) {
return (a + b - 1) / b;
}
__device__ constexpr int64_t ceilDiv(int64_t a, int64_t b) {
return (a + b - 1) / b;
}
__device__ constexpr int64_t ceilDiv(int64_t a, int b) {
return ceilDiv(a, (int64_t)b);
}
__device__ constexpr int64_t ceilDiv(int a, int64_t b) {
return ceilDiv((int64_t)a, b);
}
__device__ constexpr double ceilDiv(double a, double b) {
return ::ceil(a / b);
}
__device__ constexpr double ceilDiv(double a, int64_t b) {
return ::ceil(a / b);
}
__device__ constexpr double ceilDiv(int64_t a, double b) {
return ::ceil(a / b);
}
// Monotonic and precise lerp is described here:
// https://math.stackexchange.com/a/1798323
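// The branch on weight makes the endpoints exact: weight == 0 returns start
// from the first form and weight == 1 returns end from the second form.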
__device__ double lerp(double start, double end, double weight) {
if (weight < 0.5) {
return start + weight * (end - start);
} else {
return end - (end - start) * (1.0 - weight);
}
}
__device__ float lerp(float start, float end, float weight) {
if (weight < 0.5f) {
return start + weight * (end - start);
} else {
return end - (end - start) * (1.0f - weight);
}
}
__device__ float lerp(float start, float end, double weight) {
return lerp(start, end, static_cast<float>(weight));
}
__device__ constexpr int max(int a, int b) {
return a > b ? a : b;
}
__device__ constexpr int64_t max(int64_t a, int b) {
return a > (int64_t)b ? a : (int64_t)b;
}
__device__ constexpr int64_t max(int a, int64_t b) {
return (int64_t)a > b ? (int64_t)a : b;
}
__device__ constexpr int64_t max(int64_t a, int64_t b) {
return a > b ? a : b;
}
__device__ double fmax(double a, double b) {
// check and propagate NaN
if (a != a) {
return a;
} else if (b != b) {
return b;
} else {
return a > b ? a : b;
}
}
__device__ float fmax(float a, float b) {
// check and propagate NaN
if (a != a) {
return a;
} else if (b != b) {
return b;
} else {
return a > b ? a : b;
}
}
__device__ constexpr int min(int a, int b) {
return a > b ? b : a;
}
__device__ constexpr int64_t min(int64_t a, int b) {
return (int64_t)a > b ? b : (int64_t)a;
}
__device__ constexpr int64_t min(int a, int64_t b) {
return a > (int64_t)b ? (int64_t)b : a;
}
__device__ constexpr int64_t min(int64_t a, int64_t b) {
return a > b ? b : a;
}
__device__ double fmin(double a, double b) {
// check and propagate NaN
if (a != a) {
return a;
} else if (b != b) {
return b;
} else {
return a > b ? b : a;
}
}
__device__ float fmin(float a, float b) {
// check and propagate NaN
if (a != a) {
return a;
} else if (b != b) {
return b;
} else {
return a > b ? b : a;
}
}
__device__ constexpr int alignBufferSize(int buffer, int size) {
return (buffer + (size - 1)) & ~(size - 1);
}
__device__ double clamp(double x, double minv, double maxv) {
return fmin(fmax(x, minv), maxv);
}
__device__ float clamp(float x, double minv, double maxv) {
return fmin(fmax((double)x, minv), maxv);
}
__device__ int clamp(int x, int64_t minv, int64_t maxv) {
return min(max((int64_t)x, minv), maxv);
}
__device__ int64_t clamp(int64_t x, int64_t minv, int64_t maxv) {
return min(max(x, minv), maxv);
}
__device__ double frac(double x) {
return x - trunc(x);
}
__device__ float frac(float x) {
return x - trunc(x);
}
__device__ double reciprocal(double x) {
return 1 / x;
}
__device__ float reciprocal(float x) {
return 1 / x;
}
__device__ double relu(double x) {
return x <= 0 ? 0 : x;
}
__device__ float relu(float x) {
return x <= 0 ? 0 : x;
}
__device__ float relu(int64_t x) {
return x <= 0 ? 0 : x;
}
__device__ float relu(int x) {
return x <= 0 ? 0 : x;
}
__device__ double remainder(double a, double b) {
auto mod = ::fmod(a, b);
if ((mod != 0) && ((b < 0) != (mod < 0)))
mod += b;
return mod;
}
__device__ float remainder(float a, float b) {
auto mod = ::fmod(a, b);
if ((mod != 0) && ((b < 0) != (mod < 0)))
mod += b;
return mod;
}
__device__ double sigmoid(double x) {
return 1.0 / (1.0 + exp(-x));
}
__device__ float sigmoid(float x) {
return 1.0f / (1.0f + exp(-x));
}
__device__ double silu(double x) {
return x * sigmoid(x);
}
__device__ float silu(float x) {
return x * sigmoid(x);
}
__device__ double threshold(double x, double t, double v) {
return x <= t ? v : x;
}
__device__ float threshold(float x, double t, double v) {
return x <= t ? v : x;
}
__device__ int threshold(int x, int64_t t, int64_t v) {
return x <= t ? v : x;
}
__device__ int64_t threshold(int64_t x, int64_t t, int64_t v) {
return x <= t ? v : x;
}
__device__ constexpr int64_t remainder(int64_t a, int64_t b) {
auto mod = a % b;
if ((mod != 0) && ((b < 0) != (mod < 0)))
mod += b;
return mod;
}
__device__ constexpr int remainder(int a, int b) {
auto mod = a % b;
if ((mod != 0) && ((b < 0) != (mod < 0)))
mod += b;
return mod;
}
__device__ constexpr int64_t fmod(int64_t a, int64_t b) {
return a % b;
}
__device__ constexpr int fmod(int a, int b) {
return a % b;
}
__device__ constexpr double fmod(double a, double b) {
return ::fmod(a, b);
}
__device__ constexpr float fmod(float a, float b) {
return ::fmod(a, b);
}
template <typename T>
__device__ T pow(T a, T b) {
if (b < 0) {
if (a == 1) {
return 1;
} else if (a == -1) {
auto negative = (-b) % static_cast<T>(2);
return negative ? -1 : 1;
} else {
return 0;
}
} else {
T result = 1;
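// Exponentiation by squaring: O(log b) multiplications for non-negative b.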
while (b) {
if (b & 1) {
result *= a;
}
b /= 2;
a *= a;
}
return result;
}
}
template __device__ int pow<int>(int a, int b);
template __device__ int64_t pow<int64_t>(int64_t a, int64_t b);
template <>
__device__ float pow<float>(float a, float b) {
return ::pow(a, b);
}
template <>
__device__ double pow<double>(double a, double b) {
return ::pow(a, b);
}
__device__ float pow(float a, int b) {
return pow(a, (float)b);
}
__device__ double pow(double a, int b) {
return pow(a, (double)b);
}
__device__ float pow(float a, int64_t b) {
return pow(a, (float)b);
}
__device__ double pow(double a, int64_t b) {
return pow(a, (double)b);
}
__device__ int64_t pow(int64_t a, int b) {
return pow(a, (int64_t)b);
}
__device__ int64_t pow(int a, int64_t b) {
return pow((int64_t)a, b);
}
__device__ double rsqrt(double z) {
return ::rsqrt(z);
}
__device__ float rsqrt(float z) {
return ::rsqrtf(z);
}
__device__ int rsqrt(int z) {
return ::rsqrtf((float)z);
}
__device__ int64_t rsqrt(int64_t z) {
return ::rsqrt((double)z);
}
template <int size, int align = size>
struct alignas(align) TypelessData {
int8_t data[size];
template <typename T, std::enable_if_t<sizeof(T) == size, int> _ = 0>
TypelessData(T x) {
*reinterpret_cast<T*>(data) = x;
}
template <typename T, std::enable_if_t<sizeof(T) == size, int> _ = 0>
operator T() {
return *reinterpret_cast<T*>(data);
}
};
template <typename T>
TypelessData<sizeof(T), alignof(T)> erase_type(T x) {
return x;
}
template <typename T>
bool isfinite(T x) {
return ::isfinite(x);
}
template <typename T>
bool isinf(T x) {
return ::isinf(x);
}
////////////////////////////////////////////////////////////
// TODO: the following overloads are only needed for CUDA //
// 10.2 Please remove when CUDA 10.2 support is dropped //
////////////////////////////////////////////////////////////
bool isinf(int64_t x) {
return false;
}
bool isinf(int x) {
return false;
}
bool isinf(short x) {
return false;
}
bool isinf(char x) {
return false;
}
bool isinf(unsigned char x) {
return false;
}
bool isinf(bool x) {
return false;
}
bool isfinite(int64_t x) {
return true;
}
bool isfinite(int x) {
return true;
}
bool isfinite(short x) {
return true;
}
bool isfinite(char x) {
return true;
}
bool isfinite(unsigned char x) {
return true;
}
bool isfinite(bool x) {
return true;
}
////////////////////////////////////////////////////////////
// End TODO //
////////////////////////////////////////////////////////////
template <typename T>
bool isnan(T x) {
return x != x;
}
template <typename T>
bool isneginf(T x) {
return x < 0 && isinf(x);
}
template <typename T>
bool isposinf(T x) {
return x > 0 && isinf(x);
}
template <typename T>
bool isreal(T x) {
return true;
}
// Return the current value of the cycle counter
__device__ inline int64_t readCycleCounter() {
// Ensures preceding memory operations are completed. Doing this
// would make sense for measuring elapsed times enclosed with this
// function.
__threadfence();
return clock64();
}
__device__ float print_impl(const char* name, float value) {
printf(
"%s = %f @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value,
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ double print_impl(const char* name, double value) {
printf(
"%s = %lf @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value,
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ int print_impl(const char* name, int value) {
printf(
"%s = %d @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value,
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ int64_t print_impl(const char* name, int64_t value) {
printf(
"%s = %ld @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value,
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ bool print_impl(const char* name, bool value) {
printf(
"%s = %s @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value ? "true" : "false",
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ __half print_impl(const char* name, __half value) {
printf(
"%s = %f @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
__half2float(value),
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
#if __CUDACC_VER_MAJOR__ >= 11
__device__ __bfloat print_impl(const char* name, __bfloat value) {
printf(
"%s = %f @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
__bfloat2float(value),
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
#endif
#define print(...) print_impl(#__VA_ARGS__, (__VA_ARGS__))
| cf9a6562063b7a1f70058776bcdf70e6dbb52c26.cu | #define NVFUSER_DEFINE_MAGIC_ZERO \
__shared__ int nvfuser_zero_s; \
if (threadIdx.x == 0) \
nvfuser_zero_s = 0; \
__syncthreads(); \
atomicMin(&nvfuser_zero_s, threadIdx.x); \
int nvfuser_zero = nvfuser_zero_s;
#define NVFUSER_UPDATE_MAGIC_ZERO \
do { \
nvfuser_zero <<= 1; \
} while (0);
#ifdef __NVCC__
#include <assert.h>
#endif // __NVCC__
__device__ constexpr int ceilDiv(int a, int b) {
return (a + b - 1) / b;
}
__device__ constexpr int64_t ceilDiv(int64_t a, int64_t b) {
return (a + b - 1) / b;
}
__device__ constexpr int64_t ceilDiv(int64_t a, int b) {
return ceilDiv(a, (int64_t)b);
}
__device__ constexpr int64_t ceilDiv(int a, int64_t b) {
return ceilDiv((int64_t)a, b);
}
__device__ constexpr double ceilDiv(double a, double b) {
return std::ceil(a / b);
}
__device__ constexpr double ceilDiv(double a, int64_t b) {
return std::ceil(a / b);
}
__device__ constexpr double ceilDiv(int64_t a, double b) {
return std::ceil(a / b);
}
// Monotonic and precise lerp is described here:
// https://math.stackexchange.com/a/1798323
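// The branch on weight makes the endpoints exact: weight == 0 returns start
// from the first form and weight == 1 returns end from the second form.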
__device__ double lerp(double start, double end, double weight) {
if (weight < 0.5) {
return start + weight * (end - start);
} else {
return end - (end - start) * (1.0 - weight);
}
}
__device__ float lerp(float start, float end, float weight) {
if (weight < 0.5f) {
return start + weight * (end - start);
} else {
return end - (end - start) * (1.0f - weight);
}
}
__device__ float lerp(float start, float end, double weight) {
return lerp(start, end, static_cast<float>(weight));
}
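// Worked sketch (illustrative, not part of the original file): splitting at
// weight == 0.5 keeps the interpolation monotonic in weight and exact at both
// endpoints, i.e. lerp(a, b, 0) == a and lerp(a, b, 1) == b, whereas the
// single form a + w * (b - a) can miss b at w == 1 under rounding.
__device__ inline float lerpEndpointsExample(float a, float b) {
  // each call returns its endpoint exactly, so this is just a + b
  return lerp(a, b, 0.0f) + lerp(a, b, 1.0f);
}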
__device__ constexpr int max(int a, int b) {
return a > b ? a : b;
}
__device__ constexpr int64_t max(int64_t a, int b) {
return a > (int64_t)b ? a : (int64_t)b;
}
__device__ constexpr int64_t max(int a, int64_t b) {
return (int64_t)a > b ? (int64_t)a : b;
}
__device__ constexpr int64_t max(int64_t a, int64_t b) {
return a > b ? a : b;
}
__device__ double fmax(double a, double b) {
// check and propagate NaN
if (a != a) {
return a;
} else if (b != b) {
return b;
} else {
return a > b ? a : b;
}
}
__device__ float fmax(float a, float b) {
// check and propagate NaN
if (a != a) {
return a;
} else if (b != b) {
return b;
} else {
return a > b ? a : b;
}
}
__device__ constexpr int min(int a, int b) {
return a > b ? b : a;
}
__device__ constexpr int64_t min(int64_t a, int b) {
return (int64_t)a > b ? b : (int64_t)a;
}
__device__ constexpr int64_t min(int a, int64_t b) {
return a > (int64_t)b ? (int64_t)b : a;
}
__device__ constexpr int64_t min(int64_t a, int64_t b) {
return a > b ? b : a;
}
__device__ double fmin(double a, double b) {
// check and propagate NaN
if (a != a) {
return a;
} else if (b != b) {
return b;
} else {
return a > b ? b : a;
}
}
__device__ float fmin(float a, float b) {
// check and propagate NaN
if (a != a) {
return a;
} else if (b != b) {
return b;
} else {
return a > b ? b : a;
}
}
__device__ constexpr int alignBufferSize(int buffer, int size) {
return (buffer + (size - 1)) & ~(size - 1);
}
__device__ double clamp(double x, double minv, double maxv) {
return fmin(fmax(x, minv), maxv);
}
__device__ float clamp(float x, double minv, double maxv) {
return fmin(fmax((double)x, minv), maxv);
}
__device__ int clamp(int x, int64_t minv, int64_t maxv) {
return min(max((int64_t)x, minv), maxv);
}
__device__ int64_t clamp(int64_t x, int64_t minv, int64_t maxv) {
return min(max(x, minv), maxv);
}
__device__ double frac(double x) {
return x - trunc(x);
}
__device__ float frac(float x) {
return x - trunc(x);
}
__device__ double reciprocal(double x) {
return 1 / x;
}
__device__ float reciprocal(float x) {
return 1 / x;
}
__device__ double relu(double x) {
return x <= 0 ? 0 : x;
}
__device__ float relu(float x) {
return x <= 0 ? 0 : x;
}
__device__ float relu(int64_t x) {
return x <= 0 ? 0 : x;
}
__device__ float relu(int x) {
return x <= 0 ? 0 : x;
}
__device__ double remainder(double a, double b) {
auto mod = ::fmod(a, b);
if ((mod != 0) && ((b < 0) != (mod < 0)))
mod += b;
return mod;
}
__device__ float remainder(float a, float b) {
auto mod = ::fmod(a, b);
if ((mod != 0) && ((b < 0) != (mod < 0)))
mod += b;
return mod;
}
__device__ double sigmoid(double x) {
return 1.0 / (1.0 + exp(-x));
}
__device__ float sigmoid(float x) {
return 1.0f / (1.0f + exp(-x));
}
__device__ double silu(double x) {
return x * sigmoid(x);
}
__device__ float silu(float x) {
return x * sigmoid(x);
}
__device__ double threshold(double x, double t, double v) {
return x <= t ? v : x;
}
__device__ float threshold(float x, double t, double v) {
return x <= t ? v : x;
}
__device__ int threshold(int x, int64_t t, int64_t v) {
return x <= t ? v : x;
}
__device__ int64_t threshold(int64_t x, int64_t t, int64_t v) {
return x <= t ? v : x;
}
__device__ constexpr int64_t remainder(int64_t a, int64_t b) {
auto mod = a % b;
if ((mod != 0) && ((b < 0) != (mod < 0)))
mod += b;
return mod;
}
__device__ constexpr int remainder(int a, int b) {
auto mod = a % b;
if ((mod != 0) && ((b < 0) != (mod < 0)))
mod += b;
return mod;
}
__device__ constexpr int64_t fmod(int64_t a, int64_t b) {
return a % b;
}
__device__ constexpr int fmod(int a, int b) {
return a % b;
}
__device__ constexpr double fmod(double a, double b) {
return ::fmod(a, b);
}
__device__ constexpr float fmod(float a, float b) {
return ::fmod(a, b);
}
template <typename T>
__device__ T pow(T a, T b) {
if (b < 0) {
if (a == 1) {
return 1;
} else if (a == -1) {
auto negative = (-b) % static_cast<T>(2);
return negative ? -1 : 1;
} else {
return 0;
}
} else {
T result = 1;
while (b) {
if (b & 1) {
result *= a;
}
b /= 2;
a *= a;
}
return result;
}
}
template __device__ int pow<int>(int a, int b);
template __device__ int64_t pow<int64_t>(int64_t a, int64_t b);
template <>
__device__ float pow<float>(float a, float b) {
return ::pow(a, b);
}
template <>
__device__ double pow<double>(double a, double b) {
return ::pow(a, b);
}
__device__ float pow(float a, int b) {
return pow(a, (float)b);
}
__device__ double pow(double a, int b) {
return pow(a, (double)b);
}
__device__ float pow(float a, int64_t b) {
return pow(a, (float)b);
}
__device__ double pow(double a, int64_t b) {
return pow(a, (double)b);
}
__device__ int64_t pow(int64_t a, int b) {
return pow(a, (int64_t)b);
}
__device__ int64_t pow(int a, int64_t b) {
return pow((int64_t)a, b);
}
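// Illustrative check (not part of the original file): the integer overloads
// use exponentiation by squaring, so pow(3, 4) squares the base 3 -> 9 -> 81
// and multiplies the result in on the set bits of the exponent; a negative
// integer exponent yields 0 unless |base| == 1.
__device__ inline int64_t powUsageExample() {
  int64_t a = pow((int64_t)3, (int64_t)4); // 81
  int64_t b = pow((int64_t)-1, (int64_t)-3); // -1
  return a + b; // 80
}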
__device__ double rsqrt(double z) {
return ::rsqrt(z);
}
__device__ float rsqrt(float z) {
return ::rsqrtf(z);
}
__device__ int rsqrt(int z) {
return ::rsqrtf((float)z);
}
__device__ int64_t rsqrt(int64_t z) {
return ::rsqrt((double)z);
}
template <int size, int align = size>
struct alignas(align) TypelessData {
int8_t data[size];
template <typename T, std::enable_if_t<sizeof(T) == size, int> _ = 0>
TypelessData(T x) {
*reinterpret_cast<T*>(data) = x;
}
template <typename T, std::enable_if_t<sizeof(T) == size, int> _ = 0>
operator T() {
return *reinterpret_cast<T*>(data);
}
};
template <typename T>
TypelessData<sizeof(T), alignof(T)> erase_type(T x) {
return x;
}
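// Round-trip sketch (an illustrative assumption, not part of the original
// file): erase_type packs a value into raw aligned bytes and the templated
// conversion operator recovers a value of the same size unchanged.
inline float eraseTypeRoundTripExample(float x) {
  TypelessData<sizeof(float), alignof(float)> raw = erase_type(x);
  return static_cast<float>(raw); // same bits as x
}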
template <typename T>
bool isfinite(T x) {
return ::isfinite(x);
}
template <typename T>
bool isinf(T x) {
return ::isinf(x);
}
////////////////////////////////////////////////////////////
// TODO: the following overloads are only needed for CUDA //
// 10.2 Please remove when CUDA 10.2 support is dropped //
////////////////////////////////////////////////////////////
bool isinf(int64_t x) {
return false;
}
bool isinf(int x) {
return false;
}
bool isinf(short x) {
return false;
}
bool isinf(char x) {
return false;
}
bool isinf(unsigned char x) {
return false;
}
bool isinf(bool x) {
return false;
}
bool isfinite(int64_t x) {
return true;
}
bool isfinite(int x) {
return true;
}
bool isfinite(short x) {
return true;
}
bool isfinite(char x) {
return true;
}
bool isfinite(unsigned char x) {
return true;
}
bool isfinite(bool x) {
return true;
}
////////////////////////////////////////////////////////////
// End TODO //
////////////////////////////////////////////////////////////
template <typename T>
bool isnan(T x) {
return x != x;
}
template <typename T>
bool isneginf(T x) {
return x < 0 && isinf(x);
}
template <typename T>
bool isposinf(T x) {
return x > 0 && isinf(x);
}
template <typename T>
bool isreal(T x) {
return true;
}
// Return the current value of the cycle counter
__device__ inline int64_t readCycleCounter() {
// Ensures preceding memory operations are completed. Doing this
// would make sense for measuring elapsed times enclosed with this
// function.
__threadfence();
return clock64();
}
__device__ float print_impl(const char* name, float value) {
printf(
"%s = %f @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value,
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ double print_impl(const char* name, double value) {
printf(
"%s = %lf @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value,
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ int print_impl(const char* name, int value) {
printf(
"%s = %d @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value,
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ int64_t print_impl(const char* name, int64_t value) {
printf(
"%s = %ld @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value,
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ bool print_impl(const char* name, bool value) {
printf(
"%s = %s @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
value ? "true" : "false",
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
__device__ __half print_impl(const char* name, __half value) {
printf(
"%s = %f @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
__half2float(value),
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
#if __CUDACC_VER_MAJOR__ >= 11
__device__ __bfloat print_impl(const char* name, __bfloat value) {
printf(
"%s = %f @ threadIdx=(%d,%d,%d), blockIdx=(%d,%d,%d)\n",
name,
__bfloat2float(value),
(int)threadIdx.x,
(int)threadIdx.y,
(int)threadIdx.z,
(int)blockIdx.x,
(int)blockIdx.y,
(int)blockIdx.z);
return value;
}
#endif
#define print(...) print_impl(#__VA_ARGS__, (__VA_ARGS__))
|
393034518aea2bdbdf5442892074977e5abc13eb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
* Copyright (C) 2011 by Soumith Chintala*
* [email protected] *
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Redistribution under a license not approved by the Open Source
* Initiative (http://www.opensource.org) must display the
* following acknowledgement in all advertising material:
* This product includes software developed at the Courant
* Institute of Mathematical Sciences (http://cims.nyu.edu).
* * The names of the authors may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ebl_cudaops.h"
#ifdef __CUDA__
// #include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
// #include <thrust/reduce.h>
// #include <thrust/inner_product.h>
namespace ebl {
#define CUDA_SHARED_MEM_SIZE (4*1024-32)
// this is given by nVidia: max shared mem per block
/*
* Description:
* base conv2D routine: 3D input, 3D output, 4D kernel
*
* - all chunks of data should be contiguous
* - the swapkernel flag can be used to generate a conv2 instead of xcorr2
* - the templated kernel size is useful to generate code that's 2x faster
* but can be set to 0 to allow arbitrary kernel sizes
*/
template <bool swapkernel, int T_kernel_h, int T_kernel_w>
__global__ void conv2generic(float *input, float *kernel, float *output,
int input_n, int input_h, int input_w,
int kernel_n, int kernel_h, int kernel_w,
int stride_h, int stride_w) {
// output dimensions
int output_h = (input_h - kernel_h) / stride_h + 1;
int output_w = (input_w - kernel_w) / stride_w + 1;
// xcorr or conv
int koffset = swapkernel ? kernel_w*kernel_h-1 : 0;
// nb outputs
int output_n = kernel_n / input_n;
// generate offsets according to block/thread ids
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
int oo_start = blockIdx.x;
int oo_end = oo_start+1;
int ii_start = (blockIdx.x / output_n) * input_n;
int ii_end = ii_start + input_n;
// nb threads, unique thread id
int tid = blockDim.x*blockDim.y*threadIdx.z + blockDim.x * threadIdx.y
+ threadIdx.x;
int nthreads = blockDim.x * blockDim.y * blockDim.z;
// iterators
int oo, ii, xx, yy, kx, ky, kk;
// do the kernels fit in shared mem ?
if (input_n*kernel_w*kernel_h <= CUDA_SHARED_MEM_SIZE) {
// put the kernel in shared memory
__shared__ float shared_kernel[CUDA_SHARED_MEM_SIZE];
// first thread of each block does the copy
for (kk = tid; kk < kernel_w*kernel_h*input_n; kk += nthreads) {
shared_kernel[kk] = kernel[input_n*kernel_w*kernel_h
*(oo_start % output_n) + kk];
}
__syncthreads();
// templated kernel size
if ((T_kernel_w > 0) && (T_kernel_h > 0)) {
// unrolled convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for(ii = ii_start; ii < ii_end; ii++) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + ii*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w
+ xx;
float *kernel_p = shared_kernel + (ii % input_n)*kernel_w*kernel_h
+ koffset;
float sum = 0;
if (swapkernel) {
#pragma unroll
for(ky = 0; ky < T_kernel_h; ky++) {
#pragma unroll
for(kx = 0; kx < T_kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
#pragma unroll
for(ky = 0; ky < T_kernel_h; ky++) {
#pragma unroll
for(kx = 0; kx < T_kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
} else {
// default convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for(ii = ii_start; ii < ii_end; ii++) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + ii*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w
+ xx;
float *kernel_p = shared_kernel + (ii % input_n)
* kernel_w * kernel_h + koffset;
float sum = 0;
if (swapkernel) {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
}
} else { // not enough shared mem for kernels, simply stream them
// convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for(ii = ii_start; ii < ii_end; ii++) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + ii*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w
+ xx;
float *kernel_p = kernel + ((oo % output_n) * input_n
+ (ii % input_n))*kernel_w*kernel_h
+ koffset;
float sum = 0;
if (swapkernel) {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
}
}
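// Illustrative helper (an assumption for exposition, not part of the original
// file): conv2generic computes a "valid" convolution, so each spatial output
// extent follows output = (input - kernel) / stride + 1, which is also how
// output_h and output_w are derived inside the kernel.
static inline int cuda_conv_output_size(int input, int kernel, int stride) {
  return (input - kernel) / stride + 1;
}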
void ebl::cuda_convolution_3d(idx<float32> &in, idx<float32> &ker,
idx<float32> &out,
intg stride_x, intg stride_y, int devid) {
if (!in.contiguousp() || !ker.contiguousp() ||
!out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
hipSetDevice(devid);
// copy input and kernels to gpu, allocate output on gpu
hipError_t err;
float *input_data, *kernel_data, *output_data;
LOCAL_TIMING_START();
hipMalloc((void**) &input_data, in.nelements() * sizeof(float));
hipMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
hipMemcpyHostToDevice);
hipMalloc((void**) &kernel_data, ker.nelements() * sizeof(float));
hipMemcpy( kernel_data, ker.idx_ptr(), ker.nelements() * sizeof(float),
hipMemcpyHostToDevice);
hipMalloc((void**) &output_data, out.nelements() * sizeof(float));
hipMemset(output_data, 0, out.nelements() * sizeof(float));
// set the number of blocks and threads
int nthreads_x = 32;
int nthreads_y = 8;
int block_height = floor(16 / out.dim(0));
if (block_height < 1)
block_height = 1;
dim3 blocks(out.dim(0),block_height);
dim3 threads(nthreads_x,nthreads_y);
// sync any previous kernel exec
hipDeviceSynchronize();
LOCAL_TIMING_REPORT("convgpu initial transfer time" << in.dim(0));
LOCAL_TIMING2_START();
if ((ker.dim(2) == 3) && (ker.dim(1) == 3))
hipLaunchKernelGGL(( conv2generic <false, 3, 3>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 5) && (ker.dim(1) == 5))
hipLaunchKernelGGL(( conv2generic <false, 5, 5>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 7) && (ker.dim(1) == 7))
hipLaunchKernelGGL(( conv2generic <false, 7, 7>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 9) && (ker.dim(1) == 9))
hipLaunchKernelGGL(( conv2generic <false, 9, 9>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 11) && (ker.dim(1) == 11))
hipLaunchKernelGGL(( conv2generic <false, 11, 11>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 13) && (ker.dim(1) == 13))
hipLaunchKernelGGL(( conv2generic <false, 13, 13>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 4) && (ker.dim(1) == 4))
hipLaunchKernelGGL(( conv2generic <false, 4, 4>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 6) && (ker.dim(1) == 6))
hipLaunchKernelGGL(( conv2generic <false, 6, 6>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 8) && (ker.dim(1) == 8))
hipLaunchKernelGGL(( conv2generic <false, 8, 8>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 10) && (ker.dim(1) == 10))
hipLaunchKernelGGL(( conv2generic <false, 10, 10>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 12) && (ker.dim(1) == 12))
hipLaunchKernelGGL(( conv2generic <false, 12, 12>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else
hipLaunchKernelGGL(( conv2generic <false, 0 , 0>) , dim3(blocks), dim3(threads), 0, 0, input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
// sync & clean
hipDeviceSynchronize();
LOCAL_TIMING2_REPORT("convgpu kernel execution time");
hipMemcpy(out.idx_ptr(), output_data, out.nelements() * sizeof(float),
hipMemcpyDeviceToHost);
hipFree(input_data);
hipFree(kernel_data);
hipFree(output_data);
// check for errors
err = hipGetLastError();
if (err != hipSuccess)
eblerror("cuda error:\t" << hipGetErrorString(err));
}
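// Minimal usage sketch (comments only; the tensor constructors below are an
// assumption about the libidx API, not code from the original file):
//   idx<float32> in(2, 16, 16); // 2 input maps of 16x16
//   idx<float32> ker(8, 5, 5); // out.dim(0) * in.dim(0) = 4 * 2 kernels
//   idx<float32> out(4, 12, 12); // (16 - 5) / 1 + 1 = 12 per dimension
//   ebl::cuda_convolution_3d(in, ker, out, 1, 1, -1); // -1: current device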
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// ebl_convolution_3dmap
/////////////////////////////////////////////////////////////////////////////
/*
* Description:
* base conv2D routine: 3D input, 3D output, 4D kernel
*
* - all chunks of data should be contiguous
* - the swapkernel flag can be used to generate a conv2 instead of xcorr2
* - the templated kernel size is useful to generate code that's 2x faster
* but can be set to 0 to allow arbitrary kernel sizes
* ---- the table should have the first dim with the outputs, each output
* ---- should have a fanin set of inputs contiguously
*/
template <bool swapkernel, int T_kernel_h, int T_kernel_w>
__global__ void conv2mapgeneric(float *input, float *kernel, float *output,
int input_n, int input_h, int input_w,
int kernel_n, int kernel_h, int kernel_w,
int stride_h, int stride_w,
long *table, int fanin)
{
// output dimensions
int output_h = (input_h - kernel_h) / stride_h + 1;
int output_w = (input_w - kernel_w) / stride_w + 1;
// xcorr or conv
int koffset = swapkernel ? kernel_w*kernel_h-1 : 0;
// nb outputs
// int output_n = kernel_n / fanin;
// generate offsets according to block/thread ids
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
int oo_start = blockIdx.x;
int oo_end = oo_start+1;
int table_start = blockIdx.x * (fanin * 2);
int table_end = table_start + (fanin * 2);
// nb threads, unique thread id
int tid = blockDim.x*blockDim.y*threadIdx.z
+ blockDim.x * threadIdx.y + threadIdx.x;
int nthreads = blockDim.x * blockDim.y * blockDim.z;
// iterators
int oo, ii, xx, yy, kx, ky, kk;
// do the kernels fit in shared mem ?
if (kernel_w*kernel_h*kernel_n <= CUDA_SHARED_MEM_SIZE) {
// put the kernel in shared memory
__shared__ float shared_kernel[CUDA_SHARED_MEM_SIZE];
// first thread of each block does the copy
for (kk = tid; kk < kernel_w*kernel_h*kernel_n; kk += nthreads) {
shared_kernel[kk] = kernel[kk];
}
__syncthreads();
// templated kernel size
if ((T_kernel_w > 0) && (T_kernel_h > 0)) {
// unrolled convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for (ii = table_start; ii < table_end; ii = ii + 2) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + table[ii]*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w + xx;
float *kernel_p = shared_kernel
+ table[ii + 1] *kernel_w*kernel_h + koffset;
float sum = 0;
if (swapkernel) {
#pragma unroll
for(ky = 0; ky < T_kernel_h; ky++) {
#pragma unroll
for(kx = 0; kx < T_kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
#pragma unroll
for(ky = 0; ky < T_kernel_h; ky++) {
#pragma unroll
for(kx = 0; kx < T_kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
} else {
// default convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for (ii = table_start; ii < table_end; ii = ii + 2) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dims (between input image and the mask)
float *input_p = input + table[ii]*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w
+ xx;
float *kernel_p = shared_kernel
+ table[ii + 1] * kernel_w * kernel_h + koffset;
float sum = 0;
if (swapkernel) {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
}
} else { // not enough shared mem for kernels, simply stream them
// convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for (ii = table_start; ii < table_end; ii = ii + 2) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + table[ii]*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w + xx;
float *kernel_p = kernel + table[ii + 1] *kernel_w*kernel_h + koffset;
float sum = 0;
if (swapkernel) {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
}
}
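// Illustrative connection-table layout (exposition only, not part of the
// original file): with fanin == 2, each output map owns 2 * fanin consecutive
// long entries holding (input slice, kernel slice) pairs, e.g.
//   long table[] = { /* output 0 */ 0, 0, 1, 1,
//                    /* output 1 */ 1, 2, 2, 3 };
// conv2mapgeneric then reads table[table_start + 2 * k] as the input index
// and table[table_start + 2 * k + 1] as the kernel index for that output.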
void ebl::cuda_convolution_3dmap(idx<float32> &in, idx<float32> &ker,
idx<float32> &out,
intg stride_x, intg stride_y,
idx<intg> table, int fanin, int devid) {
if (!in.contiguousp() || !ker.contiguousp() ||
!out.contiguousp() || !table.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
hipSetDevice(devid);
// copy input and kernels to gpu, allocate output on gpu
hipError_t err;
float *input_data, *kernel_data, *output_data;
long *table_data;
LOCAL_TIMING_START();
hipMalloc((void**) &input_data, in.nelements() * sizeof(float));
hipMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
hipMemcpyHostToDevice);
hipMalloc((void**) &kernel_data, ker.nelements() * sizeof(float));
hipMemcpy( kernel_data, ker.idx_ptr(), ker.nelements() * sizeof(float),
hipMemcpyHostToDevice);
hipMalloc((void**) &output_data, out.nelements() * sizeof(float));
hipMemset(output_data, 0, out.nelements() * sizeof(float));
hipMalloc((void**) &table_data, table.nelements() * sizeof(intg));
hipMemcpy(table_data, table.idx_ptr(), table.nelements() * sizeof(intg),
hipMemcpyHostToDevice);
// set the number of blocks and threads
int nthreads_x = 32;
int nthreads_y = 8;
int block_height = floor(16 / out.dim(0));
if (block_height < 1)
block_height = 1;
dim3 blocks(out.dim(0),block_height);
dim3 threads(nthreads_x,nthreads_y);
// cout << "DEBUG INFO:" <<"\nnthread_x:" << nthreads_x
// <<"\nnthread_y:" << nthreads_y
// <<"\nblock_height:" << block_height
// <<"\nblock_width:" << out.dim(0)
// <<"\tinput_elements: "<<in.nelements()
// <<"\tkernel_elements: "<<ker.nelements()
// <<"\toutput_elements: "<<out.nelements()
// << "\nin0:"<<in.dim(0)<<"\n in1:"<< in.dim(1)<<"\n in2:"<< in.dim(2)
// <<"\nkernel_n: "<<out.dim(0) *fanin<<"\n ker.dim1:"
// <<ker.dim(1)<<"\n ker.dim2:"
// <<ker.dim(2)
// <<"\nfanin:" <<fanin <<endl;
// sync any previous kernel exec
hipDeviceSynchronize();
LOCAL_TIMING_REPORT("convgpu initial transfer time" << in.dim(0));
LOCAL_TIMING2_START();
if ((ker.dim(2) == 3) && (ker.dim(1) == 3))
hipLaunchKernelGGL(( conv2mapgeneric <false, 3, 3>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 5) && (ker.dim(1) == 5))
hipLaunchKernelGGL(( conv2mapgeneric <false, 5, 5>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 7) && (ker.dim(1) == 7))
hipLaunchKernelGGL(( conv2mapgeneric <false, 7, 7>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 9) && (ker.dim(1) == 9))
hipLaunchKernelGGL(( conv2mapgeneric <false, 9, 9>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 11) && (ker.dim(1) == 11))
hipLaunchKernelGGL(( conv2mapgeneric <false, 11, 11>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 13) && (ker.dim(1) == 13))
hipLaunchKernelGGL(( conv2mapgeneric <false, 13, 13>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1), ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 4) && (ker.dim(1) == 4))
hipLaunchKernelGGL(( conv2mapgeneric <false, 4, 4>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 6) && (ker.dim(1) == 6))
hipLaunchKernelGGL(( conv2mapgeneric <false, 6, 6>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 8) && (ker.dim(1) == 8))
hipLaunchKernelGGL(( conv2mapgeneric <false, 8, 8>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 10) && (ker.dim(1) == 10))
hipLaunchKernelGGL(( conv2mapgeneric <false, 10, 10>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 12) && (ker.dim(1) == 12))
hipLaunchKernelGGL(( conv2mapgeneric <false, 12, 12>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else
hipLaunchKernelGGL(( conv2mapgeneric <false, 0 , 0>) , dim3(blocks), dim3(threads), 0, 0, input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
LOCAL_TIMING2_REPORT("convgpu kernel execution time");
// sync & clean
hipDeviceSynchronize();
hipMemcpy(out.idx_ptr(), output_data, out.nelements() * sizeof(float),
hipMemcpyDeviceToHost);
hipFree(input_data);
hipFree(kernel_data);
hipFree(output_data);
hipFree(table_data);
// check for errors
err = hipGetLastError();
if (err != hipSuccess)
eblerror("cuda error:\t" << hipGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_tanh
/////////////////////////////////////////////////////////////////////////////
struct tanh_functor
{
__host__ __device__ float operator()(const float& x) const
{
return tanh(x);
}
};
void ebl::cuda_tanh(idx<float32> &in, idx<float32> &out, int devid) {
if (!in.contiguousp() || !out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in tanh module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
hipSetDevice(devid);
// copy input on gpu
hipError_t err;
float *input_data;
hipMalloc((void**) &input_data, in.nelements() * sizeof(float));
hipMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
hipMemcpyHostToDevice);
// apply tanh
thrust::device_ptr<float> in_thrustptr(input_data);
thrust::transform(in_thrustptr, in_thrustptr + in.nelements(),
in_thrustptr,
tanh_functor());
hipMemcpy(out.idx_ptr(), input_data, in.nelements() * sizeof(float),
hipMemcpyDeviceToHost);
// sync & clean
hipDeviceSynchronize();
hipFree(input_data);
// check for errors
err = hipGetLastError();
if (err != hipSuccess)
eblerror("Cuda Error:\t" << hipGetErrorString(err));
}
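// Pattern sketch (illustrative, not part of the original file): each
// element-wise wrapper below repeats the same recipe -- copy the idx data to
// the device, apply thrust::transform in place with a small functor, and copy
// the result back. The core step in isolation looks like:
//   thrust::device_ptr<float> p(device_buffer);
//   thrust::transform(p, p + n, p, tanh_functor());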
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_power
/////////////////////////////////////////////////////////////////////////////
struct power_functor
{
const float value;
power_functor(float pow_) : value(pow_) {}
__host__ __device__ float operator()(const float& x) const
{
return pow(x, value);
}
};
void ebl::cuda_power(idx<float32> &in, idx<float32> &out, float pow, int devid) {
if (!in.contiguousp() || !out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in power module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
hipSetDevice(devid);
// copy input on gpu
hipError_t err;
float *input_data;
hipMalloc((void**) &input_data, in.nelements() * sizeof(float));
hipMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
hipMemcpyHostToDevice);
// apply power
thrust::device_ptr<float> in_thrustptr(input_data);
thrust::transform(in_thrustptr, in_thrustptr + in.nelements(),
in_thrustptr,
power_functor(pow));
hipMemcpy(out.idx_ptr(), input_data, in.nelements() * sizeof(float),
hipMemcpyDeviceToHost);
// sync & clean
hipDeviceSynchronize();
hipFree(input_data);
// check for errors
err = hipGetLastError();
if (err != hipSuccess)
eblerror("Cuda Error:\t" << hipGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_addc
/////////////////////////////////////////////////////////////////////////////
struct addvalue_functor
{
const float value;
addvalue_functor(float value_) : value(value_) {}
__host__ __device__ float operator()(const float& x) const {
return (x+value);
}
};
void ebl::cuda_addc(idx<float32> &in, idx<float32> &bias,
idx<float32> &out, int devid) {
if (!in.contiguousp() || !out.contiguousp() || !bias.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in addc module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
hipSetDevice(devid);
// copy input on gpu
hipError_t err;
float *input_data;
hipMalloc((void**) &input_data, in.nelements() * sizeof(float));
hipMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
hipMemcpyHostToDevice);
// apply addc for each slice using add
int nslices = in.dim(0);
long elements_per_slice = in.nelements() / nslices;
for (int i=0; i < nslices; i++) {
thrust::device_ptr<float> in_thrustptr(input_data + elements_per_slice * i);
thrust::transform(in_thrustptr, in_thrustptr + elements_per_slice,
in_thrustptr,
addvalue_functor(bias.get(i)));
}
hipMemcpy(out.idx_ptr(), input_data, in.nelements() * sizeof(float),
hipMemcpyDeviceToHost);
// sync & clean
hipDeviceSynchronize();
hipFree(input_data);
// check for errors
err = hipGetLastError();
if (err != hipSuccess)
eblerror("Cuda Error:\t" << hipGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_div
/////////////////////////////////////////////////////////////////////////////
struct div_elementwise_functor
{
__host__ __device__ float operator()(const float& x, const float&y) const {
return (x/y);
}
};
void ebl::cuda_div(idx<float32> &in1, idx<float32> &in2,
idx<float32> &out, int devid) {
if (!in1.contiguousp() || !out.contiguousp() || !in2.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if (in1.nelements() != in2.nelements() || in2.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in cuda_div module"
<< "in1 size:" << in1.nelements()
<< "in2 size:" << in2.nelements()
<< "out size:" << out.nelements());
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
hipSetDevice(devid);
// copy input on gpu
hipError_t err;
float *input1_data;
float *input2_data;
float *output_data;
hipMalloc((void**) &input1_data, in1.nelements() * sizeof(float));
hipMalloc((void**) &input2_data, in2.nelements() * sizeof(float));
hipMemcpy(input1_data, in1.idx_ptr(), in1.nelements() * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(input2_data, in2.idx_ptr(), in2.nelements() * sizeof(float),
hipMemcpyHostToDevice);
hipMalloc((void**) &output_data, out.nelements() * sizeof(float));
hipMemset(output_data, 0, out.nelements() * sizeof(float));
thrust::device_ptr<float> in1_thrustptr(input1_data);
thrust::device_ptr<float> in2_thrustptr(input2_data);
thrust::device_ptr<float> output_thrustptr(output_data);
thrust::transform(in1_thrustptr, in1_thrustptr + in1.nelements(),
in2_thrustptr, output_thrustptr, div_elementwise_functor());
hipMemcpy(out.idx_ptr(), output_data, out.nelements() * sizeof(float),
hipMemcpyDeviceToHost);
// sync & clean
hipDeviceSynchronize();
hipFree(input1_data);
hipFree(input2_data);
hipFree(output_data);
// check for errors
err = hipGetLastError();
if (err != hipSuccess)
eblerror("Cuda Error:\t" << hipGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_fsum
/////////////////////////////////////////////////////////////////////////////
struct fsum_functor
{
__host__ __device__ float operator()(const float& x, const float& y) const {
return (x+y);
}
};
struct fsumdiv_functor
{
const float value;
fsumdiv_functor(float value_) : value(value_) {}
__host__ __device__ float operator()(const float& x, const float& y) const {
return (x+y)/value;
}
};
void ebl::cuda_fsum(idx<float32> &in, idx<float32> &out, bool div, int devid) {
if (!in.contiguousp() || !out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in addc module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
hipSetDevice(devid);
// copy input on gpu
hipError_t err;
float *input_data;
hipMalloc((void**) &input_data, in.nelements() * sizeof(float));
hipMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
hipMemcpyHostToDevice);
// apply addc for each slice using add
int nslices = in.dim(0);
long elements_per_slice = in.nelements() / nslices;
thrust::device_ptr<float> in_thrustptr(input_data);
for (int i=1; i < nslices; i++) {
thrust::device_ptr<float> in2_thrustptr(input_data + elements_per_slice * i);
if(div)
thrust::transform(in2_thrustptr, in2_thrustptr + elements_per_slice,
in_thrustptr, in_thrustptr,
fsumdiv_functor(nslices));
else
thrust::transform(in2_thrustptr, in2_thrustptr + elements_per_slice,
in_thrustptr, in_thrustptr,
fsum_functor());
}
hipMemcpy(out.idx_ptr(), input_data, elements_per_slice * sizeof(float),
hipMemcpyDeviceToHost);
// sync & clean
hipDeviceSynchronize();
hipFree(input_data);
// check for errors
err = hipGetLastError();
if (err != hipSuccess)
eblerror("Cuda Error:\t" << hipGetErrorString(err));
}
////////////////////////////////////////////////////////////////////////////
///// cuda_threshold
struct threshold_functor
{
const float thres;
const float val;
threshold_functor(float thres_, float value_) : val(value_), thres(thres_) {}
__host__ __device__ float operator()(const float& x) const {
return (x > thres ? x : val);
}
};
void ebl::cuda_threshold(idx<float32> &in,
idx<float32> &out, float32 thres, float32 val,
int devid) {
if (!in.contiguousp() || !out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in addc module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
hipSetDevice(devid);
// copy input on gpu
hipError_t err;
float *input_data;
hipMalloc((void**) &input_data, in.nelements() * sizeof(float));
hipMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
hipMemcpyHostToDevice);
thrust::device_ptr<float> in_thrustptr(input_data);
thrust::transform(in_thrustptr, in_thrustptr + in.nelements(),
in_thrustptr, threshold_functor(thres, val));
hipMemcpy(out.idx_ptr(), input_data, in.nelements() * sizeof(float),
hipMemcpyDeviceToHost);
// sync & clean
hipDeviceSynchronize();
hipFree(input_data);
// check for errors
err = hipGetLastError();
if (err != hipSuccess)
eblerror("Cuda Error:\t" << hipGetErrorString(err));
}
} // end namespace ebl
#endif
| 393034518aea2bdbdf5442892074977e5abc13eb.cu | /***************************************************************************
* Copyright (C) 2011 by Soumith Chintala*
* [email protected] *
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Redistribution under a license not approved by the Open Source
* Initiative (http://www.opensource.org) must display the
* following acknowledgement in all advertising material:
* This product includes software developed at the Courant
* Institute of Mathematical Sciences (http://cims.nyu.edu).
* * The names of the authors may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
***************************************************************************/
#include "ebl_cudaops.h"
#ifdef __CUDA__
// #include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
// #include <thrust/reduce.h>
// #include <thrust/inner_product.h>
namespace ebl {
#define CUDA_SHARED_MEM_SIZE (4*1024-32)
// this is given by nVidia: max shared mem per block
/*
* Description:
* base conv2D routine: 3D input, 3D output, 4D kernel
*
* - all chunks of data should be contiguous
* - the swapkernel flag can be used to generate a conv2 instead of xcorr2
* - the templated kernel size is useful to generate code that's 2x faster
* but can be set to 0 to allow arbitrary kernel sizes
*/
template <bool swapkernel, int T_kernel_h, int T_kernel_w>
__global__ void conv2generic(float *input, float *kernel, float *output,
int input_n, int input_h, int input_w,
int kernel_n, int kernel_h, int kernel_w,
int stride_h, int stride_w) {
// output dimensions
int output_h = (input_h - kernel_h) / stride_h + 1;
int output_w = (input_w - kernel_w) / stride_w + 1;
// xcorr or conv
int koffset = swapkernel ? kernel_w*kernel_h-1 : 0;
// nb outputs
int output_n = kernel_n / input_n;
// generate offsets according to block/thread ids
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
int oo_start = blockIdx.x;
int oo_end = oo_start+1;
int ii_start = (blockIdx.x / output_n) * input_n;
int ii_end = ii_start + input_n;
// nb threads, unique thread id
int tid = blockDim.x*blockDim.y*threadIdx.z + blockDim.x * threadIdx.y
+ threadIdx.x;
int nthreads = blockDim.x * blockDim.y * blockDim.z;
// iterators
int oo, ii, xx, yy, kx, ky, kk;
// do the kernels fit in shared mem ?
if (input_n*kernel_w*kernel_h <= CUDA_SHARED_MEM_SIZE) {
// put the kernel in shared memory
__shared__ float shared_kernel[CUDA_SHARED_MEM_SIZE];
// first thread of each block does the copy
for (kk = tid; kk < kernel_w*kernel_h*input_n; kk += nthreads) {
shared_kernel[kk] = kernel[input_n*kernel_w*kernel_h
*(oo_start % output_n) + kk];
}
__syncthreads();
// templated kernel size
if ((T_kernel_w > 0) && (T_kernel_h > 0)) {
// unrolled convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for(ii = ii_start; ii < ii_end; ii++) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + ii*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w
+ xx;
float *kernel_p = shared_kernel + (ii % input_n)*kernel_w*kernel_h
+ koffset;
float sum = 0;
if (swapkernel) {
#pragma unroll
for(ky = 0; ky < T_kernel_h; ky++) {
#pragma unroll
for(kx = 0; kx < T_kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
#pragma unroll
for(ky = 0; ky < T_kernel_h; ky++) {
#pragma unroll
for(kx = 0; kx < T_kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
} else {
// default convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for(ii = ii_start; ii < ii_end; ii++) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + ii*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w
+ xx;
float *kernel_p = shared_kernel + (ii % input_n)
* kernel_w * kernel_h + koffset;
float sum = 0;
if (swapkernel) {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
}
} else { // not enough shared mem for kernels, simply stream them
// convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for(ii = ii_start; ii < ii_end; ii++) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + ii*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w
+ xx;
float *kernel_p = kernel + ((oo % output_n) * input_n
+ (ii % input_n))*kernel_w*kernel_h
+ koffset;
float sum = 0;
if (swapkernel) {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
}
}
void ebl::cuda_convolution_3d(idx<float32> &in, idx<float32> &ker,
idx<float32> &out,
intg stride_x, intg stride_y, int devid) {
if (!in.contiguousp() || !ker.contiguousp() ||
!out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
cudaSetDevice(devid);
// copy input and kernels to gpu, allocate output on gpu
cudaError_t err;
float *input_data, *kernel_data, *output_data;
LOCAL_TIMING_START();
cudaMalloc((void**) &input_data, in.nelements() * sizeof(float));
cudaMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMalloc((void**) &kernel_data, ker.nelements() * sizeof(float));
cudaMemcpy( kernel_data, ker.idx_ptr(), ker.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMalloc((void**) &output_data, out.nelements() * sizeof(float));
cudaMemset(output_data, 0, out.nelements() * sizeof(float));
// set the number of blocks and threads
int nthreads_x = 32;
int nthreads_y = 8;
int block_height = floor(16 / out.dim(0));
if (block_height < 1)
block_height = 1;
dim3 blocks(out.dim(0),block_height);
dim3 threads(nthreads_x,nthreads_y);
// sync any previous kernel exec
cudaDeviceSynchronize();
LOCAL_TIMING_REPORT("convgpu initial transfer time" << in.dim(0));
LOCAL_TIMING2_START();
if ((ker.dim(2) == 3) && (ker.dim(1) == 3))
conv2generic <false, 3, 3> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 5) && (ker.dim(1) == 5))
conv2generic <false, 5, 5> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 7) && (ker.dim(1) == 7))
conv2generic <false, 7, 7> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 9) && (ker.dim(1) == 9))
conv2generic <false, 9, 9> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 11) && (ker.dim(1) == 11))
conv2generic <false, 11, 11> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 13) && (ker.dim(1) == 13))
conv2generic <false, 13, 13> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 4) && (ker.dim(1) == 4))
conv2generic <false, 4, 4> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 6) && (ker.dim(1) == 6))
conv2generic <false, 6, 6> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 8) && (ker.dim(1) == 8))
conv2generic <false, 8, 8> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 10) && (ker.dim(1) == 10))
conv2generic <false, 10, 10> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else if ((ker.dim(2) == 12) && (ker.dim(1) == 12))
conv2generic <false, 12, 12> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
else
conv2generic <false, 0 , 0> <<<blocks, threads>>> (input_data, kernel_data,
output_data,
in.dim(0), in.dim(1),
in.dim(2),
out.dim(0)*in.dim(0),
ker.dim(1), ker.dim(2),
stride_x, stride_y);
// sync & clean
cudaDeviceSynchronize();
LOCAL_TIMING2_REPORT("convgpu kernel execution time");
cudaMemcpy(out.idx_ptr(), output_data, out.nelements() * sizeof(float),
cudaMemcpyDeviceToHost);
cudaFree(input_data);
cudaFree(kernel_data);
cudaFree(output_data);
// check for errors
err = cudaGetLastError();
if (err != cudaSuccess)
eblerror("cuda error:\t" << cudaGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// ebl_convolution_3dmap
/////////////////////////////////////////////////////////////////////////////
/*
* Description:
* base conv2D routine: 3D input, 3D output, 4D kernel
*
* - all chunks of data should be contiguous
* - the swapkernel flag can be used to generate a conv2 instead of xcorr2
* - the templated kernel size is useful to generate code that's 2x faster
* but can be set to 0 to allow arbitrary kernel sizes
* ---- the table should have the first dim with the outputs, each output
* ---- should have a fanin set of inputs contiguously
*/
template <bool swapkernel, int T_kernel_h, int T_kernel_w>
__global__ void conv2mapgeneric(float *input, float *kernel, float *output,
int input_n, int input_h, int input_w,
int kernel_n, int kernel_h, int kernel_w,
int stride_h, int stride_w,
long *table, int fanin)
{
// output dimensions
int output_h = (input_h - kernel_h) / stride_h + 1;
int output_w = (input_w - kernel_w) / stride_w + 1;
// xcorr or conv
int koffset = swapkernel ? kernel_w*kernel_h-1 : 0;
// nb outputs
// int output_n = kernel_n / fanin;
// generate offsets according to block/thread ids
int xx_start = threadIdx.x;
int xx_end = output_w;
int xx_step = blockDim.x;
int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
int yy_end = output_h;
int yy_step = blockDim.y*gridDim.y;
int oo_start = blockIdx.x;
int oo_end = oo_start+1;
int table_start = blockIdx.x * (fanin * 2);
int table_end = table_start + (fanin * 2);
// nb threads, unique thread id
int tid = blockDim.x*blockDim.y*threadIdx.z
+ blockDim.x * threadIdx.y + threadIdx.x;
int nthreads = blockDim.x * blockDim.y * blockDim.z;
// iterators
int oo, ii, xx, yy, kx, ky, kk;
// do the kernels fit in shared mem ?
if (kernel_w*kernel_h*kernel_n <= CUDA_SHARED_MEM_SIZE) {
// put the kernel in shared memory
__shared__ float shared_kernel[CUDA_SHARED_MEM_SIZE];
// first thread of each block does the copy
for (kk = tid; kk < kernel_w*kernel_h*kernel_n; kk += nthreads) {
shared_kernel[kk] = kernel[kk];
}
__syncthreads();
// templated kernel size
if ((T_kernel_w > 0) && (T_kernel_h > 0)) {
// unrolled convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for (ii = table_start; ii < table_end; ii = ii + 2) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + table[ii]*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w + xx;
float *kernel_p = shared_kernel
+ table[ii + 1] *kernel_w*kernel_h + koffset;
float sum = 0;
if (swapkernel) {
#pragma unroll
for(ky = 0; ky < T_kernel_h; ky++) {
#pragma unroll
for(kx = 0; kx < T_kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
#pragma unroll
for(ky = 0; ky < T_kernel_h; ky++) {
#pragma unroll
for(kx = 0; kx < T_kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
} else {
// default convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for (ii = table_start; ii < table_end; ii = ii + 2) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dims (between input image and the mask)
float *input_p = input + table[ii]*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w
+ xx;
float *kernel_p = shared_kernel
+ table[ii + 1] * kernel_w * kernel_h + koffset;
float sum = 0;
if (swapkernel) {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
}
} else { // not enough shared mem for kernels, simply stream them
// convolution loop
for(oo = oo_start; oo < oo_end; oo++) {
for (ii = table_start; ii < table_end; ii = ii + 2) {
for(yy = yy_start; yy < yy_end; yy+=yy_step) {
for(xx = xx_start; xx < xx_end; xx+=xx_step) {
// Dot product in two dimensions... (between input image and the mask)
float *input_p = input + table[ii]*input_h*input_w
+ yy*stride_h*input_w + xx*stride_w;
float *output_p = output + oo*output_h*output_w + yy*output_w + xx;
float *kernel_p = kernel + table[ii + 1] *kernel_w*kernel_h + koffset;
float sum = 0;
if (swapkernel) {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p--);
}
input_p += input_w;
}
} else {
for(ky = 0; ky < kernel_h; ky++) {
#pragma unroll 5
for(kx = 0; kx < kernel_w; kx++) {
sum += input_p[kx]*(*kernel_p++);
}
input_p += input_w;
}
}
*output_p += sum;
}
}
}
}
}
}
void ebl::cuda_convolution_3dmap(idx<float32> &in, idx<float32> &ker,
idx<float32> &out,
intg stride_x, intg stride_y,
idx<intg> table, int fanin, int devid) {
if (!in.contiguousp() || !ker.contiguousp() ||
!out.contiguousp() || !table.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
cudaSetDevice(devid);
// copy input and kernels to gpu, allocate output on gpu
cudaError_t err;
float *input_data, *kernel_data, *output_data;
long *table_data;
LOCAL_TIMING_START();
cudaMalloc((void**) &input_data, in.nelements() * sizeof(float));
cudaMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMalloc((void**) &kernel_data, ker.nelements() * sizeof(float));
cudaMemcpy( kernel_data, ker.idx_ptr(), ker.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMalloc((void**) &output_data, out.nelements() * sizeof(float));
cudaMemset(output_data, 0, out.nelements() * sizeof(float));
cudaMalloc((void**) &table_data, table.nelements() * sizeof(intg));
cudaMemcpy(table_data, table.idx_ptr(), table.nelements() * sizeof(intg),
cudaMemcpyHostToDevice);
// set the number of blocks and threads
int nthreads_x = 32;
int nthreads_y = 8;
int block_height = floor(16 / out.dim(0));
if (block_height < 1)
block_height = 1;
dim3 blocks(out.dim(0),block_height);
dim3 threads(nthreads_x,nthreads_y);
// cout << "DEBUG INFO:" <<"\nnthread_x:" << nthreads_x
// <<"\nnthread_y:" << nthreads_y
// <<"\nblock_height:" << block_height
// <<"\nblock_width:" << out.dim(0)
// <<"\tinput_elements: "<<in.nelements()
// <<"\tkernel_elements: "<<ker.nelements()
// <<"\toutput_elements: "<<out.nelements()
// << "\nin0:"<<in.dim(0)<<"\n in1:"<< in.dim(1)<<"\n in2:"<< in.dim(2)
// <<"\nkernel_n: "<<out.dim(0) *fanin<<"\n ker.dim1:"
// <<ker.dim(1)<<"\n ker.dim2:"
// <<ker.dim(2)
// <<"\nfanin:" <<fanin <<endl;
// sync any previous kernel exec
cudaDeviceSynchronize();
LOCAL_TIMING_REPORT("convgpu initial transfer time" << in.dim(0));
LOCAL_TIMING2_START();
if ((ker.dim(2) == 3) && (ker.dim(1) == 3))
conv2mapgeneric <false, 3, 3> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 5) && (ker.dim(1) == 5))
conv2mapgeneric <false, 5, 5> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 7) && (ker.dim(1) == 7))
conv2mapgeneric <false, 7, 7> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 9) && (ker.dim(1) == 9))
conv2mapgeneric <false, 9, 9> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 11) && (ker.dim(1) == 11))
conv2mapgeneric <false, 11, 11> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 13) && (ker.dim(1) == 13))
conv2mapgeneric <false, 13, 13> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1), ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 4) && (ker.dim(1) == 4))
conv2mapgeneric <false, 4, 4> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 6) && (ker.dim(1) == 6))
conv2mapgeneric <false, 6, 6> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 8) && (ker.dim(1) == 8))
conv2mapgeneric <false, 8, 8> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 10) && (ker.dim(1) == 10))
conv2mapgeneric <false, 10, 10> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else if ((ker.dim(2) == 12) && (ker.dim(1) == 12))
conv2mapgeneric <false, 12, 12> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
else
conv2mapgeneric <false, 0 , 0> <<<blocks, threads>>> (input_data,
kernel_data,
output_data,
in.dim(0),
in.dim(1),
in.dim(2),
out.dim(0)*fanin,
ker.dim(1),
ker.dim(2),
stride_x,
stride_y,
table_data,
fanin);
LOCAL_TIMING2_REPORT("convgpu kernel execution time");
// sync & clean
cudaDeviceSynchronize();
cudaMemcpy(out.idx_ptr(), output_data, out.nelements() * sizeof(float),
cudaMemcpyDeviceToHost);
cudaFree(input_data);
cudaFree(kernel_data);
cudaFree(output_data);
cudaFree(table_data);
// check for errors
err = cudaGetLastError();
if (err != cudaSuccess)
eblerror("cuda error:\t" << cudaGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_tanh
/////////////////////////////////////////////////////////////////////////////
struct tanh_functor
{
__host__ __device__ float operator()(const float& x) const
{
return tanh(x);
}
};
void ebl::cuda_tanh(idx<float32> &in, idx<float32> &out, int devid) {
if (!in.contiguousp() || !out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in tanh module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
cudaSetDevice(devid);
// copy input on gpu
cudaError_t err;
float *input_data;
cudaMalloc((void**) &input_data, in.nelements() * sizeof(float));
cudaMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
// apply tanh
thrust::device_ptr<float> in_thrustptr(input_data);
thrust::transform(in_thrustptr, in_thrustptr + in.nelements(),
in_thrustptr,
tanh_functor());
cudaMemcpy(out.idx_ptr(), input_data, in.nelements() * sizeof(float),
cudaMemcpyDeviceToHost);
// sync & clean
cudaDeviceSynchronize();
cudaFree(input_data);
// check for errors
err = cudaGetLastError();
if (err != cudaSuccess)
eblerror("Cuda Error:\t" << cudaGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_power
/////////////////////////////////////////////////////////////////////////////
struct power_functor
{
const float value;
power_functor(float pow_) : value(pow_) {}
__host__ __device__ float operator()(const float& x) const
{
return pow(x, value);
}
};
void ebl::cuda_power(idx<float32> &in, idx<float32> &out, float pow, int devid) {
if (!in.contiguousp() || !out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in power module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
cudaSetDevice(devid);
// copy input on gpu
cudaError_t err;
float *input_data;
cudaMalloc((void**) &input_data, in.nelements() * sizeof(float));
cudaMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
// apply power
thrust::device_ptr<float> in_thrustptr(input_data);
thrust::transform(in_thrustptr, in_thrustptr + in.nelements(),
in_thrustptr,
power_functor(pow));
cudaMemcpy(out.idx_ptr(), input_data, in.nelements() * sizeof(float),
cudaMemcpyDeviceToHost);
// sync & clean
cudaDeviceSynchronize();
cudaFree(input_data);
// check for errors
err = cudaGetLastError();
if (err != cudaSuccess)
eblerror("Cuda Error:\t" << cudaGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_addc
/////////////////////////////////////////////////////////////////////////////
struct addvalue_functor
{
const float value;
addvalue_functor(float value_) : value(value_) {}
__host__ __device__ float operator()(const float& x) const {
return (x+value);
}
};
void ebl::cuda_addc(idx<float32> &in, idx<float32> &bias,
idx<float32> &out, int devid) {
if (!in.contiguousp() || !out.contiguousp() || !bias.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in addc module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
cudaSetDevice(devid);
// copy input on gpu
cudaError_t err;
float *input_data;
cudaMalloc((void**) &input_data, in.nelements() * sizeof(float));
cudaMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
// apply addc for each slice using add
int nslices = in.dim(0);
long elements_per_slice = in.nelements() / nslices;
for (int i=0; i < nslices; i++) {
thrust::device_ptr<float> in_thrustptr(input_data + elements_per_slice * i);
thrust::transform(in_thrustptr, in_thrustptr + elements_per_slice,
in_thrustptr,
addvalue_functor(bias.get(i)));
}
cudaMemcpy(out.idx_ptr(), input_data, in.nelements() * sizeof(float),
cudaMemcpyDeviceToHost);
// sync & clean
cudaDeviceSynchronize();
cudaFree(input_data);
// check for errors
err = cudaGetLastError();
if (err != cudaSuccess)
eblerror("Cuda Error:\t" << cudaGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_div
/////////////////////////////////////////////////////////////////////////////
struct div_elementwise_functor
{
__host__ __device__ float operator()(const float& x, const float&y) const {
return (x/y);
}
};
void ebl::cuda_div(idx<float32> &in1, idx<float32> &in2,
idx<float32> &out, int devid) {
if (!in1.contiguousp() || !out.contiguousp() || !in2.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
  if(in1.nelements() != in2.nelements() || in2.nelements() != out.nelements())
    eblerror("in1, in2 and out tensors have different number of elements in cuda_div module"
             << " in1 size:" << in1.nelements()
             << " in2 size:" << in2.nelements()
             << " out size:" << out.nelements());
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
cudaSetDevice(devid);
// copy input on gpu
cudaError_t err;
float *input1_data;
float *input2_data;
float *output_data;
cudaMalloc((void**) &input1_data, in1.nelements() * sizeof(float));
cudaMalloc((void**) &input2_data, in2.nelements() * sizeof(float));
cudaMemcpy(input1_data, in1.idx_ptr(), in1.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(input2_data, in2.idx_ptr(), in2.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMalloc((void**) &output_data, out.nelements() * sizeof(float));
cudaMemset(output_data, 0, out.nelements() * sizeof(float));
thrust::device_ptr<float> in1_thrustptr(input1_data);
thrust::device_ptr<float> in2_thrustptr(input2_data);
thrust::device_ptr<float> output_thrustptr(output_data);
thrust::transform(in1_thrustptr, in1_thrustptr + in1.nelements(),
in2_thrustptr, output_thrustptr, div_elementwise_functor());
cudaMemcpy(out.idx_ptr(), output_data, out.nelements() * sizeof(float),
cudaMemcpyDeviceToHost);
// sync & clean
cudaDeviceSynchronize();
cudaFree(input1_data);
cudaFree(input2_data);
cudaFree(output_data);
// check for errors
err = cudaGetLastError();
if (err != cudaSuccess)
eblerror("Cuda Error:\t" << cudaGetErrorString(err));
}
/////////////////////////////////////////////////////////////////////////////
//////////////////////
/// cuda_fsum
/////////////////////////////////////////////////////////////////////////////
struct fsum_functor
{
__host__ __device__ float operator()(const float& x, const float& y) const {
return (x+y);
}
};
struct fsumdiv_functor
{
const float value;
fsumdiv_functor(float value_) : value(value_) {}
__host__ __device__ float operator()(const float& x, const float& y) const {
return (x+y)/value;
}
};
void ebl::cuda_fsum(idx<float32> &in, idx<float32> &out, bool div, int devid) {
if (!in.contiguousp() || !out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in addc module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
cudaSetDevice(devid);
// copy input on gpu
cudaError_t err;
float *input_data;
cudaMalloc((void**) &input_data, in.nelements() * sizeof(float));
cudaMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
// apply addc for each slice using add
int nslices = in.dim(0);
long elements_per_slice = in.nelements() / nslices;
thrust::device_ptr<float> in_thrustptr(input_data);
for (int i=1; i < nslices; i++) {
thrust::device_ptr<float> in2_thrustptr(input_data + elements_per_slice * i);
if(div)
thrust::transform(in2_thrustptr, in2_thrustptr + elements_per_slice,
in_thrustptr, in_thrustptr,
fsumdiv_functor(nslices));
else
thrust::transform(in2_thrustptr, in2_thrustptr + elements_per_slice,
in_thrustptr, in_thrustptr,
fsum_functor());
}
cudaMemcpy(out.idx_ptr(), input_data, elements_per_slice * sizeof(float),
cudaMemcpyDeviceToHost);
// sync & clean
cudaDeviceSynchronize();
cudaFree(input_data);
// check for errors
err = cudaGetLastError();
if (err != cudaSuccess)
eblerror("Cuda Error:\t" << cudaGetErrorString(err));
}
////////////////////////////////////////////////////////////////////////////
///// cuda_threshold
struct threshold_functor
{
const float thres;
const float val;
  threshold_functor(float thres_, float value_) : thres(thres_), val(value_) {}
__host__ __device__ float operator()(const float& x) const {
return (x > thres ? x : val);
}
};
void ebl::cuda_threshold(idx<float32> &in,
idx<float32> &out, float32 thres, float32 val,
int devid) {
if (!in.contiguousp() || !out.contiguousp())
eblerror("Tensor inputs to cuda kernel are not contiguous");
if(in.nelements() != out.nelements())
eblerror("in and out tensors have different number of elements in addc module");
// cout << "Executing on Device " << devid << " (CUDA)"<<endl;
if (devid != -1)
cudaSetDevice(devid);
// copy input on gpu
cudaError_t err;
float *input_data;
cudaMalloc((void**) &input_data, in.nelements() * sizeof(float));
cudaMemcpy(input_data, in.idx_ptr(), in.nelements() * sizeof(float),
cudaMemcpyHostToDevice);
thrust::device_ptr<float> in_thrustptr(input_data);
thrust::transform(in_thrustptr, in_thrustptr + in.nelements(),
in_thrustptr, threshold_functor(thres, val));
cudaMemcpy(out.idx_ptr(), input_data, in.nelements() * sizeof(float),
cudaMemcpyDeviceToHost);
// sync & clean
cudaDeviceSynchronize();
cudaFree(input_data);
// check for errors
err = cudaGetLastError();
if (err != cudaSuccess)
eblerror("Cuda Error:\t" << cudaGetErrorString(err));
}
} // end namespace ebl
#endif
|
6779b17d27c2878604d00934df0faa4d7228eb50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
struct TensorPowOp {
TensorPowOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = powf(*in, val);
}
__device__ __forceinline__ void operator()(float* v) {
*v = powf(*v, val);
}
const float val;
};
void THCudaTensor_pow(THCState *state, THCudaTensor *self_, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
struct TensorTPowOp {
TensorTPowOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = powf(val, *in);
}
__device__ __forceinline__ void operator()(float* v) {
*v = powf(val, *v);
}
const float val;
};
void THCudaTensor_tpow(THCState *state, THCudaTensor *self_, float value, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorTPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorTPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
struct TensorATan2Op {
__device__ __forceinline__ void operator()(float* out, float* a, float* b) {
*out = atan2f(*a, *b);
}
};
void THCudaTensor_atan2(THCState *state, THCudaTensor *self_, THCudaTensor *tx, THCudaTensor *ty)
{
THAssert(THCudaTensor_checkGPU(state, 3, self_, tx, ty));
THArgCheck(THCudaTensor_nElement(state, tx) ==
THCudaTensor_nElement(state, ty), 3, "sizes do not match");
THCudaTensor_resizeAs(state, self_, tx);
if (!THCudaTensor_pointwiseApply3(state, self_, tx, ty, TensorATan2Op())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
struct TensorClampOp {
TensorClampOp(float min, float max) : minValue(min), maxValue(max) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = max(min(*in, maxValue), minValue);
}
__device__ __forceinline__ void operator()(float* v) {
*v = max(min(*v, maxValue), minValue);
}
const float minValue;
const float maxValue;
};
void THCudaTensor_clamp(THCState *state, THCudaTensor *self_, THCudaTensor *src, float min_value,
float max_value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorClampOp(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorClampOp(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
struct TensorSignOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
float orig = *in;
*out = (orig > 0) - (orig < 0);
}
__device__ __forceinline__ void operator()(float* v) {
float orig = *v;
*v = (orig > 0) - (orig < 0);
}
};
void THCudaTensor_sign(THCState *state, THCudaTensor *self_, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorSignOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorSignOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
float THCudaTensor_meanall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCudaTensor_sumall(state, self)/THCudaTensor_nElement(state, self);
}
void
THCudaTensor_mean(THCState *state, THCudaTensor *self, THCudaTensor *src, long dim)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THCudaTensor_sum(state, self, src, dim);
THCudaTensor_div(state, self, self, THCudaTensor_size(state, src, dim));
}
struct square_functor
{
const float mean;
square_functor(float mean_) : mean(mean_) {}
__host__ __device__ float operator()(const float& x) const
{
return (x-mean)*(x-mean);
}
};
float THCudaTensor_varall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
float mean = THCudaTensor_meanall(state, self);
float result = thrust::transform_reduce(self_data, self_data+size, square_functor(mean), (float)0, thrust::plus<float>());
result = result/(THCudaTensor_nElement(state, self)-1);
THCudaTensor_free(state, self);
return result;
}
float THCudaTensor_stdall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
return sqrt(THCudaTensor_varall(state, self));
}
// Given the sum of values and the sum of squares, compute the variance or standard deviation.
template<bool flag, bool apply_sqrt>
__forceinline__ __device__ float THCudaTensor_computeVar(float sum, float sum2, unsigned row_size) {
if (flag) {
sum /= row_size;
sum2 /= row_size;
sum2 -= sum * sum;
sum2 = (sum2 < 0 ? 0 : sum2);
}
else {
sum /= row_size;
sum2 /= row_size - 1;
sum2 -= ((float)row_size) / ((float)(row_size - 1)) * sum * sum;
sum2 = (sum2 < 0 ? 0 : sum2);
}
if (apply_sqrt)
return sqrt(sum2);
else
return sum2;
}
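/*
 * Worked check with hypothetical values: for the row {1, 2, 3},
 * sum = 6, sum2 = 14, row_size = 3.
 *   flag == true  (normalize by n):     14/3 - (6/3)^2       = 2/3  (population variance)
 *   flag == false (normalize by n - 1): 14/2 - (3/2)*(6/3)^2 = 1    (unbiased sample variance)
 * With apply_sqrt set, the square root of these values is returned instead.
 */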
/* Compute the variance (or standard deviation) along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
* - if flag is set, normalize by `row_size` instead of `row_size - 1`
* - if apply_sqrt is set, compute the standard deviation instead of variance
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
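/*
 * Concrete example (hypothetical shape): reducing a 4x5x6x7 tensor along
 * dimension 1 gives num_orows = 4, row_size = 5 and num_irows = 6*7 = 42.
 * blockIdx.x walks the 4 outer rows, blockIdx.y * blockDim.x + threadIdx.x
 * walks the 42 inner rows, and each thread strides through the 5 elements of
 * its column with step num_irows.
 */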
template<bool flag, bool apply_sqrt>
__global__ void THCudaTensor_kernel_varOuterDim(float *tgt, float *src_, unsigned num_orows, unsigned num_irows, unsigned row_size)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
float *src = src_ + orow * row_size * num_irows + irow;
float sum = 0, sum2 = 0;
for (unsigned col = 0; col < row_size; ++col) {
float val = *src;
sum += val;
sum2 += val * val;
src += num_irows;
}
tgt[orow * num_irows + irow] = THCudaTensor_computeVar<flag, apply_sqrt>(sum, sum2, row_size);
}
}
}
template<bool apply_sqrt>
__host__ void THCudaTensor_varOuterDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, long dimension, int flag)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions (i.e. dim < dimension) as one.
unsigned num_orows = 1;
for (unsigned dim = 0; dim < dimension; dim++) {
num_orows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, dimension);
// Treat all inner dimensions (i.e. dim > dimension) as one.
unsigned num_irows = 1;
for (unsigned dim = dimension + 1; dim < ndim; dim++) {
num_irows *= THCudaTensor_size(state, src, dim);
}
dim3 threads(min(512, num_irows));
unsigned maxGridDim = 1024;
dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, DIVUP(num_irows, threads.x)));
if (flag) {
hipLaunchKernelGGL(( THCudaTensor_kernel_varOuterDim<true, apply_sqrt>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size);
} else {
hipLaunchKernelGGL(( THCudaTensor_kernel_varOuterDim<false, apply_sqrt>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size);
}
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
/* Compute the variance (or standard deviation) of the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
* - if flag is set, normalize by `row_size` instead of `row_size - 1`
* - if apply_sqrt is set, compute the standard deviation instead of variance
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
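/*
 * Concrete example (hypothetical shape): a 10x20x128 tensor reduced along its
 * last dimension is treated as num_rows = 10*20 = 200 rows of row_size = 128.
 * With the 16x32 block shape chosen below, each block covers 32 rows; the 16
 * threads of a row accumulate partial sums with a stride-16 loop, the shared
 * memory tree reduction collapses them, and thread 0 writes that row's
 * variance (or standard deviation).
 */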
template<bool flag, bool apply_sqrt>
__global__ void THCudaTensor_kernel_varInnermostDim(float *tgt, float *src_, unsigned num_rows, unsigned row_size)
{
__shared__ float ssum[32][16];
__shared__ float ssum2[32][16];
for (unsigned block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
float sum = 0, sum2 = 0;
if (row < num_rows) {
float *src = src_ + row * row_size;
// Sequential reduction within a thread.
for (unsigned col = threadIdx.x; col < row_size; col += blockDim.x) {
float val = src[col];
sum += val;
sum2 += val * val;
}
}
ssum[threadIdx.y][threadIdx.x] = sum;
ssum2[threadIdx.y][threadIdx.x] = sum2;
__syncthreads();
// Reduce intermediate values to single value.
for (unsigned s = 8; s > 1; s >>= 1) {
if (row < num_rows && threadIdx.x < s) {
ssum[threadIdx.y][threadIdx.x] += ssum[threadIdx.y][threadIdx.x + s];
ssum2[threadIdx.y][threadIdx.x] += ssum2[threadIdx.y][threadIdx.x + s];
}
__syncthreads();
}
if (row < num_rows && threadIdx.x == 0) {
sum = ssum[threadIdx.y][0] + ssum[threadIdx.y][1];
sum2 = ssum2[threadIdx.y][0] + ssum2[threadIdx.y][1];
tgt[row] = THCudaTensor_computeVar<flag, apply_sqrt>(sum, sum2, row_size);
}
__syncthreads();
}
}
template<bool apply_sqrt>
__host__ void THCudaTensor_varInnermostDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, int flag)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions as a single dimension.
unsigned num_rows = 1;
for (unsigned dim = 0; dim < ndim - 1; dim++) {
num_rows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, ndim - 1);
// From limited testing, 16x32 seemed a good compromise for handling both long and short dimensions.
dim3 threads(16, 32);
dim3 grid(min(1024, DIVUP(num_rows, threads.y)));
if (flag) {
hipLaunchKernelGGL(( THCudaTensor_kernel_varInnermostDim<true, apply_sqrt>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size);
} else {
hipLaunchKernelGGL(( THCudaTensor_kernel_varInnermostDim<false, apply_sqrt>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size);
}
hipError_t errcode = hipGetLastError();
if (errcode != hipSuccess) {
THError(hipGetErrorString(errcode));
}
}
void THCudaTensor_var(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, int flag)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
THLongStorage *dim = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(dim, dimension, 1);
THCudaTensor_resize(state, self_, dim, NULL);
THLongStorage_free(dim);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_varInnermostDim<false>(state, self, src, flag);
} else {
THCudaTensor_varOuterDim<false>(state, self, src, dimension, flag);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
void THCudaTensor_std(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, int flag)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
THLongStorage *dim = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(dim, dimension, 1);
THCudaTensor_resize(state, self_, dim, NULL);
THLongStorage_free(dim);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_varInnermostDim<true>(state, self, src, flag);
} else {
THCudaTensor_varOuterDim<true>(state, self, src, dimension, flag);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
struct norm_functor
{
const float exponent;
norm_functor(float exponent_) : exponent(exponent_) {}
__host__ __device__ float operator()(const float& x) const
{
return pow(fabs(x), exponent);
}
};
struct partial_not_equal_functor
{
const float rhs;
partial_not_equal_functor(float rhs) : rhs(rhs) {}
__host__ __device__ bool operator()(const float &lhs) const {return lhs != rhs;}
};
float THCudaTensor_normall(THCState *state, THCudaTensor *self, float value)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
float result;
if(value == 0.0f) {
result = thrust::transform_reduce(self_data, self_data+size, partial_not_equal_functor(0.0f), (float)0, thrust::plus<float>());
} else {
result = thrust::transform_reduce(self_data, self_data+size, norm_functor(value), (float)0, thrust::plus<float>());
result = pow(result, (float)1.0/value);
}
THCudaTensor_free(state, self);
return result;
}
void THCudaTensor_norm(THCState *state, THCudaTensor* self, THCudaTensor* src, float value, long dimension)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
if (value == 0.0f) {
THCudaTensor_reduceDim(state, self, src,
partial_not_equal_functor(0.0f), thrust::plus<float>(),
0.0f, dimension);
} else {
THCudaTensor_reduceDim(state, self, src,
norm_functor(value), thrust::plus<float>(),
0.0f, dimension);
THCudaTensor_pow(state, self, self, 1/value);
}
THCudaCheck(hipGetLastError());
}
__global__ void THCudaTensor_kernel_renorm(float *data, const float value, const long size, const float maxnorm)
{
__shared__ float buffer[32];
long tx = threadIdx.x;
long bx = blockIdx.x;
long step = blockDim.x;
float *row = data + size*bx;
buffer[tx] = 0;
// get norm of axis
for (long i=tx; i<size; i+=step)
{
buffer[tx] += pow(fabs(row[i]), value);
}
// add (reduce)
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
// clip norms
__syncthreads();
float norm = pow(buffer[0], 1/value);
if (norm > maxnorm)
{
norm = maxnorm / (norm + 1e-7);
// renormalize
for (long i=tx; i<size; i+=step)
{
row[i] *= norm;
}
}
}
void THCudaTensor_renorm(THCState *state, THCudaTensor* self, THCudaTensor* src, float value, long dimension, float maxnorm)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THCudaTensor *self_;
THCudaTensor *src_ = THCudaTensor_newTranspose(state, src, dimension, 0);
THCudaTensor *data = THCudaTensor_newClone(state, src_);
long size = THCudaTensor_nElement(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCudaTensor_nDimension(state, src), 3, "invalid dimension");
THArgCheck(value > 0, 2, "non-positive-norm not supported");
THArgCheck(THCudaTensor_nDimension(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
hipLaunchKernelGGL(( THCudaTensor_kernel_renorm), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state), THCudaTensor_data(state, data), value, size, maxnorm);
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCudaTensor_free(state, src_);
self_ = THCudaTensor_newTranspose(state, data, dimension, 0);
THCudaTensor_resizeAs(state, self, self_);
THCudaTensor_freeCopyTo(state, self_, self);
THCudaTensor_free(state, data);
}
struct dist_functor
{
const float exponent;
dist_functor(float exponent_) : exponent(exponent_) {}
__host__ __device__ float operator()(const float& x, const float& y) const
{
return pow(fabs(x-y), exponent);
}
};
float THCudaTensor_dist(THCState *state, THCudaTensor *self, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
src = THCudaTensor_newContiguous(state, src);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
thrust::device_ptr<float> src_data(THCudaTensor_data(state, src));
float result = thrust::inner_product(self_data, self_data+size, src_data, (float) 0,thrust::plus<float>(), dist_functor(value));
THCudaTensor_free(state, src);
THCudaTensor_free(state, self);
return pow(result, (float)1.0/value);
}
void THCudaTensor_rand(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
THAssert(THCudaTensor_checkGPU(state, 1, r_));
THCudaTensor_resize(state, r_, size, NULL);
THCudaTensor_uniform(state, r_, 0, 1);
}
void THCudaTensor_randn(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
THAssert(THCudaTensor_checkGPU(state, 1, r_));
THCudaTensor_resize(state, r_, size, NULL);
THCudaTensor_normal(state, r_, 0, 1);
}
| 6779b17d27c2878604d00934df0faa4d7228eb50.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
struct TensorPowOp {
TensorPowOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = powf(*in, val);
}
__device__ __forceinline__ void operator()(float* v) {
*v = powf(*v, val);
}
const float val;
};
void THCudaTensor_pow(THCState *state, THCudaTensor *self_, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
struct TensorTPowOp {
TensorTPowOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = powf(val, *in);
}
__device__ __forceinline__ void operator()(float* v) {
*v = powf(val, *v);
}
const float val;
};
void THCudaTensor_tpow(THCState *state, THCudaTensor *self_, float value, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorTPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorTPowOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
struct TensorATan2Op {
__device__ __forceinline__ void operator()(float* out, float* a, float* b) {
*out = atan2f(*a, *b);
}
};
void THCudaTensor_atan2(THCState *state, THCudaTensor *self_, THCudaTensor *tx, THCudaTensor *ty)
{
THAssert(THCudaTensor_checkGPU(state, 3, self_, tx, ty));
THArgCheck(THCudaTensor_nElement(state, tx) ==
THCudaTensor_nElement(state, ty), 3, "sizes do not match");
THCudaTensor_resizeAs(state, self_, tx);
if (!THCudaTensor_pointwiseApply3(state, self_, tx, ty, TensorATan2Op())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
struct TensorClampOp {
TensorClampOp(float min, float max) : minValue(min), maxValue(max) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = max(min(*in, maxValue), minValue);
}
__device__ __forceinline__ void operator()(float* v) {
*v = max(min(*v, maxValue), minValue);
}
const float minValue;
const float maxValue;
};
void THCudaTensor_clamp(THCState *state, THCudaTensor *self_, THCudaTensor *src, float min_value,
float max_value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorClampOp(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorClampOp(min_value, max_value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
struct TensorSignOp {
__device__ __forceinline__ void operator()(float* out, float* in) {
float orig = *in;
*out = (orig > 0) - (orig < 0);
}
__device__ __forceinline__ void operator()(float* v) {
float orig = *v;
*v = (orig > 0) - (orig < 0);
}
};
void THCudaTensor_sign(THCState *state, THCudaTensor *self_, THCudaTensor *src)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
if (self_ == src) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorSignOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src);
if (!THCudaTensor_pointwiseApply2(state, self_, src, TensorSignOp())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
float THCudaTensor_meanall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCudaTensor_sumall(state, self)/THCudaTensor_nElement(state, self);
}
void
THCudaTensor_mean(THCState *state, THCudaTensor *self, THCudaTensor *src, long dim)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THCudaTensor_sum(state, self, src, dim);
THCudaTensor_div(state, self, self, THCudaTensor_size(state, src, dim));
}
struct square_functor
{
const float mean;
square_functor(float mean_) : mean(mean_) {}
__host__ __device__ float operator()(const float& x) const
{
return (x-mean)*(x-mean);
}
};
float THCudaTensor_varall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
float mean = THCudaTensor_meanall(state, self);
float result = thrust::transform_reduce(self_data, self_data+size, square_functor(mean), (float)0, thrust::plus<float>());
result = result/(THCudaTensor_nElement(state, self)-1);
THCudaTensor_free(state, self);
return result;
}
float THCudaTensor_stdall(THCState *state, THCudaTensor *self)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
return sqrt(THCudaTensor_varall(state, self));
}
// Given the sum of values and the sum of squares, compute the variance or standard deviation.
template<bool flag, bool apply_sqrt>
__forceinline__ __device__ float THCudaTensor_computeVar(float sum, float sum2, unsigned row_size) {
if (flag) {
sum /= row_size;
sum2 /= row_size;
sum2 -= sum * sum;
sum2 = (sum2 < 0 ? 0 : sum2);
}
else {
sum /= row_size;
sum2 /= row_size - 1;
sum2 -= ((float)row_size) / ((float)(row_size - 1)) * sum * sum;
sum2 = (sum2 < 0 ? 0 : sum2);
}
if (apply_sqrt)
return sqrt(sum2);
else
return sum2;
}
/* Compute the variance (or standard deviation) along an outer dimension of a tensor.
*
* - num_orows is the size of the flattened outer dimensions;
* - num_irows is the size of the flattened inner dimensions;
* - row_size is the size of the dimension along which to compute the variance;
* - if flag is set, normalize by `row_size` instead of `row_size - 1`
* - if apply_sqrt is set, compute the standard deviation instead of variance
*
* The dimensions to the outside and inside of the specified dimension are considered as flattened.
* Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened
* outer dimensions, which contains several "inner rows").
* Each thread processes a single inner row at a time.
*/
template<bool flag, bool apply_sqrt>
__global__ void THCudaTensor_kernel_varOuterDim(float *tgt, float *src_, unsigned num_orows, unsigned num_irows, unsigned row_size)
{
for (unsigned orow = blockIdx.x; orow < num_orows; orow += gridDim.x) {
for (unsigned irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) {
float *src = src_ + orow * row_size * num_irows + irow;
float sum = 0, sum2 = 0;
for (unsigned col = 0; col < row_size; ++col) {
float val = *src;
sum += val;
sum2 += val * val;
src += num_irows;
}
tgt[orow * num_irows + irow] = THCudaTensor_computeVar<flag, apply_sqrt>(sum, sum2, row_size);
}
}
}
template<bool apply_sqrt>
__host__ void THCudaTensor_varOuterDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, long dimension, int flag)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions (i.e. dim < dimension) as one.
unsigned num_orows = 1;
for (unsigned dim = 0; dim < dimension; dim++) {
num_orows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, dimension);
// Treat all inner dimensions (i.e. dim > dimension) as one.
unsigned num_irows = 1;
for (unsigned dim = dimension + 1; dim < ndim; dim++) {
num_irows *= THCudaTensor_size(state, src, dim);
}
dim3 threads(min(512, num_irows));
unsigned maxGridDim = 1024;
dim3 grid(min(maxGridDim, num_orows), min(maxGridDim, DIVUP(num_irows, threads.x)));
if (flag) {
THCudaTensor_kernel_varOuterDim<true, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size);
} else {
THCudaTensor_kernel_varOuterDim<false, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_orows, num_irows, row_size);
}
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
/* Compute the variance (or standard deviation) of the innermost dimension of a tensor.
*
* - num_rows is the size of the flattened outer dimensions;
* - row_size is the size of the innermost dimension;
* - if flag is set, normalize by `row_size` instead of `row_size - 1`
* - if apply_sqrt is set, compute the standard deviation instead of variance
*
* The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is
* considered as having 'num_rows' rows of size 'row_size'.
* Each thread block processes one or more sets of contiguous rows (processing multiple rows
* per thread block is quicker than processing a single row, especially for short rows).
*/
template<bool flag, bool apply_sqrt>
__global__ void THCudaTensor_kernel_varInnermostDim(float *tgt, float *src_, unsigned num_rows, unsigned row_size)
{
__shared__ float ssum[32][16];
__shared__ float ssum2[32][16];
for (unsigned block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) {
unsigned row = block_row + threadIdx.y;
float sum = 0, sum2 = 0;
if (row < num_rows) {
float *src = src_ + row * row_size;
// Sequential reduction within a thread.
for (unsigned col = threadIdx.x; col < row_size; col += blockDim.x) {
float val = src[col];
sum += val;
sum2 += val * val;
}
}
ssum[threadIdx.y][threadIdx.x] = sum;
ssum2[threadIdx.y][threadIdx.x] = sum2;
__syncthreads();
// Reduce intermediate values to single value.
for (unsigned s = 8; s > 1; s >>= 1) {
if (row < num_rows && threadIdx.x < s) {
ssum[threadIdx.y][threadIdx.x] += ssum[threadIdx.y][threadIdx.x + s];
ssum2[threadIdx.y][threadIdx.x] += ssum2[threadIdx.y][threadIdx.x + s];
}
__syncthreads();
}
if (row < num_rows && threadIdx.x == 0) {
sum = ssum[threadIdx.y][0] + ssum[threadIdx.y][1];
sum2 = ssum2[threadIdx.y][0] + ssum2[threadIdx.y][1];
tgt[row] = THCudaTensor_computeVar<flag, apply_sqrt>(sum, sum2, row_size);
}
__syncthreads();
}
}
template<bool apply_sqrt>
__host__ void THCudaTensor_varInnermostDim(THCState *state, THCudaTensor *tgt, THCudaTensor *src, int flag)
{
unsigned ndim = THCudaTensor_nDimension(state, src);
// Treat all outer dimensions as a single dimension.
unsigned num_rows = 1;
for (unsigned dim = 0; dim < ndim - 1; dim++) {
num_rows *= THCudaTensor_size(state, src, dim);
}
unsigned row_size = THCudaTensor_size(state, src, ndim - 1);
// From limited testing, 16x32 seemed a good compromise for handling both long and short dimensions.
dim3 threads(16, 32);
dim3 grid(min(1024, DIVUP(num_rows, threads.y)));
if (flag) {
THCudaTensor_kernel_varInnermostDim<true, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size);
} else {
THCudaTensor_kernel_varInnermostDim<false, apply_sqrt><<<grid, threads, 0, THCState_getCurrentStream(state)>>>(
THCudaTensor_data(state, tgt), THCudaTensor_data(state, src), num_rows, row_size);
}
cudaError errcode = cudaGetLastError();
if (errcode != cudaSuccess) {
THError(cudaGetErrorString(errcode));
}
}
void THCudaTensor_var(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, int flag)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
THLongStorage *dim = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(dim, dimension, 1);
THCudaTensor_resize(state, self_, dim, NULL);
THLongStorage_free(dim);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_varInnermostDim<false>(state, self, src, flag);
} else {
THCudaTensor_varOuterDim<false>(state, self, src, dimension, flag);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
void THCudaTensor_std(THCState *state, THCudaTensor *self_, THCudaTensor *src, long dimension, int flag)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src));
THLongStorage *dim = THCudaTensor_newSizeOf(state, src);
THLongStorage_set(dim, dimension, 1);
THCudaTensor_resize(state, self_, dim, NULL);
THLongStorage_free(dim);
THCudaTensor *self = THCudaTensor_newContiguous(state, self_);
src = THCudaTensor_newContiguous(state, src);
if (dimension == THCudaTensor_nDimension(state, src) - 1) {
THCudaTensor_varInnermostDim<true>(state, self, src, flag);
} else {
THCudaTensor_varOuterDim<true>(state, self, src, dimension, flag);
}
THCudaTensor_free(state, src);
THCudaTensor_freeCopyTo(state, self, self_);
}
struct norm_functor
{
const float exponent;
norm_functor(float exponent_) : exponent(exponent_) {}
__host__ __device__ float operator()(const float& x) const
{
return pow(fabs(x), exponent);
}
};
struct partial_not_equal_functor
{
const float rhs;
partial_not_equal_functor(float rhs) : rhs(rhs) {}
__host__ __device__ bool operator()(const float &lhs) const {return lhs != rhs;}
};
float THCudaTensor_normall(THCState *state, THCudaTensor *self, float value)
{
THAssert(THCudaTensor_checkGPU(state, 1, self));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
float result;
if(value == 0.0f) {
result = thrust::transform_reduce(self_data, self_data+size, partial_not_equal_functor(0.0f), (float)0, thrust::plus<float>());
} else {
result = thrust::transform_reduce(self_data, self_data+size, norm_functor(value), (float)0, thrust::plus<float>());
result = pow(result, (float)1.0/value);
}
THCudaTensor_free(state, self);
return result;
}
void THCudaTensor_norm(THCState *state, THCudaTensor* self, THCudaTensor* src, float value, long dimension)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
if (value == 0.0f) {
THCudaTensor_reduceDim(state, self, src,
partial_not_equal_functor(0.0f), thrust::plus<float>(),
0.0f, dimension);
} else {
THCudaTensor_reduceDim(state, self, src,
norm_functor(value), thrust::plus<float>(),
0.0f, dimension);
THCudaTensor_pow(state, self, self, 1/value);
}
THCudaCheck(cudaGetLastError());
}
__global__ void THCudaTensor_kernel_renorm(float *data, const float value, const long size, const float maxnorm)
{
__shared__ float buffer[32];
long tx = threadIdx.x;
long bx = blockIdx.x;
long step = blockDim.x;
float *row = data + size*bx;
buffer[tx] = 0;
// get norm of axis
for (long i=tx; i<size; i+=step)
{
buffer[tx] += pow(fabs(row[i]), value);
}
// add (reduce)
for (unsigned int stride = blockDim.x >> 1; stride > 0; stride >>= 1)
{
__syncthreads();
if (tx < stride)
buffer[tx] += buffer[tx+stride];
}
// clip norms
__syncthreads();
float norm = pow(buffer[0], 1/value);
if (norm > maxnorm)
{
norm = maxnorm / (norm + 1e-7);
// renormalize
for (long i=tx; i<size; i+=step)
{
row[i] *= norm;
}
}
}
void THCudaTensor_renorm(THCState *state, THCudaTensor* self, THCudaTensor* src, float value, long dimension, float maxnorm)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
THCudaTensor *self_;
THCudaTensor *src_ = THCudaTensor_newTranspose(state, src, dimension, 0);
THCudaTensor *data = THCudaTensor_newClone(state, src_);
long size = THCudaTensor_nElement(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCudaTensor_nDimension(state, src), 3, "invalid dimension");
THArgCheck(value > 0, 2, "non-positive-norm not supported");
THArgCheck(THCudaTensor_nDimension(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
THCudaTensor_kernel_renorm<<<grid, threads, 0, THCState_getCurrentStream(state)>>>(THCudaTensor_data(state, data), value, size, maxnorm);
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCudaTensor_free(state, src_);
self_ = THCudaTensor_newTranspose(state, data, dimension, 0);
THCudaTensor_resizeAs(state, self, self_);
THCudaTensor_freeCopyTo(state, self_, self);
THCudaTensor_free(state, data);
}
struct dist_functor
{
const float exponent;
dist_functor(float exponent_) : exponent(exponent_) {}
__host__ __device__ float operator()(const float& x, const float& y) const
{
return pow(fabs(x-y), exponent);
}
};
float THCudaTensor_dist(THCState *state, THCudaTensor *self, THCudaTensor *src, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self, src));
self = THCudaTensor_newContiguous(state, self);
long size = THCudaTensor_nElement(state, self);
src = THCudaTensor_newContiguous(state, src);
thrust::device_ptr<float> self_data(THCudaTensor_data(state, self));
thrust::device_ptr<float> src_data(THCudaTensor_data(state, src));
float result = thrust::inner_product(self_data, self_data+size, src_data, (float) 0,thrust::plus<float>(), dist_functor(value));
THCudaTensor_free(state, src);
THCudaTensor_free(state, self);
return pow(result, (float)1.0/value);
}
void THCudaTensor_rand(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
THAssert(THCudaTensor_checkGPU(state, 1, r_));
THCudaTensor_resize(state, r_, size, NULL);
THCudaTensor_uniform(state, r_, 0, 1);
}
void THCudaTensor_randn(THCState *state, THCudaTensor *r_, THLongStorage *size)
{
THAssert(THCudaTensor_checkGPU(state, 1, r_));
THCudaTensor_resize(state, r_, size, NULL);
THCudaTensor_normal(state, r_, 0, 1);
}
|
7caa895a85c03c1fa9ff42c4e5b2f6793e3372f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/*
Location qualifiers
__global__
Defines a kernel.
Runs on the GPU, called from the CPU.
Executed with <<<dim3>>> arguments.
__device__
Runs on the GPU, called from the GPU.
Can be used for variables too.
__host__
Runs on the CPU, called from the CPU.
Qualifiers can be mixed
Eg __host__ __device__ foo()
Code compiled for both CPU and GPU
useful for testing
*/
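// Illustrative sketch, not part of the original sample: mixing the qualifiers
// described above yields a function compiled for both CPU and GPU. It can be
// called from host code and from kernels alike, which is what makes the
// "useful for testing" remark work in practice.
__host__ __device__ int squareValue(int x)
{
	return x * x;
}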
__global__ void addArrays(int* a, int* b, int* c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main() {
const int count = 5;
const int size = count * sizeof(int);
int ha[] = { 1, 2, 3, 4, 5 };
int hb[] = { 10, 20, 30 , 40, 50 };
int hc[count];
int *da, *db, *dc;
hipMalloc(&da, size);
hipMalloc(&db, size);
hipMalloc(&dc, size);
hipMemcpy(da, ha, size, hipMemcpyHostToDevice);
hipMemcpy(db, hb, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addArrays), dim3(1),dim3(count), 0, 0, da, db, dc);
hipMemcpy(hc, dc, size, hipMemcpyDeviceToHost);
for (int i = 0; i < count; ++i) {
printf("%d ", hc[i]);
}
	hipFree(da);
	hipFree(db);
	hipFree(dc);

	getchar();
	return 0;
}
/*
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
*/ | 7caa895a85c03c1fa9ff42c4e5b2f6793e3372f3.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/*
Location qualifiers
__global__
Defines a kernel.
Runs on the GPU, called from the CPU.
Executed with <<<dim3>>> arguments.
__device__
Runs on the GPU, called from the GPU.
Can be used for variables too.
__host__
Runs on the CPU, called from the CPU.
Qualifiers can be mixed
Eg __host__ __device__ foo()
Code compiled for both CPU and GPU
useful for testing
*/
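// Illustrative sketch: a minimal helper for picking the <<<blocks, threads>>> launch
// configuration mentioned above; the name `launchBlocks` is hypothetical and not part
// of the original sample.
static inline dim3 launchBlocks(int n, int threadsPerBlock)
{
	// round up so blocks * threadsPerBlock covers all n elements
	return dim3((n + threadsPerBlock - 1) / threadsPerBlock);
}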
__global__ void addArrays(int* a, int* b, int* c)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main() {
const int count = 5;
const int size = count * sizeof(int);
int ha[] = { 1, 2, 3, 4, 5 };
int hb[] = { 10, 20, 30 , 40, 50 };
int hc[count];
int *da, *db, *dc;
cudaMalloc(&da, size);
cudaMalloc(&db, size);
cudaMalloc(&dc, size);
cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice);
addArrays<<<1,count>>> (da, db, dc);
cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
for (int i = 0; i < count; ++i) {
printf("%d ", hc[i]);
}
cudaFree(da); cudaFree(db); cudaFree(dc); // release the device buffers before exiting
getchar();
}
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
*/ |
0ae31223b940dfa54b55a8dd77c11d13f5532695.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include <sys/time.h>
#include "rocm_smi/rocm_smi.h"
#include "cuda_functions.h"
// includes, project
//#include "magma.h"
#include "cuda_multi_gemm_unif.cu"
//#include "cuda_add_vec.h"
//My includes
// #include "debug_fns.h"
//#include "transformations.h"
//switch the comments to toggle debug mode
//#define D
#define D for(;0;)
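// Usage sketch (illustrative): a statement prefixed with the macro, e.g.
//     D printf("debug info\n");
// expands to `for(;0;) printf(...)` with the stub definition above, so it never runs;
// switching to the empty `#define D` re-enables every such debug statement.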
double get_time( void )
{
struct timeval t;
gettimeofday( &t, NULL );
return t.tv_sec + t.tv_usec*1e-6;
}
__global__ void particles_in_nid(double *rpart){
int id = blockIdx.x*blockDim.x+threadIdx.x;
int n=10;
if(id == n){
//double *rpart = rpart1 + id * nr;
//int *ipart = ipart1 + id * ni;
int ie;
double xloc = rpart[id];
double yloc = rpart[id];
double zloc = rpart[id];
rpart[0]= rpart[0]+22;
rpart[1]= rpart[1]+id;
}
}
extern "C" void particles_in_nid_wrapper_(double* d_rpart, int* a_, double* adee_d_temp_) {
float time;
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
hipEventRecord(startEvent, 0);
double results[10];
// double *d_rfpts, *d_xerange;
// int *d_ifptsmap, *d_ifpts, *d_ipart, *d_nfpts;
int blockSize = 1024, gridSize, n=100;
gridSize = (int)ceil((float)n/blockSize);
printf (" particles-cuda.cu parinnidwrapper d_rpart(0) a adee2 %lf %d \n", d_rpart[0],a_[0]);
hipLaunchKernelGGL(( particles_in_nid), dim3(gridSize), dim3(blockSize), 0, 0, adee_d_temp_);
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error particles_in_nid : %s\n",hipGetErrorString(code));
}
hipMemcpy(results, adee_d_temp_, 10*sizeof(double), hipMemcpyDeviceToHost);
printf("inside gpu results value %lf %lf", results[0], results[1]);
// if(nfpts[0]>0){
// hipMemcpy(fptsmap, d_ifptsmap, nfpts[0]*sizeof(int), hipMemcpyDeviceToHost);
// hipMemcpy(rfpts, d_rfpts, nfpts[0]*nrf[0]*sizeof(double), hipMemcpyDeviceToHost);
// hipMemcpy(ifpts, d_ifpts, nfpts[0]*nif[0]*sizeof(int), hipMemcpyDeviceToHost);
// }
// printf ("print var 1st %d\n", nfpts);
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&time, startEvent, stopEvent);
code = hipPeekAtLastError();
if (code != hipSuccess){
//printf("cuda error after particles_in_nid kernel launch: %s\n",hipGetErrorString(code));
}
// printf ("print var 2nd %d\n", nfpts);
printf("particles in nid time is %f\n",time*1e-03);
}
__global__ void update_particle_location_kernel(double *rpart1, int *xdrange1, int *in_part, int *bc_part, int n, int ndim, int nr,int ni, int jx0, int jx1, int jx2, int jx3,int nbc_sum, int lr, int li,int *newn, int llpart){
int id = blockIdx.x*blockDim.x+threadIdx.x;
//commented by adeesha change 100 back to n*ndim
if(id < n*ndim){
int i = id/ndim;
in_part[i]=0;
int j = id%ndim;
int off = i*lr+j;
double *rpart = rpart1+off;
int *xdrange = xdrange1+2*j;
if (rpart[jx0-1] < xdrange[0]){
if ( (bc_part[0] == 0 && j == 0) || (bc_part[2] == 0 && j == 1) || (bc_part[4] == 0 && j == 2) ){
rpart[jx0-1] = xdrange[1] - fabs(xdrange[0] - rpart[jx0-1]);
rpart[jx1-1] = xdrange[1] + fabs(xdrange[0] - rpart[jx1-1]);
rpart[jx2-1] = xdrange[1] + fabs(xdrange[0] - rpart[jx2-1]);
rpart[jx3-1] = xdrange[1] + fabs(xdrange[0] - rpart[jx3-1]);
}
else if ( (bc_part[0] != 0 && j == 0) || (bc_part[2] != 0 && j == 1) || (bc_part[4] != 0 && j == 2) ){
atomicExch(in_part+i, -1);
}
}
if (rpart[jx0-1] > xdrange[1]){
if ( (bc_part[0] == 0 && j == 0) || (bc_part[2] == 0 && j == 1) || (bc_part[4] == 0 && j == 2) ){
rpart[jx0-1] = xdrange[0] + fabs(xdrange[1] - rpart[jx0-1]);
rpart[jx1-1] = xdrange[0] - fabs(xdrange[1] - rpart[jx1-1]);
rpart[jx2-1] = xdrange[0] - fabs(xdrange[1] - rpart[jx2-1]);
rpart[jx3-1] = xdrange[0] - fabs(xdrange[1] - rpart[jx3-1]);
}
else if ( (bc_part[0] != 0 && j == 0) || (bc_part[2] != 0 && j == 1) || (bc_part[4] != 0 && j == 2) ){
atomicExch(in_part+i, -1);
}
}
__syncthreads();
if(nbc_sum > 0){
int ic=-1;
if(j==0){
if(in_part[i]==0){
int temploop;
for(temploop=0;temploop<=i;temploop++){
if(in_part[i]==0){
ic++;
}
}
if(ic!=i){
int copynr, copyni;
for(copynr=0;copynr<nr;copynr++){
rpart[ic*lr+copynr]=rpart[i*lr+copynr];
}
for(copyni=0;copyni<ni;copyni++){
rpart[ic*li+copyni]=rpart[i*li+copyni];
}
}
}
}
__syncthreads();
if(i==n-1){
newn[0]=ic;
}
}
}
}
extern "C" void update_particle_location_wrapper_(double *d_rpart, int *d_xdrange, int *d_bc_part, int *n, int *ndim, int *nr,int *ni, int *jx0, int *jx1, int *jx2,int *jx3, int *nbc_sum, int *lr, int *li, int *llpart){
float time;
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
hipEventRecord(startEvent, 0);
int* d_newn;
hipMalloc((void**)&d_newn, sizeof(int));
int *d_in_part;
hipMalloc((void**)&d_in_part,llpart[0]*sizeof(int));
printf("values in update_particle_location_wrapper n[0], ndim[0],nr[0] %d %d %d\n",n[0],ndim[0],nr[0]);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]*ndim[0]/blockSize);
hipLaunchKernelGGL(( update_particle_location_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_rpart,d_xdrange,d_in_part,d_bc_part,n[0],ndim[0],nr[0],ni[0],jx0[0],jx1[0],jx2[0],jx3[0],nbc_sum[0],lr[0],li[0],d_newn,llpart[0]);
printf("GPU : particle.cu : update_aprticle_location_wrapper\n");
hipMemcpy(n,d_newn,sizeof(int), hipMemcpyDeviceToHost); //need to check whether the n is updated later. adeesha
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error update_particle_location : %s\n",hipGetErrorString(code));
}
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&time, startEvent, stopEvent);
// printf ("print var 2nd %d\n", nfpts);
//printf("particles in nid time is %f\n",time*1e-03);
hipFree(d_newn);
hipFree(d_in_part);
}
__global__ void update_particle_location_kernel(double *rpart, int *xdrange, double *v_part, double *rxbo, int n, int ndim, int jv0, int jv1, int jv2, int jv3,int lr, int li){
int id = blockIdx.x*blockDim.x+threadIdx.x;
//commented by adeesha change 100 back to n*ndim
if(id < n*ndim){
int i = id/ndim;
int j = id%ndim;
// following is from place_particles_user, but currently does not affect the else part
rxbo[0] = xdrange[0];// ! X-Left
rxbo[1] = xdrange[1];// ! X-Righ;
rxbo[2] = xdrange[2];// ! Y-Left
rxbo[3] = xdrange[3];// ! Y-Right
rxbo[4] = xdrange[4];// ! Z-Left
rxbo[5] = xdrange[5];// ! Z-Right
rpart[i*lr+jv0+j]=v_part[j]; //original code says n instead of i
rpart[i*lr+jv1+j]=v_part[j]; //original code says n instead of i
rpart[i*lr+jv2+j]=v_part[j]; //original code says n instead of i
rpart[i*lr+jv3+j]=v_part[j]; //original code says n instead of i
}
}
extern "C" void place_particles_else_gpu_wrapper_(double *d_rpart,int *d_xdrange,double *d_v_part,double *d_rxbo,int *n,int *ndim,int *jv0,int *jv1,int *jv2, int* jv3,int *lr,int *li){
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]*3/blockSize);
//place_particles_else_gpu_kernel<<<gridSize, blockSize>>>(d_rpart,d_xdrange,d_in_part,d_bc_part,n[0],ndim[0],nr[0],ni[0],jx0[0],jx1[0],jx2[0],jx3[0],nbc_sum[0],lr[0],li[0],d_newn,llpart[0]);
// printf("GPU : particle.cu : update_aprticle_location_wrapper\n");
hipError_t code = hipPeekAtLastError();
if (code != hipSuccess){
printf("cuda error update_particle_location : %s\n",hipGetErrorString(code));
}
}
 | 0ae31223b940dfa54b55a8dd77c11d13f5532695.cu | // includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include <sys/time.h>
#include "nvml.h"
#include "cuda_functions.h"
// includes, project
//#include "magma.h"
#include "cuda_multi_gemm_unif.cu"
//#include "cuda_add_vec.h"
//My includes
// #include "debug_fns.h"
//#include "transformations.h"
//switch the comments to toggle debug mode
//#define D
#define D for(;0;)
double get_time( void )
{
struct timeval t;
gettimeofday( &t, NULL );
return t.tv_sec + t.tv_usec*1e-6;
}
__global__ void particles_in_nid(double *rpart){
int id = blockIdx.x*blockDim.x+threadIdx.x;
int n=10;
if(id == n){
//double *rpart = rpart1 + id * nr;
//int *ipart = ipart1 + id * ni;
int ie;
double xloc = rpart[id];
double yloc = rpart[id];
double zloc = rpart[id];
rpart[0]= rpart[0]+22;
rpart[1]= rpart[1]+id;
}
}
extern "C" void particles_in_nid_wrapper_(double* d_rpart, int* a_, double* adee_d_temp_) {
float time;
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
cudaEventRecord(startEvent, 0);
double results[10];
// double *d_rfpts, *d_xerange;
// int *d_ifptsmap, *d_ifpts, *d_ipart, *d_nfpts;
int blockSize = 1024, gridSize, n=100;
gridSize = (int)ceil((float)n/blockSize);
printf (" particles-cuda.cu parinnidwrapper d_rpart(0) a adee2 %lf %d \n", d_rpart[0],a_[0]);
particles_in_nid<<<gridSize, blockSize>>>(adee_d_temp_);
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error particles_in_nid : %s\n",cudaGetErrorString(code));
}
cudaMemcpy(results, adee_d_temp_, 10*sizeof(double), cudaMemcpyDeviceToHost);
printf("inside gpu results value %lf %lf", results[0], results[1]);
// if(nfpts[0]>0){
// cudaMemcpy(fptsmap, d_ifptsmap, nfpts[0]*sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(rfpts, d_rfpts, nfpts[0]*nrf[0]*sizeof(double), cudaMemcpyDeviceToHost);
// cudaMemcpy(ifpts, d_ifpts, nfpts[0]*nif[0]*sizeof(int), cudaMemcpyDeviceToHost);
// }
// printf ("print var 1st %d\n", nfpts);
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&time, startEvent, stopEvent);
code = cudaPeekAtLastError();
if (code != cudaSuccess){
//printf("cuda error after particles_in_nid kernel launch: %s\n",cudaGetErrorString(code));
}
// printf ("print var 2nd %d\n", nfpts);
printf("particles in nid time is %f\n",time*1e-03);
}
__global__ void update_particle_location_kernel(double *rpart1, int *xdrange1, int *in_part, int *bc_part, int n, int ndim, int nr,int ni, int jx0, int jx1, int jx2, int jx3,int nbc_sum, int lr, int li,int *newn, int llpart){
int id = blockIdx.x*blockDim.x+threadIdx.x;
//commented by adeesha change 100 back to n*ndim
if(id < n*ndim){
int i = id/ndim;
in_part[i]=0;
int j = id%ndim;
int off = i*lr+j;
double *rpart = rpart1+off;
int *xdrange = xdrange1+2*j;
if (rpart[jx0-1] < xdrange[0]){
if ( (bc_part[0] == 0 && j == 0) || (bc_part[2] == 0 && j == 1) || (bc_part[4] == 0 && j == 2) ){
rpart[jx0-1] = xdrange[1] - fabs(xdrange[0] - rpart[jx0-1]);
rpart[jx1-1] = xdrange[1] + fabs(xdrange[0] - rpart[jx1-1]);
rpart[jx2-1] = xdrange[1] + fabs(xdrange[0] - rpart[jx2-1]);
rpart[jx3-1] = xdrange[1] + fabs(xdrange[0] - rpart[jx3-1]);
}
else if ( (bc_part[0] != 0 && j == 0) || (bc_part[2] != 0 && j == 1) || (bc_part[4] != 0 && j == 2) ){
atomicExch(in_part+i, -1);
}
}
if (rpart[jx0-1] > xdrange[1]){
if ( (bc_part[0] == 0 && j == 0) || (bc_part[2] == 0 && j == 1) || (bc_part[4] == 0 && j == 2) ){
rpart[jx0-1] = xdrange[0] + fabs(xdrange[1] - rpart[jx0-1]);
rpart[jx1-1] = xdrange[0] - fabs(xdrange[1] - rpart[jx1-1]);
rpart[jx2-1] = xdrange[0] - fabs(xdrange[1] - rpart[jx2-1]);
rpart[jx3-1] = xdrange[0] - fabs(xdrange[1] - rpart[jx3-1]);
}
else if ( (bc_part[0] != 0 && j == 0) || (bc_part[2] != 0 && j == 1) || (bc_part[4] != 0 && j == 2) ){
atomicExch(in_part+i, -1);
}
}
__syncthreads();
if(nbc_sum > 0){
int ic=-1;
if(j==0){
if(in_part[i]==0){
int temploop;
for(temploop=0;temploop<=i;temploop++){
if(in_part[i]==0){
ic++;
}
}
if(ic!=i){
int copynr, copyni;
for(copynr=0;copynr<nr;copynr++){
rpart[ic*lr+copynr]=rpart[i*lr+copynr];
}
for(copyni=0;copyni<ni;copyni++){
rpart[ic*li+copyni]=rpart[i*li+copyni];
}
}
}
}
__syncthreads();
if(i==n-1){
newn[0]=ic;
}
}
}
}
extern "C" void update_particle_location_wrapper_(double *d_rpart, int *d_xdrange, int *d_bc_part, int *n, int *ndim, int *nr,int *ni, int *jx0, int *jx1, int *jx2,int *jx3, int *nbc_sum, int *lr, int *li, int *llpart){
float time;
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
cudaEventRecord(startEvent, 0);
int* d_newn;
cudaMalloc((void**)&d_newn, sizeof(int));
int *d_in_part;
cudaMalloc((void**)&d_in_part,llpart[0]*sizeof(int));
printf("values in update_particle_location_wrapper n[0], ndim[0],nr[0] %d %d %d\n",n[0],ndim[0],nr[0]);
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]*ndim[0]/blockSize);
update_particle_location_kernel<<<gridSize, blockSize>>>(d_rpart,d_xdrange,d_in_part,d_bc_part,n[0],ndim[0],nr[0],ni[0],jx0[0],jx1[0],jx2[0],jx3[0],nbc_sum[0],lr[0],li[0],d_newn,llpart[0]);
printf("GPU : particle.cu : update_aprticle_location_wrapper\n");
cudaMemcpy(n,d_newn,sizeof(int), cudaMemcpyDeviceToHost); //need to check whether the n is updated later. adeesha
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error update_particle_location : %s\n",cudaGetErrorString(code));
}
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&time, startEvent, stopEvent);
// printf ("print var 2nd %d\n", nfpts);
//printf("particles in nid time is %f\n",time*1e-03);
cudaFree(d_newn);
cudaFree(d_in_part);
}
__global__ void update_particle_location_kernel(double *rpart, int *xdrange, double *v_part, double *rxbo, int n, int ndim, int jv0, int jv1, int jv2, int jv3,int lr, int li){
int id = blockIdx.x*blockDim.x+threadIdx.x;
//commented by adeesha change 100 back to n*ndim
if(id < n*ndim){
int i = id/ndim;
int j = id%ndim;
// following is from place_particles_user, but currently does not affect the else part
rxbo[0] = xdrange[0];// ! X-Left
rxbo[1] = xdrange[1];// ! X-Righ;
rxbo[2] = xdrange[2];// ! Y-Left
rxbo[3] = xdrange[3];// ! Y-Right
rxbo[4] = xdrange[4];// ! Z-Left
rxbo[5] = xdrange[5];// ! Z-Right
rpart[i*lr+jv0+j]=v_part[j]; //original code says n instead of i
rpart[i*lr+jv1+j]=v_part[j]; //original code says n instead of i
rpart[i*lr+jv2+j]=v_part[j]; //original code says n instead of i
rpart[i*lr+jv3+j]=v_part[j]; //original code says n instead of i
}
}
extern "C" void place_particles_else_gpu_wrapper_(double *d_rpart,int *d_xdrange,double *d_v_part,double *d_rxbo,int *n,int *ndim,int *jv0,int *jv1,int *jv2, int* jv3,int *lr,int *li){
int blockSize = 1024, gridSize;
gridSize = (int)ceil((float)n[0]*3/blockSize);
//place_particles_else_gpu_kernel<<<gridSize, blockSize>>>(d_rpart,d_xdrange,d_in_part,d_bc_part,n[0],ndim[0],nr[0],ni[0],jx0[0],jx1[0],jx2[0],jx3[0],nbc_sum[0],lr[0],li[0],d_newn,llpart[0]);
// printf("GPU : particle.cu : update_aprticle_location_wrapper\n");
cudaError_t code = cudaPeekAtLastError();
if (code != cudaSuccess){
printf("cuda error update_particle_location : %s\n",cudaGetErrorString(code));
}
}
|
bf098530908f6857019fa6286ff3bf8eda26a7d7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <helpers/ConstantShapeHelper.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/scatter.h>
#include <numeric>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// x - indices, y - contains number of bad indices, z - input/output
template <typename X>
SD_KERNEL static void checkIndicesCuda(const void *vx, const sd::LongType *xShapeInfo, sd::LongType *y,
const sd::LongType *zShapeInfo, const int axis) {
const auto x = reinterpret_cast<const X *>(vx);
__shared__ int xRank, *coords, xLastDim;
__shared__ sd::LongType xLen, numOfBadIndxPerBlock;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
xRank = shape::rank(xShapeInfo);
xLen = shape::length(xShapeInfo);
numOfBadIndxPerBlock = 0;
}
__syncthreads();
auto xCoords = coords + threadIdx.x * xRank;
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
shape::index2coords(i, xShapeInfo, xCoords);
const sd::LongType currentInd = x[shape::getOffset(xShapeInfo, xCoords)];
if (currentInd >= shape::sizeAt(zShapeInfo, axis == -1 ? xCoords[xRank - 1] : axis)) {
printf("checkIndices cuda: out of range element %lld at index %lld \n", currentInd, i);
sd::math::atomics::sd_atomicAdd<sd::LongType>(&numOfBadIndxPerBlock, 1);
}
}
__syncthreads();
if (threadIdx.x == 0 && numOfBadIndxPerBlock != 0)
sd::math::atomics::sd_atomicAdd<sd::LongType>(y, numOfBadIndxPerBlock);
}
///////////////////////////////////////////////////////////////////
template <typename X>
static void checkIndicesCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t *stream, const void *vx, const sd::LongType *xShapeInfo,
sd::LongType *y, const sd::LongType *zShapeInfo, const int axis) {
hipLaunchKernelGGL(( checkIndicesCuda<X>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, y, zShapeInfo, axis);
}
///////////////////////////////////////////////////////////////////
sd::LongType checkIndices(sd::LaunchContext *context, const NDArray &indices, const NDArray &output, const int axis) {
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (indices.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * indices.rankOf() + 256;
const auto xType = indices.dataType();
PointersManager manager(context, "scatterNDcheckIndices");
// scalar, initial value = 0
NDArray numOfBadIndx(sd::DataType::INT64, context, true);
NDArray::prepareSpecialUse({&numOfBadIndx}, {&indices});
BUILD_SINGLE_SELECTOR(xType, checkIndicesCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), indices.specialBuffer(),
indices.specialShapeInfo(), reinterpret_cast<sd::LongType *>(numOfBadIndx.specialBuffer()),
output.specialShapeInfo(), axis),
SD_INDEXING_TYPES);
NDArray::registerSpecialUse({&numOfBadIndx}, {&indices});
manager.synchronize();
return numOfBadIndx.t<sd::LongType>(0);
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - input/output
template <typename X, typename Y>
SD_KERNEL static void scatterLockCuda(const int opCode, const void *vx, const sd::LongType *xShapeInfo, const void *vy,
const sd::LongType *yShapeInfo, void *vz, const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<Y *>(vz);
__shared__ int xRank, yRank, zRank, xNonUnitDim, yNonUnitDim, zNonUnitDim, *coords;
__shared__ sd::LongType xLen, zLen;
__shared__ bool is1Dcase, xySameStride;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
xLen = shape::length(xShapeInfo);
zLen = shape::length(zShapeInfo);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
xNonUnitDim = yNonUnitDim = zNonUnitDim = 0;
is1Dcase = (shape::isCommonVector(zShapeInfo, zNonUnitDim) || shape::isScalar(zShapeInfo)) &&
(shape::isCommonVector(yShapeInfo, yNonUnitDim) || shape::isScalar(yShapeInfo)) &&
(shape::isCommonVector(xShapeInfo, xNonUnitDim) || shape::isScalar(xShapeInfo));
if (is1Dcase) xySameStride = shape::stride(xShapeInfo)[xNonUnitDim] == shape::stride(yShapeInfo)[yNonUnitDim];
}
__syncthreads();
sd::LongType yOffset, zOffset;
int zFirstCoord, *yCoords, *zCoords;
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x) {
if (!is1Dcase) {
yCoords = coords + threadIdx.x * (yRank + zRank);
zCoords = yCoords + yRank;
shape::index2coords(i, zShapeInfo, zCoords);
}
for (sd::LongType j = 0; j < xLen; ++j) {
if (is1Dcase) {
yOffset = j * shape::stride(yShapeInfo)[yNonUnitDim];
zFirstCoord = x[xySameStride ? yOffset : j * shape::stride(xShapeInfo)[xNonUnitDim]];
if (i != zFirstCoord) continue;
zOffset = i * shape::stride(zShapeInfo)[zNonUnitDim];
}
else {
shape::index2coords(j, xShapeInfo, yCoords); // first xRank coordinates in yCoords are the same for y and x
zFirstCoord = x[shape::getOffset(xShapeInfo, yCoords)];
if (zCoords[0] != zFirstCoord) continue;
for (sd::Unsigned k = 0; k < yRank - xRank; ++k) yCoords[xRank + k] = zCoords[k + 1];
yOffset = shape::getOffset(yShapeInfo, yCoords);
zOffset = shape::getOffset(zShapeInfo, zCoords);
}
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if (z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if (z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
}
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - input/output
template <typename X, typename Y>
SD_KERNEL static void scatterCuda(const int opCode, const void *vx, const sd::LongType *xShapeInfo, const void *vy,
const sd::LongType *yShapeInfo, void *vz, const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<Y *>(vz);
__shared__ int xRank, yRank, zRank, xNonUnitDim, yNonUnitDim, zNonUnitDim, *coords;
__shared__ sd::LongType yLen;
__shared__ bool is1Dcase, xySameStride;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
yLen = shape::length(yShapeInfo);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
xNonUnitDim = yNonUnitDim = zNonUnitDim = 0;
is1Dcase = (shape::isCommonVector(zShapeInfo, zNonUnitDim) || shape::isScalar(zShapeInfo)) &&
(shape::isCommonVector(yShapeInfo, yNonUnitDim) || shape::isScalar(yShapeInfo)) &&
(shape::isCommonVector(xShapeInfo, xNonUnitDim) || shape::isScalar(xShapeInfo));
if (is1Dcase) xySameStride = shape::stride(xShapeInfo)[xNonUnitDim] == shape::stride(yShapeInfo)[yNonUnitDim];
}
__syncthreads();
sd::LongType xOffset, yOffset, zOffset;
int *yCoords, *zCoords;
if (!is1Dcase) {
yCoords = coords + threadIdx.x * (yRank + zRank);
zCoords = yCoords + yRank;
}
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < yLen; i += gridDim.x * blockDim.x) {
if (is1Dcase) {
yOffset = i * shape::stride(yShapeInfo)[yNonUnitDim];
zOffset = x[xySameStride ? yOffset : i * shape::stride(xShapeInfo)[xNonUnitDim]] *
shape::stride(zShapeInfo)[zNonUnitDim];
} else {
shape::index2coords(i, yShapeInfo, yCoords);
yOffset = shape::getOffset(yShapeInfo, yCoords);
xOffset =
shape::getOffset(xShapeInfo, yCoords); // first xRank coordinates in yCoords are the same for y and x -> for
// (sd::Unsigned j = 0; j < xRank; ++j) xCoords[j] = yCoords[j];
zCoords[0] = x[xOffset];
for (sd::Unsigned j = 0; j < yRank - xRank; ++j) zCoords[j + 1] = yCoords[xRank + j];
zOffset = shape::getOffset(zShapeInfo, zCoords);
}
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if (z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if (z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Y>
static void scatterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t *stream, const int opCode, const void *vx,
const sd::LongType *xShapeInfo, const void *vy, const sd::LongType *yShapeInfo,
void *vz, const sd::LongType *zShapeInfo, const bool lock) {
if (lock)
hipLaunchKernelGGL(( scatterLockCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, opCode, vx, xShapeInfo, vy,
yShapeInfo, vz, zShapeInfo);
else
hipLaunchKernelGGL(( scatterCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, opCode, vx, xShapeInfo, vy, yShapeInfo,
vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
void scatter(sd::LaunchContext *context, pairwise::Ops op, const NDArray &indices, const NDArray &updates,
NDArray &output, const bool lock) {
const auto xType = indices.dataType();
const auto yType = updates.dataType();
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = ((lock ? output.lengthOf() : updates.lengthOf()) + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = sizeof(int) * threadsPerBlock * (updates.rankOf() + output.rankOf()) + 256;
PointersManager manager(context, "scatter");
NDArray::prepareSpecialUse({&output}, {&updates, &indices});
BUILD_DOUBLE_SELECTOR(xType, yType, scatterCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), op,
indices.specialBuffer(), indices.specialShapeInfo(), updates.specialBuffer(),
updates.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), lock),
SD_INDEXING_TYPES, SD_GENERIC_NUMERIC_TYPES);
NDArray::registerSpecialUse({&output}, {&updates, &indices});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - output
template <typename X, typename Y>
SD_KERNEL static void scatterNDLockCuda(const int opCode, const void *vx, const sd::LongType *xShapeInfo,
const void *vy, const sd::LongType *yShapeInfo, void *vz,
const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<Y *>(vz);
__shared__ int xRank, yRank, zRank, biggerXYRank, xLastDim, *coords, xNonUnitDim, yNonUnitDim, zNonUnitDim;
__shared__ sd::LongType zLen, len;
__shared__ bool is1Dcase;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
xLastDim = shape::sizeAt(xShapeInfo, -1);
biggerXYRank = xRank > yRank ? xRank : yRank;
xNonUnitDim = yNonUnitDim = zNonUnitDim = 0;
is1Dcase = (shape::isCommonVector(zShapeInfo, zNonUnitDim) || shape::isScalar(zShapeInfo)) &&
(shape::isCommonVector(yShapeInfo, yNonUnitDim) || shape::isScalar(yShapeInfo)) &&
(shape::isCommonVector(xShapeInfo, xNonUnitDim) || shape::isScalar(xShapeInfo));
len = is1Dcase ? shape::length(xShapeInfo) : shape::length(xShapeInfo) / xLastDim;
zLen = shape::length(zShapeInfo);
}
__syncthreads();
sd::LongType yOffset, zOffset, xOffset;
int *yCoords, *zCoords;
if (!is1Dcase) {
yCoords = coords + threadIdx.x * (biggerXYRank + zRank);
zCoords = yCoords + biggerXYRank;
}
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x) {
if (!is1Dcase) shape::index2coords(i, zShapeInfo, zCoords);
for (sd::LongType j = 0; j < len;
++j) { // if !is1Dcase then we loop through first xRank-1 dimensions of x, that is we exclude last x dimension
if (is1Dcase) {
if (x[j * shape::stride(xShapeInfo)[xNonUnitDim]] != i) continue;
yOffset = j * shape::stride(yShapeInfo)[yNonUnitDim];
zOffset = i * shape::stride(zShapeInfo)[zNonUnitDim];
} else {
shape::index2coords(j, xRank - 1, shape::shapeOf(const_cast<sd::LongType *>(xShapeInfo)),
yCoords); // first xRank-1 coordinates in yCoords are the same for y and x
// first iteration
yCoords[xRank - 1] = 0;
xOffset = shape::getOffset(xShapeInfo, yCoords);
if (zCoords[0] != x[xOffset]) continue;
// rest iterations
bool matched = true;
for (sd::Unsigned k = 1; k < xLastDim; ++k) {
yCoords[xRank - 1] = k;
xOffset += shape::stride(xShapeInfo)[xRank - 1];
if (zCoords[k] != x[xOffset]) {
matched = false;
break;
}
}
if (!matched) continue;
for (sd::Unsigned k = xLastDim; k < zRank; ++k) yCoords[yRank - zRank + k] = zCoords[k];
yOffset = shape::getOffset(yShapeInfo, yCoords);
zOffset = shape::getOffset(zShapeInfo, zCoords);
}
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if (z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if (z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
}
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - output
template <typename X, typename Y>
SD_KERNEL static void scatterNDCuda(const int opCode, const void *vx, const sd::LongType *xShapeInfo, const void *vy,
const sd::LongType *yShapeInfo, void *vz, const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<Y *>(vz);
__shared__ int xRank, yRank, zRank, biggerXYRank, xLastDim, *coords, xNonUnitDim, yNonUnitDim, zNonUnitDim;
__shared__ sd::LongType yLen;
__shared__ bool is1Dcase;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
yLen = shape::length(yShapeInfo);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
xLastDim = shape::sizeAt(xShapeInfo, -1);
biggerXYRank = xRank > yRank ? xRank : yRank;
xNonUnitDim = yNonUnitDim = zNonUnitDim = 0;
is1Dcase = (shape::isCommonVector(zShapeInfo, zNonUnitDim) || shape::isScalar(zShapeInfo)) &&
(shape::isCommonVector(yShapeInfo, yNonUnitDim) || shape::isScalar(yShapeInfo)) &&
(shape::isCommonVector(xShapeInfo, xNonUnitDim) || shape::isScalar(xShapeInfo));
}
__syncthreads();
sd::LongType yOffset, zOffset;
int *yCoords, *zCoords;
if (!is1Dcase) {
yCoords = coords + threadIdx.x * (biggerXYRank + zRank);
zCoords = yCoords + biggerXYRank;
}
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < yLen; i += gridDim.x * blockDim.x) {
if (is1Dcase) {
yOffset = i * shape::stride(yShapeInfo)[yNonUnitDim];
zOffset = x[i * shape::stride(xShapeInfo)[xNonUnitDim]] * shape::stride(zShapeInfo)[zNonUnitDim];
} else {
shape::index2coords(i, yShapeInfo, yCoords);
yOffset = shape::getOffset(yShapeInfo, yCoords);
if (yRank >= xRank)
zCoords[xLastDim] = yCoords[xRank - 1]; // saving y coordinate, since it might be changed in next instructions
for (sd::Unsigned j = 0; j < xLastDim; ++j) { // first xRank-1 coordinates in yCoords are the same for y and x
yCoords[xRank - 1] = j;
zCoords[j] = x[shape::getOffset(xShapeInfo, yCoords)];
}
for (sd::Unsigned j = xLastDim + 1; j < zRank; ++j) zCoords[j] = yCoords[yRank - zRank + j];
zOffset = shape::getOffset(zShapeInfo, zCoords);
}
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if (z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if (z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Y>
static void scatterNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t *stream, const int opCode, const void *vx,
const sd::LongType *xShapeInfo, const void *vy, const sd::LongType *yShapeInfo,
void *vz, const sd::LongType *zShapeInfo, const bool lock) {
if (lock)
hipLaunchKernelGGL(( scatterNDLockCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, opCode, vx, xShapeInfo, vy,
yShapeInfo, vz, zShapeInfo);
else
hipLaunchKernelGGL(( scatterNDCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, opCode, vx, xShapeInfo, vy, yShapeInfo,
vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
void scatterND(sd::LaunchContext *context, pairwise::Ops op, const NDArray &indices, const NDArray &updates,
NDArray &output, const bool lock) {
const int xRank = indices.rankOf();
const int yRank = updates.rankOf();
const int zRank = output.rankOf();
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = ((lock ? output.lengthOf() : updates.lengthOf()) + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * ((yRank > xRank ? yRank : xRank) + zRank) + 256;
const auto xType = indices.dataType();
const auto yType = updates.dataType();
PointersManager manager(context, "scatterND");
NDArray::prepareSpecialUse({&output}, {&updates, &indices});
BUILD_DOUBLE_SELECTOR(xType, yType, scatterNDCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), op,
indices.specialBuffer(), indices.specialShapeInfo(), updates.specialBuffer(),
updates.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), lock),
SD_INDEXING_TYPES, SD_GENERIC_NUMERIC_TYPES);
NDArray::registerSpecialUse({&output}, {&updates, &indices});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Z>
SD_KERNEL void scatterForLossCuda(const void *vx, const sd::LongType *xShapeInfo, void *vy,
const sd::LongType *yShapeInfo, void *vz, const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
auto y = reinterpret_cast<Z *>(vy);
auto z = reinterpret_cast<Z *>(vz);
__shared__ sd::LongType xLen;
__shared__ int xRank, *sharedMem; // xRank = zRank, yRank = xRank + 1
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int *>(shmem);
xLen = shape::length(xShapeInfo);
xRank = shape::rank(xShapeInfo);
}
__syncthreads();
const auto xInd = threadIdx.x + blockIdx.x * blockDim.x;
if (xInd >= xLen) return;
auto coords = sharedMem + threadIdx.x * (xRank + 1);
shape::index2coords(xInd, xShapeInfo, coords);
// y last coordinate
coords[xRank] = x[shape::getOffset(xShapeInfo, coords)];
const auto yOffset = shape::getOffset(yShapeInfo, coords);
if (z == nullptr) { // gradient calculation
y[yOffset] -= 1.f;
} else {
z[shape::getOffset(zShapeInfo, coords)] = y[yOffset];
}
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void scatterForLossCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const hipStream_t *stream, const void *vx, const sd::LongType *xShapeInfo,
void *vy, const sd::LongType *yShapeInfo, void *vz,
const sd::LongType *zShapeInfo) {
hipLaunchKernelGGL(( scatterForLossCuda<X, Z>)
, dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
void scatterForLoss(sd::LaunchContext *context, const NDArray &indices, NDArray &updates, NDArray &output,
const bool calcGrad) {
// shapes of indices and output must be the same
// shape of indices should be the same as updates shape with last dimension excluded, for example if updates is
// {a,b,c} then indices should be {a,b}
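// illustrative shapes (hypothetical): updates {bS, nClasses}, indices {bS}, output {bS};
// the forward pass copies updates[i, indices[i]] into output[i], while calcGrad instead
// subtracts 1 from updates at those same positions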
PointersManager manager(context, "scatterForLoss");
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (indices.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = updates.rankOf() * sizeof(int) * threadsPerBlock + 128;
if (calcGrad) {
NDArray::prepareSpecialUse({&updates}, {&indices});
BUILD_DOUBLE_SELECTOR(
indices.dataType(), updates.dataType(), scatterForLossCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), indices.specialBuffer(),
indices.specialShapeInfo(), updates.specialBuffer(), updates.specialShapeInfo(), nullptr, nullptr),
SD_INDEXING_TYPES, SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&updates}, {&indices});
} else {
NDArray::prepareSpecialUse({&output}, {&indices, &updates});
BUILD_DOUBLE_SELECTOR(indices.dataType(), updates.dataType(), scatterForLossCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), indices.specialBuffer(),
indices.specialShapeInfo(), updates.specialBuffer(), updates.specialShapeInfo(),
output.specialBuffer(), output.specialShapeInfo()),
SD_INDEXING_TYPES, SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&indices, &updates});
}
manager.synchronize();
}
} // namespace helpers
} // namespace ops
} // namespace sd
/*
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void scatterLockCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const
hipStream_t *stream, const int opCode, const void* vx, const sd::LongType *xShapeInfo, const void* vy, const
sd::LongType *yTadShapeInfo, const sd::LongType *yOffsets, void* vz, const sd::LongType *zTadShapeInfo, const
sd::LongType *zOffsets, const sd::LongType xLen, const sd::LongType yTadLen, const sd::LongType zTadLen) {
scatterLockCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(opCode, vx, xShapeInfo, vy,
yTadShapeInfo, yOffsets, vz, zTadShapeInfo, zOffsets, xLen, yTadLen, zTadLen);
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - input/output
template<typename X, typename Y>
SD_KERNEL static void scatterLockCuda(const int opCode,
const void* vx, const sd::LongType *xShapeInfo,
const void* vy, const sd::LongType *yTadShapeInfo, const sd::LongType *yOffsets,
void* vz, const sd::LongType *zTadShapeInfo, const sd::LongType *zOffsets,
const sd::LongType xLen, const sd::LongType yTadLen, const sd::LongType zTadLen)
{
const int xRank = indices.rankOf();
std::vector<int> zTadDims = ShapeUtils::evalDimsToExclude(output.rankOf(), {0});
int sizeOfUpdDims = xRank;
if(output.rankOf() == updates.rankOf() && indices.isVector())
sizeOfUpdDims = 1;
std::vector<int> yTadDims(sizeOfUpdDims);
std::iota(yTadDims.begin(), yTadDims.end(), 0);
auto packY = sd::ConstantTadHelper::getInstance().tadForDimensions(updates.shapeInfo(),
ShapeUtils::evalDimsToExclude(updates.rankOf(), yTadDims)); auto packZ =
sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), zTadDims);
const sd::LongType zTadLen = shape::length(packZ.primaryShapeInfo());
const sd::LongType yTadLen = shape::length(packY.primaryShapeInfo());
const auto threadsPerBlock = sd::math::sd_max<int>(32, sd::math::sd_min<int>(zTadLen, 1024));
const auto blocksPerGrid = indices.lengthOf();
const auto xType = indices.dataType();
const auto yType = updates.dataType();
BUILD_DOUBLE_SELECTOR(xType, yType, scatterLockCudaLauncher, (blocksPerGrid, threadsPerBlock, 1024,
context->getCudaStream(), op, indices.specialBuffer(), indices.specialShapeInfo(), updates.specialBuffer(),
packY.specialShapeInfo(), packY.specialOffsets(), output.specialBuffer(), packZ.specialShapeInfo(),
packZ.specialOffsets(), indices.lengthOf(), yTadLen, zTadLen), SD_INDEXING_TYPES, SD_GENERIC_NUMERIC_TYPES);
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<Y*>(vz);
__shared__ bool vectorCase;
if(threadIdx.x == 0)
vectorCase = yTadLen == xLen && shape::rank(xShapeInfo) <= 1;
__syncthreads();
for (int e = 0; e < xLen; e++) {
const sd::LongType zIndex = x[shape::getIndexOffset(e, xShapeInfo)];
const bool isOwner = zIndex < gridDim.x ? blockIdx.x == zIndex : blockIdx.x == zIndex % gridDim.x;
if (!isOwner)
continue;
if(vectorCase) { // means z_rank = 1 and might be yTadLen != zTadLen in this case
if(threadIdx.x != 0)
continue;
const auto yOffset = shape::getIndexOffset(e, yTadShapeInfo);
const auto zOffset = shape::getIndexOffset(zIndex, zTadShapeInfo);
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if(z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if(z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
else { // yTadLen == zTadLen in this case
const Y* yTad = y + yOffsets[e];
Y* zTad = z + zOffsets[zIndex];
for (sd::LongType i = threadIdx.x; i < zTadLen; i += blockDim.x) {
const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo);
const auto zOffset = shape::getIndexOffset(i, zTadShapeInfo);
switch (opCode) {
case pairwise::Add:
zTad[zOffset] += yTad[yOffset];
break;
case pairwise::Subtract:
zTad[zOffset] -= yTad[yOffset];
break;
case pairwise::Multiply:
zTad[zOffset] *= yTad[yOffset];
break;
case pairwise::Divide:
zTad[zOffset] /= yTad[yOffset];
break;
case pairwise::ReverseSubtract:
zTad[zOffset] = yTad[yOffset] - zTad[zOffset];
break;
case pairwise::ReverseDivide:
zTad[zOffset] = yTad[yOffset] / zTad[zOffset];
break;
case pairwise::CopyPws:
zTad[zOffset] = yTad[yOffset];
break;
case pairwise::MaxPairwise:
if(zTad[zOffset] < yTad[yOffset]) zTad[zOffset] = yTad[yOffset];
break;
case pairwise::MinPairwise:
if(zTad[zOffset] > yTad[yOffset]) zTad[zOffset] = yTad[yOffset];
break;
default:
continue;
}
}
}
}
}
template<typename T, bool locking>
SD_KERNEL static void scatterCuda(const int opCode, const int numOfSubArrs,
void* vx, const sd::LongType *xShapeInfo, const sd::LongType
*xOffsets, void* vy, const sd::LongType *yShapeInfo, const sd::LongType *yOffsets, const int* indexes, unsigned int
arrLenX, unsigned int arrLenY) {
__shared__ T *x, *y;
if (locking) {
for (int e = 0; e < numOfSubArrs; e++) {
const auto xIndex = indexes[e];
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex %
gridDim.x;
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T *>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T *>(vy) + yOffsets[e];
}
__syncthreads();
for (sd::LongType i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
switch (opCode) {
case pairwise::Add:
x[xOffset] += y[yOffset];
break;
case pairwise::Subtract:
x[xOffset] -= y[yOffset];
break;
case pairwise::Multiply:
x[xOffset] *= y[yOffset];
break;
case pairwise::Divide:
x[xOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case pairwise::ReverseDivide:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case pairwise::CopyPws:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
} else {
for (int e = blockIdx.x; e < numOfSubArrs; e+= gridDim.x) {
if (threadIdx.x == 0) {
const auto xIndex = indexes[e];
x = reinterpret_cast<T *>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T *>(vy) + yOffsets[e];
}
__syncthreads();
for (sd::LongType i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
switch (opCode) {
case pairwise::Add:
x[xOffset] += y[yOffset];
break;
case pairwise::Subtract:
x[xOffset] -= y[yOffset];
break;
case pairwise::Multiply:
x[xOffset] *= y[yOffset];
break;
case pairwise::Divide:
x[xOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case pairwise::ReverseDivide:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case pairwise::CopyPws:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
}
}
template <typename T>
void scatter_(sd::LaunchContext *context, pairwise::Ops op, const NDArray& indices, const NDArray& updates,
NDArray& output, const bool lock) { std::vector<int> dims = {0}; auto inverted =
ShapeUtils::evalDimsToExclude(output.rankOf(), dims);
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), inverted);
auto packY = sd::ConstantTadHelper::getInstance().tadForDimensions(updates.shapeInfo(), inverted);
auto psX = packX.specialShapeInfo();
auto psY = packY.special();
PointersManager manager(context, "scatter");
auto poX = packX.specialOffsets();
auto poY = packY.special();
NDArray::prepareSpecialUse({&output}, {&updates, &indices});
unsigned int tadLengthX = shape::length(packX.primaryShapeInfo());
unsigned int tadLengthY = shape::length(packY.primary());
if (tadLengthX != tadLengthY)
throw std::runtime_error("scatter: Lengths of TADs must be equal");
auto blockSize = sd::math::sd_max<int>(32, sd::math::sd_min<int>(tadLengthX, 1024));
if (lock)
scatterCuda<T, true><<<512, blockSize, 1024, *context->getCudaStream()>>>(op, indices.lengthOf(),
output.specialBuffer(), psX, poX, updates.specialBuffer(), psY, poY, reinterpret_cast<int *>(indices.specialBuffer()),
tadLengthX, tadLengthY); else scatterCuda<T, false><<<512, blockSize, 1024, *context->getCudaStream()>>>(op,
indices.lengthOf(), output.specialBuffer(), psX, poX, updates.specialBuffer(), psY, poY, reinterpret_cast<int
*>(indices.specialBuffer()), tadLengthX, tadLengthY);
NDArray::registerSpecialUse({&output}, {&updates, &indices});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - output
template<typename X, typename Y>
SD_KERNEL static void scatterNDLockCuda(const int opCode,
const void* vx, const sd::LongType *xTadShapeInfo, const sd::LongType
*xOffsets, const void* vy, const sd::LongType *yTadShapeInfo, const sd::LongType *yOffsets, void* vz, const sd::LongType
*zTadShapeInfo, const sd::LongType *zOffsets, const sd::LongType *zShapeInfo, const sd::LongType numOfXTads, const
sd::LongType numOfZTads, const sd::LongType yTadLen) {
---------------------------------------------------------------------------
const int xLastDim = indices.sizeAt(-1);
// y_tad and z_tad have the same shape
std::vector<int> yTadDims(zRank - xLastDim), zTadDims(zRank - xLastDim);
for (int j = 0, i = zTadDims.size() - 1; i >=0 ; --i, ++j) {
yTadDims[i] = yRank - 1 - j;
zTadDims[i] = zRank - 1 - j;
}
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(indices.shapeInfo(), {xRank - 1});
auto packY = sd::ConstantTadHelper::getInstance().tadForDimensions(updates.shapeInfo(), yTadDims);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), zTadDims);
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = packZ.numberOfTads();
const int sharedMem = 8 * threadsPerBlock * xLastDim + 128;
---------------------------------------------------------------------------
// zTadLen == yTadLen if numOfZTads > 1, in opposite case z and y are vectors
// numOfXTads == numOfYTads if numOfZTads > 1, in opposite case z and y are vectors
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<Y*>(vz);
__shared__ sd::LongType *zTadCoords;
__shared__ int xLastDim;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
zTadCoords = reinterpret_cast<sd::LongType*>(shmem);
xLastDim = xTadShapeInfo[1]; // xTad has rank = 1 always
}
__syncthreads();
sd::LongType* zTadCoordsPerThread = zTadCoords + threadIdx.x * xLastDim;
for (sd::LongType i = 0; i < numOfXTads; ++i) {
const X* xTad = x + xOffsets[i];
for (sd::Unsigned k = 0; k < xLastDim; ++k)
zTadCoordsPerThread[k] = xTad[shape::getIndexOffset(k, xTadShapeInfo)];
const auto zTadIndex = shape::coords2index(xLastDim, zShapeInfo + 1, zTadCoordsPerThread);
const bool isOwner = zTadIndex < gridDim.x ? blockIdx.x == zTadIndex : blockIdx.x == zTadIndex % gridDim.x;
if(!isOwner)
continue;
if(numOfZTads == 1) { // yTadLen == numOfXTads in this case
if(threadIdx.x != 0)
continue;
const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo);
const auto zOffset = shape::getIndexOffset(zTadIndex, zTadShapeInfo);
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if(z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if(z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
else {
const auto yTad = y + yOffsets[i];
const auto zTad = z + zOffsets[zTadIndex];
for (sd::LongType j = threadIdx.x; j < yTadLen; j += blockDim.x) {
const auto yOffset = shape::getIndexOffset(j, yTadShapeInfo);
const auto zOffset = shape::getIndexOffset(j, zTadShapeInfo);
switch (opCode) {
case pairwise::Add:
zTad[zOffset] += yTad[yOffset];
break;
case pairwise::Subtract:
zTad[zOffset] -= yTad[yOffset];
break;
case pairwise::Multiply:
zTad[zOffset] *= yTad[yOffset];
break;
case pairwise::Divide:
zTad[zOffset] /= yTad[yOffset];
break;
case pairwise::ReverseSubtract:
zTad[zOffset] = yTad[yOffset] - zTad[zOffset];
break;
case pairwise::ReverseDivide:
zTad[zOffset] = yTad[yOffset] / zTad[zOffset];
break;
case pairwise::CopyPws:
zTad[zOffset] = yTad[yOffset];
break;
case pairwise::MaxPairwise:
if(zTad[zOffset] < yTad[yOffset]) zTad[zOffset] = yTad[yOffset];
break;
case pairwise::MinPairwise:
if(zTad[zOffset] > yTad[yOffset]) zTad[zOffset] = yTad[yOffset];
break;
default:
continue;
}
}
}
}
}
*/
// PointersManager manager(&context, "NativeOps::concat");
// PointersManager::printDevContentOnDev<int>(vx, 2);
// PointersManager::printDevContentOnDev<sd::LongType>(xShapeInfo, 8);
// PointersManager::printDevContentOnDev<float>(vy, 8);
// PointersManager::printDevContentOnDev<sd::LongType>(yShapeInfo, 8);
// PointersManager::printDevContentOnDev<sd::LongType>(zShapeInfo, 8);
// manager.printDevContentOnHost<int>(indices.specialBuffer(), indices.lengthOf());
// manager.printDevContentOnHost<sd::LongType>(indices.special(), shape::shapeInfoLength(indices.rankOf()));
// manager.printDevContentOnHost<float>(updates.specialBuffer(), updates.lengthOf());
// manager.printDevContentOnHost<sd::LongType>(updates.special(), shape::shapeInfoLength(updates.rankOf()));
// manager.printDevContentOnHost<sd::LongType>(output.special(), shape::shapeInfoLength(output.rankOf()));
// printf("!!!!!!!\n");
// manager.printDevContentOnHost<sd::LongType>(packX.special(), 2*shape::rank(packX.primary()) + 4);
// manager.printDevContentOnHost<sd::LongType>(packX.special(), packX.numberOfTads());
// manager.printDevContentOnHost<sd::LongType>(packY.special(), 2*shape::rank(packY.primary()) + 4);
// manager.printDevContentOnHost<sd::LongType>(packY.special(), packY.numberOfTads());
// manager.printDevContentOnHost<sd::LongType>(packZ.special(), 2*shape::rank(packZ.primary()) + 4);
// manager.printDevContentOnHost<sd::LongType>(packZ.special(), packZ.numberOfTads());
// printf("dddddddd\n");
// shape::printShapeInfoLinear(packY.primary());
| bf098530908f6857019fa6286ff3bf8eda26a7d7.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
// @author Yurii Shyrma ([email protected])
//
#include <helpers/ConstantShapeHelper.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/PointersManager.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/scatter.h>
#include <numeric>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
// x - indices, y - contains number of bad indices, z - input/output
template <typename X>
SD_KERNEL static void checkIndicesCuda(const void *vx, const sd::LongType *xShapeInfo, sd::LongType *y,
const sd::LongType *zShapeInfo, const int axis) {
const auto x = reinterpret_cast<const X *>(vx);
__shared__ int xRank, *coords, xLastDim;
__shared__ sd::LongType xLen, numOfBadIndxPerBlock;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
xRank = shape::rank(xShapeInfo);
xLen = shape::length(xShapeInfo);
numOfBadIndxPerBlock = 0;
}
__syncthreads();
auto xCoords = coords + threadIdx.x * xRank;
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < xLen; i += gridDim.x * blockDim.x) {
shape::index2coords(i, xShapeInfo, xCoords);
const sd::LongType currentInd = x[shape::getOffset(xShapeInfo, xCoords)];
if (currentInd >= shape::sizeAt(zShapeInfo, axis == -1 ? xCoords[xRank - 1] : axis)) {
printf("checkIndices cuda: out of range element %lld at index %lld \n", currentInd, i);
sd::math::atomics::sd_atomicAdd<sd::LongType>(&numOfBadIndxPerBlock, 1);
}
}
__syncthreads();
if (threadIdx.x == 0 && numOfBadIndxPerBlock != 0)
sd::math::atomics::sd_atomicAdd<sd::LongType>(y, numOfBadIndxPerBlock);
}
///////////////////////////////////////////////////////////////////
template <typename X>
static void checkIndicesCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t *stream, const void *vx, const sd::LongType *xShapeInfo,
sd::LongType *y, const sd::LongType *zShapeInfo, const int axis) {
checkIndicesCuda<X><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, y, zShapeInfo, axis);
}
///////////////////////////////////////////////////////////////////
sd::LongType checkIndices(sd::LaunchContext *context, const NDArray &indices, const NDArray &output, const int axis) {
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (indices.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * indices.rankOf() + 256;
const auto xType = indices.dataType();
PointersManager manager(context, "scatterNDcheckIndices");
// scalar, initial value = 0
NDArray numOfBadIndx(sd::DataType::INT64, context, true);
NDArray::prepareSpecialUse({&numOfBadIndx}, {&indices});
BUILD_SINGLE_SELECTOR(xType, checkIndicesCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), indices.specialBuffer(),
indices.specialShapeInfo(), reinterpret_cast<sd::LongType *>(numOfBadIndx.specialBuffer()),
output.specialShapeInfo(), axis),
SD_INDEXING_TYPES);
NDArray::registerSpecialUse({&numOfBadIndx}, {&indices});
manager.synchronize();
return numOfBadIndx.t<sd::LongType>(0);
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - input/output
template <typename X, typename Y>
SD_KERNEL static void scatterLockCuda(const int opCode, const void *vx, const sd::LongType *xShapeInfo, const void *vy,
const sd::LongType *yShapeInfo, void *vz, const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<Y *>(vz);
__shared__ int xRank, yRank, zRank, xNonUnitDim, yNonUnitDim, zNonUnitDim, *coords;
__shared__ sd::LongType xLen, zLen;
__shared__ bool is1Dcase, xySameStride;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
xLen = shape::length(xShapeInfo);
zLen = shape::length(zShapeInfo);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
xNonUnitDim = yNonUnitDim = zNonUnitDim = 0;
is1Dcase = (shape::isCommonVector(zShapeInfo, zNonUnitDim) || shape::isScalar(zShapeInfo)) &&
(shape::isCommonVector(yShapeInfo, yNonUnitDim) || shape::isScalar(yShapeInfo)) &&
(shape::isCommonVector(xShapeInfo, xNonUnitDim) || shape::isScalar(xShapeInfo));
if (is1Dcase) xySameStride = shape::stride(xShapeInfo)[xNonUnitDim] = shape::stride(yShapeInfo)[yNonUnitDim];
}
__syncthreads();
sd::LongType yOffset, zOffset;
int zFirstCoord, *yCoords, *zCoords;
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x) {
if (!is1Dcase) {
yCoords = coords + threadIdx.x * (yRank + zRank);
zCoords = yCoords + yRank;
shape::index2coords(i, zShapeInfo, zCoords);
}
for (sd::LongType j = 0; j < xLen; ++j) {
if (is1Dcase) {
yOffset = j * shape::stride(yShapeInfo)[yNonUnitDim];
zFirstCoord = x[xySameStride ? yOffset : j * shape::stride(xShapeInfo)[xNonUnitDim]];
if (i != zFirstCoord) continue;
zOffset = i * shape::stride(zShapeInfo)[zNonUnitDim];
}
else {
shape::index2coords(j, xShapeInfo, yCoords); // first xRank coordinates in yCoords are the same for y and x
zFirstCoord = x[shape::getOffset(xShapeInfo, yCoords)];
if (zCoords[0] != zFirstCoord) continue;
for (sd::Unsigned k = 0; k < yRank - xRank; ++k) yCoords[xRank + k] = zCoords[k + 1];
yOffset = shape::getOffset(yShapeInfo, yCoords);
zOffset = shape::getOffset(zShapeInfo, zCoords);
}
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if (z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if (z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
}
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - input/output
template <typename X, typename Y>
SD_KERNEL static void scatterCuda(const int opCode, const void *vx, const sd::LongType *xShapeInfo, const void *vy,
const sd::LongType *yShapeInfo, void *vz, const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<Y *>(vz);
__shared__ int xRank, yRank, zRank, xNonUnitDim, yNonUnitDim, zNonUnitDim, *coords;
__shared__ sd::LongType yLen;
__shared__ bool is1Dcase, xySameStride;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
yLen = shape::length(yShapeInfo);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
xNonUnitDim = yNonUnitDim = zNonUnitDim = 0;
is1Dcase = (shape::isCommonVector(zShapeInfo, zNonUnitDim) || shape::isScalar(zShapeInfo)) &&
(shape::isCommonVector(yShapeInfo, yNonUnitDim) || shape::isScalar(yShapeInfo)) &&
(shape::isCommonVector(xShapeInfo, xNonUnitDim) || shape::isScalar(xShapeInfo));
if (is1Dcase) xySameStride = shape::stride(xShapeInfo)[xNonUnitDim] = shape::stride(yShapeInfo)[yNonUnitDim];
}
__syncthreads();
sd::LongType xOffset, yOffset, zOffset;
int *yCoords, *zCoords;
if (!is1Dcase) {
yCoords = coords + threadIdx.x * (yRank + zRank);
zCoords = yCoords + yRank;
}
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < yLen; i += gridDim.x * blockDim.x) {
if (is1Dcase) {
yOffset = i * shape::stride(yShapeInfo)[yNonUnitDim];
zOffset = x[xySameStride ? yOffset : i * shape::stride(xShapeInfo)[xNonUnitDim]] *
shape::stride(zShapeInfo)[zNonUnitDim];
} else {
shape::index2coords(i, yShapeInfo, yCoords);
yOffset = shape::getOffset(yShapeInfo, yCoords);
xOffset =
shape::getOffset(xShapeInfo, yCoords); // first xRank coordinates in yCoords are the same for y and x -> for
// (sd::Unsigned j = 0; j < xRank; ++j) xCoords[j] = yCoords[j];
zCoords[0] = x[xOffset];
for (sd::Unsigned j = 0; j < yRank - xRank; ++j) zCoords[j + 1] = yCoords[xRank + j];
zOffset = shape::getOffset(zShapeInfo, zCoords);
}
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if (z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if (z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Y>
static void scatterCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t *stream, const int opCode, const void *vx,
const sd::LongType *xShapeInfo, const void *vy, const sd::LongType *yShapeInfo,
void *vz, const sd::LongType *zShapeInfo, const bool lock) {
if (lock)
scatterLockCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(opCode, vx, xShapeInfo, vy,
yShapeInfo, vz, zShapeInfo);
else
scatterCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(opCode, vx, xShapeInfo, vy, yShapeInfo,
vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
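// lock == true selects scatterLockCuda: each output element is owned by a single thread, which
// scans all indices and applies every matching update itself, so no two threads write the same
// element and the result is deterministic; lock == false selects scatterCuda, which parallelizes
// over updates and may update the same output element from several threads when indices repeat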
void scatter(sd::LaunchContext *context, pairwise::Ops op, const NDArray &indices, const NDArray &updates,
NDArray &output, const bool lock) {
const auto xType = indices.dataType();
const auto yType = updates.dataType();
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = ((lock ? output.lengthOf() : updates.lengthOf()) + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = sizeof(int) * threadsPerBlock * (updates.rankOf() + output.rankOf()) + 256;
PointersManager manager(context, "scatter");
NDArray::prepareSpecialUse({&output}, {&updates, &indices});
BUILD_DOUBLE_SELECTOR(xType, yType, scatterCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), op,
indices.specialBuffer(), indices.specialShapeInfo(), updates.specialBuffer(),
updates.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), lock),
SD_INDEXING_TYPES, SD_GENERIC_NUMERIC_TYPES);
NDArray::registerSpecialUse({&output}, {&updates, &indices});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - output
template <typename X, typename Y>
SD_KERNEL static void scatterNDLockCuda(const int opCode, const void *vx, const sd::LongType *xShapeInfo,
const void *vy, const sd::LongType *yShapeInfo, void *vz,
const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<Y *>(vz);
__shared__ int xRank, yRank, zRank, biggerXYRank, xLastDim, *coords, xNonUnitDim, yNonUnitDim, zNonUnitDim;
__shared__ sd::LongType zLen, len;
__shared__ bool is1Dcase;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
xLastDim = shape::sizeAt(xShapeInfo, -1);
biggerXYRank = xRank > yRank ? xRank : yRank;
xNonUnitDim = yNonUnitDim = zNonUnitDim = 0;
is1Dcase = (shape::isCommonVector(zShapeInfo, zNonUnitDim) || shape::isScalar(zShapeInfo)) &&
(shape::isCommonVector(yShapeInfo, yNonUnitDim) || shape::isScalar(yShapeInfo)) &&
(shape::isCommonVector(xShapeInfo, xNonUnitDim) || shape::isScalar(xShapeInfo));
len = is1Dcase ? shape::length(xShapeInfo) : shape::length(xShapeInfo) / xLastDim;
zLen = shape::length(zShapeInfo);
}
__syncthreads();
sd::LongType yOffset, zOffset, xOffset;
int *yCoords, *zCoords;
if (!is1Dcase) {
yCoords = coords + threadIdx.x * (biggerXYRank + zRank);
zCoords = yCoords + biggerXYRank;
}
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < zLen; i += gridDim.x * blockDim.x) {
if (!is1Dcase) shape::index2coords(i, zShapeInfo, zCoords);
for (sd::LongType j = 0; j < len;
++j) { // if !is1Dcase then we loop through first xRank-1 dimensions of x, that is we exclude last x dimension
if (is1Dcase) {
if (x[j * shape::stride(xShapeInfo)[xNonUnitDim]] != i) continue;
yOffset = j * shape::stride(yShapeInfo)[yNonUnitDim];
zOffset = i * shape::stride(zShapeInfo)[zNonUnitDim];
} else {
shape::index2coords(j, xRank - 1, shape::shapeOf(const_cast<sd::LongType *>(xShapeInfo)),
yCoords); // first xRank-1 coordinates in yCoords are the same for y and x
// first iteration
yCoords[xRank - 1] = 0;
xOffset = shape::getOffset(xShapeInfo, yCoords);
if (zCoords[0] != x[xOffset]) continue;
// rest iterations
bool matched = true;
for (sd::Unsigned k = 1; k < xLastDim; ++k) {
yCoords[xRank - 1] = k;
xOffset += shape::stride(xShapeInfo)[xRank - 1];
if (zCoords[k] != x[xOffset]) {
matched = false;
break;
}
}
if (!matched) continue;
for (sd::Unsigned k = xLastDim; k < zRank; ++k) yCoords[yRank - zRank + k] = zCoords[k];
yOffset = shape::getOffset(yShapeInfo, yCoords);
zOffset = shape::getOffset(zShapeInfo, zCoords);
}
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if (z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if (z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
}
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - output
template <typename X, typename Y>
SD_KERNEL static void scatterNDCuda(const int opCode, const void *vx, const sd::LongType *xShapeInfo, const void *vy,
const sd::LongType *yShapeInfo, void *vz, const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
const auto y = reinterpret_cast<const Y *>(vy);
auto z = reinterpret_cast<Y *>(vz);
__shared__ int xRank, yRank, zRank, biggerXYRank, xLastDim, *coords, xNonUnitDim, yNonUnitDim, zNonUnitDim;
__shared__ sd::LongType yLen;
__shared__ bool is1Dcase;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
coords = reinterpret_cast<int *>(shmem);
yLen = shape::length(yShapeInfo);
xRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
zRank = shape::rank(zShapeInfo);
xLastDim = shape::sizeAt(xShapeInfo, -1);
biggerXYRank = xRank > yRank ? xRank : yRank;
xNonUnitDim = yNonUnitDim = zNonUnitDim = 0;
is1Dcase = (shape::isCommonVector(zShapeInfo, zNonUnitDim) || shape::isScalar(zShapeInfo)) &&
(shape::isCommonVector(yShapeInfo, yNonUnitDim) || shape::isScalar(yShapeInfo)) &&
(shape::isCommonVector(xShapeInfo, xNonUnitDim) || shape::isScalar(xShapeInfo));
}
__syncthreads();
sd::LongType yOffset, zOffset;
int *yCoords, *zCoords;
if (!is1Dcase) {
yCoords = coords + threadIdx.x * (biggerXYRank + zRank);
zCoords = yCoords + biggerXYRank;
}
for (sd::LongType i = blockIdx.x * blockDim.x + threadIdx.x; i < yLen; i += gridDim.x * blockDim.x) {
if (is1Dcase) {
yOffset = i * shape::stride(yShapeInfo)[zNonUnitDim];
zOffset = x[i * shape::stride(xShapeInfo)[xNonUnitDim]] * shape::stride(zShapeInfo)[zNonUnitDim];
} else {
shape::index2coords(i, yShapeInfo, yCoords);
yOffset = shape::getOffset(yShapeInfo, yCoords);
if (yRank >= xRank)
zCoords[xLastDim] = yCoords[xRank - 1]; // saving y coordinate, since it might be changed in next instructions
for (sd::Unsigned j = 0; j < xLastDim; ++j) { // first xRank-1 coordinates in yCoords are the same for y and x
yCoords[xRank - 1] = j;
zCoords[j] = x[shape::getOffset(xShapeInfo, yCoords)];
}
for (sd::Unsigned j = xLastDim + 1; j < zRank; ++j) zCoords[j] = yCoords[yRank - zRank + j];
zOffset = shape::getOffset(zShapeInfo, zCoords);
}
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if (z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if (z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Y>
static void scatterNDCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t *stream, const int opCode, const void *vx,
const sd::LongType *xShapeInfo, const void *vy, const sd::LongType *yShapeInfo,
void *vz, const sd::LongType *zShapeInfo, const bool lock) {
if (lock)
scatterNDLockCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(opCode, vx, xShapeInfo, vy,
yShapeInfo, vz, zShapeInfo);
else
scatterNDCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(opCode, vx, xShapeInfo, vy, yShapeInfo,
vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
void scatterND(sd::LaunchContext *context, pairwise::Ops op, const NDArray &indices, const NDArray &updates,
NDArray &output, const bool lock) {
const int xRank = indices.rankOf();
const int yRank = updates.rankOf();
const int zRank = output.rankOf();
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = ((lock ? output.lengthOf() : updates.lengthOf()) + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = threadsPerBlock * sizeof(int) * ((yRank > xRank ? yRank : xRank) + zRank) + 256;
const auto xType = indices.dataType();
const auto yType = updates.dataType();
PointersManager manager(context, "scatterND");
NDArray::prepareSpecialUse({&output}, {&updates, &indices});
BUILD_DOUBLE_SELECTOR(xType, yType, scatterNDCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), op,
indices.specialBuffer(), indices.specialShapeInfo(), updates.specialBuffer(),
updates.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), lock),
SD_INDEXING_TYPES, SD_GENERIC_NUMERIC_TYPES);
NDArray::registerSpecialUse({&output}, {&updates, &indices});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Z>
SD_KERNEL void scatterForLossCuda(const void *vx, const sd::LongType *xShapeInfo, void *vy,
const sd::LongType *yShapeInfo, void *vz, const sd::LongType *zShapeInfo) {
const auto x = reinterpret_cast<const X *>(vx);
auto y = reinterpret_cast<Z *>(vy);
auto z = reinterpret_cast<Z *>(vz);
__shared__ sd::LongType xLen;
__shared__ int xRank, *sharedMem; // xRank = zRank, yRank = xRank + 1
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<int *>(shmem);
xLen = shape::length(xShapeInfo);
xRank = shape::rank(xShapeInfo);
}
__syncthreads();
const auto xInd = threadIdx.x + blockIdx.x * blockDim.x;
if (xInd >= xLen) return;
auto coords = sharedMem + threadIdx.x * (xRank + 1);
shape::index2coords(xInd, xShapeInfo, coords);
// y last coordinate
coords[xRank] = x[shape::getOffset(xShapeInfo, coords)];
const auto yOffset = shape::getOffset(yShapeInfo, coords);
if (z == nullptr) { // gradient calculation
y[yOffset] -= 1.f;
} else {
z[shape::getOffset(zShapeInfo, coords)] = y[yOffset];
}
}
///////////////////////////////////////////////////////////////////
template <typename X, typename Z>
static void scatterForLossCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem,
const cudaStream_t *stream, const void *vx, const sd::LongType *xShapeInfo,
void *vy, const sd::LongType *yShapeInfo, void *vz,
const sd::LongType *zShapeInfo) {
scatterForLossCuda<X, Z>
<<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
void scatterForLoss(sd::LaunchContext *context, const NDArray &indices, NDArray &updates, NDArray &output,
const bool calcGrad) {
// shapes of indices and output must be the same
// shape of indices should be the same as updates shape with last dimension excluded, for example if updates is
// {a,b,c} then indices should be {a,b}
PointersManager manager(context, "scatterForLoss");
const int threadsPerBlock = SD_MAX_NUM_THREADS / 2;
const int blocksPerGrid = (indices.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = updates.rankOf() * sizeof(int) * threadsPerBlock + 128;
if (calcGrad) {
NDArray::prepareSpecialUse({&updates}, {&indices});
BUILD_DOUBLE_SELECTOR(
indices.dataType(), updates.dataType(), scatterForLossCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), indices.specialBuffer(),
indices.specialShapeInfo(), updates.specialBuffer(), updates.specialShapeInfo(), nullptr, nullptr),
SD_INDEXING_TYPES, SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&updates}, {&indices});
} else {
NDArray::prepareSpecialUse({&output}, {&indices, &updates});
BUILD_DOUBLE_SELECTOR(indices.dataType(), updates.dataType(), scatterForLossCudaLauncher,
(blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), indices.specialBuffer(),
indices.specialShapeInfo(), updates.specialBuffer(), updates.specialShapeInfo(),
output.specialBuffer(), output.specialShapeInfo()),
SD_INDEXING_TYPES, SD_FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&indices, &updates});
}
manager.synchronize();
}
} // namespace helpers
} // namespace ops
} // namespace sd
/*
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
static void scatterLockCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const
cudaStream_t *stream, const int opCode, const void* vx, const sd::LongType *xShapeInfo, const void* vy, const
sd::LongType *yTadShapeInfo, const sd::LongType *yOffsets, void* vz, const sd::LongType *zTadShapeInfo, const
sd::LongType *zOffsets, const sd::LongType xLen, const sd::LongType yTadLen, const sd::LongType zTadLen) {
scatterLockCuda<X,Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(opCode, vx, xShapeInfo, vy,
yTadShapeInfo, yOffsets, vz, zTadShapeInfo, zOffsets, xLen, yTadLen, zTadLen);
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - input/output
template<typename X, typename Y>
SD_KERNEL static void scatterLockCuda(const int opCode,
const void* vx, const sd::LongType *xShapeInfo,
const void* vy, const sd::LongType *yTadShapeInfo, const sd::LongType *yOffsets,
void* vz, const sd::LongType *zTadShapeInfo, const sd::LongType *zOffsets,
const sd::LongType xLen, const sd::LongType yTadLen, const sd::LongType zTadLen)
{
const int xRank = indices.rankOf();
std::vector<int> zTadDims = ShapeUtils::evalDimsToExclude(output.rankOf(), {0});
int sizeOfUpdDims = xRank;
if(output.rankOf() == updates.rankOf() && indices.isVector())
sizeOfUpdDims = 1;
std::vector<int> yTadDims(sizeOfUpdDims);
std::iota(yTadDims.begin(), yTadDims.end(), 0);
auto packY = sd::ConstantTadHelper::getInstance().tadForDimensions(updates.shapeInfo(),
ShapeUtils::evalDimsToExclude(updates.rankOf(), yTadDims)); auto packZ =
sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), zTadDims);
const sd::LongType zTadLen = shape::length(packZ.primaryShapeInfo());
const sd::LongType yTadLen = shape::length(packY.primaryShapeInfo());
const auto threadsPerBlock = sd::math::sd_max<int>(32, sd::math::sd_min<int>(zTadLen, 1024));
const auto blocksPerGrid = indices.lengthOf();
const auto xType = indices.dataType();
const auto yType = updates.dataType();
BUILD_DOUBLE_SELECTOR(xType, yType, scatterLockCudaLauncher, (blocksPerGrid, threadsPerBlock, 1024,
context->getCudaStream(), op, indices.specialBuffer(), indices.specialShapeInfo(), updates.specialBuffer(),
packY.specialShapeInfo(), packY.specialOffsets(), output.specialBuffer(), packZ.specialShapeInfo(),
packZ.specialOffsets(), indices.lengthOf(), yTadLen, zTadLen), SD_INDEXING_TYPES, SD_GENERIC_NUMERIC_TYPES);
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<Y*>(vz);
__shared__ bool vectorCase;
if(threadIdx.x == 0)
vectorCase = yTadLen == xLen && shape::rank(xShapeInfo) <= 1;
__syncthreads();
for (int e = 0; e < xLen; e++) {
const sd::LongType zIndex = x[shape::getIndexOffset(e, xShapeInfo)];
const bool isOwner = zIndex < gridDim.x ? blockIdx.x == zIndex : blockIdx.x == zIndex % gridDim.x;
if (!isOwner)
continue;
if(vectorCase) { // means z_rank = 1 and might be yTadLen != zTadLen in this case
if(threadIdx.x != 0)
continue;
const auto yOffset = shape::getIndexOffset(e, yTadShapeInfo);
const auto zOffset = shape::getIndexOffset(zIndex, zTadShapeInfo);
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if(z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if(z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
else { // yTadLen == zTadLen in this case
const Y* yTad = y + yOffsets[e];
Y* zTad = z + zOffsets[zIndex];
for (sd::LongType i = threadIdx.x; i < zTadLen; i += blockDim.x) {
const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo);
const auto zOffset = shape::getIndexOffset(i, zTadShapeInfo);
switch (opCode) {
case pairwise::Add:
zTad[zOffset] += yTad[yOffset];
break;
case pairwise::Subtract:
zTad[zOffset] -= yTad[yOffset];
break;
case pairwise::Multiply:
zTad[zOffset] *= yTad[yOffset];
break;
case pairwise::Divide:
zTad[zOffset] /= yTad[yOffset];
break;
case pairwise::ReverseSubtract:
zTad[zOffset] = yTad[yOffset] - zTad[zOffset];
break;
case pairwise::ReverseDivide:
zTad[zOffset] = yTad[yOffset] / zTad[zOffset];
break;
case pairwise::CopyPws:
zTad[zOffset] = yTad[yOffset];
break;
case pairwise::MaxPairwise:
if(zTad[zOffset] < yTad[yOffset]) zTad[zOffset] = yTad[yOffset];
break;
case pairwise::MinPairwise:
if(zTad[zOffset] > yTad[yOffset]) zTad[zOffset] = yTad[yOffset];
break;
default:
continue;
}
}
}
}
}
template<typename T, bool locking>
SD_KERNEL static void scatterCuda(const int opCode, const int numOfSubArrs,
void* vx, const sd::LongType *xShapeInfo, const sd::LongType
*xOffsets, void* vy, const sd::LongType *yShapeInfo, const sd::LongType *yOffsets, const int* indexes, unsigned int
arrLenX, unsigned int arrLenY) {
__shared__ T *x, *y;
if (locking) {
for (int e = 0; e < numOfSubArrs; e++) {
const auto xIndex = indexes[e];
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex %
gridDim.x;
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T *>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T *>(vy) + yOffsets[e];
}
__syncthreads();
for (sd::LongType i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
switch (opCode) {
case pairwise::Add:
x[xOffset] += y[yOffset];
break;
case pairwise::Subtract:
x[xOffset] -= y[yOffset];
break;
case pairwise::Multiply:
x[xOffset] *= y[yOffset];
break;
case pairwise::Divide:
x[xOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case pairwise::ReverseDivide:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case pairwise::CopyPws:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
} else {
for (int e = blockIdx.x; e < numOfSubArrs; e+= gridDim.x) {
if (threadIdx.x == 0) {
const auto xIndex = indexes[e];
x = reinterpret_cast<T *>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T *>(vy) + yOffsets[e];
}
__syncthreads();
for (sd::LongType i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
switch (opCode) {
case pairwise::Add:
x[xOffset] += y[yOffset];
break;
case pairwise::Subtract:
x[xOffset] -= y[yOffset];
break;
case pairwise::Multiply:
x[xOffset] *= y[yOffset];
break;
case pairwise::Divide:
x[xOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case pairwise::ReverseDivide:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case pairwise::CopyPws:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
}
}
template <typename T>
void scatter_(sd::LaunchContext *context, pairwise::Ops op, const NDArray& indices, const NDArray& updates,
NDArray& output, const bool lock) { std::vector<int> dims = {0}; auto inverted =
ShapeUtils::evalDimsToExclude(output.rankOf(), dims);
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), inverted);
auto packY = sd::ConstantTadHelper::getInstance().tadForDimensions(updates.shapeInfo(), inverted);
auto psX = packX.specialShapeInfo();
auto psY = packY.special();
PointersManager manager(context, "scatter");
auto poX = packX.specialOffsets();
auto poY = packY.special();
NDArray::prepareSpecialUse({&output}, {&updates, &indices});
unsigned int tadLengthX = shape::length(packX.primaryShapeInfo());
unsigned int tadLengthY = shape::length(packY.primary());
if (tadLengthX != tadLengthY)
throw std::runtime_error("scatter: Lengths of TADs must be equal");
auto blockSize = sd::math::sd_max<int>(32, sd::math::sd_min<int>(tadLengthX, 1024));
if (lock)
scatterCuda<T, true><<<512, blockSize, 1024, *context->getCudaStream()>>>(op, indices.lengthOf(),
output.specialBuffer(), psX, poX, updates.specialBuffer(), psY, poY, reinterpret_cast<int *>(indices.specialBuffer()),
tadLengthX, tadLengthY); else scatterCuda<T, false><<<512, blockSize, 1024, *context->getCudaStream()>>>(op,
indices.lengthOf(), output.specialBuffer(), psX, poX, updates.specialBuffer(), psY, poY, reinterpret_cast<int
*>(indices.specialBuffer()), tadLengthX, tadLengthY);
NDArray::registerSpecialUse({&output}, {&updates, &indices});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
// x - indices, y - updates, z - output
template<typename X, typename Y>
SD_KERNEL static void scatterNDLockCuda(const int opCode,
const void* vx, const sd::LongType *xTadShapeInfo, const sd::LongType
*xOffsets, const void* vy, const sd::LongType *yTadShapeInfo, const sd::LongType *yOffsets, void* vz, const sd::LongType
*zTadShapeInfo, const sd::LongType *zOffsets, const sd::LongType *zShapeInfo, const sd::LongType numOfXTads, const
sd::LongType numOfZTads, const sd::LongType yTadLen) {
---------------------------------------------------------------------------
const int xLastDim = indices.sizeAt(-1);
// y_tad and z_tad have the same shape
std::vector<int> yTadDims(zRank - xLastDim), zTadDims(zRank - xLastDim);
for (int j = 0, i = zTadDims.size() - 1; i >=0 ; --i, ++j) {
yTadDims[i] = yRank - 1 - j;
zTadDims[i] = zRank - 1 - j;
}
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(indices.shapeInfo(), {xRank - 1});
auto packY = sd::ConstantTadHelper::getInstance().tadForDimensions(updates.shapeInfo(), yTadDims);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), zTadDims);
const int threadsPerBlock = SD_MAX_NUM_THREADS / 4;
const int blocksPerGrid = packZ.numberOfTads();
const int sharedMem = 8 * threadsPerBlock * xLastDim + 128;
---------------------------------------------------------------------------
// zTadLen == yTadLen if numOfZTads > 1; otherwise z and y are vectors
// numOfXTads == numOfYTads if numOfZTads > 1; otherwise z and y are vectors
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<Y*>(vz);
__shared__ sd::LongType *zTadCoords;
__shared__ int xLastDim;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
zTadCoords = reinterpret_cast<sd::LongType*>(shmem);
xLastDim = xTadShapeInfo[1]; // xTad has rank = 1 always
}
__syncthreads();
sd::LongType* zTadCoordsPerThread = zTadCoords + threadIdx.x * xLastDim;
for (sd::LongType i = 0; i < numOfXTads; ++i) {
const X* xTad = x + xOffsets[i];
for (sd::Unsigned k = 0; k < xLastDim; ++k)
zTadCoordsPerThread[k] = xTad[shape::getIndexOffset(k, xTadShapeInfo)];
const auto zTadIndex = shape::coords2index(xLastDim, zShapeInfo + 1, zTadCoordsPerThread);
const bool isOwner = zTadIndex < gridDim.x ? blockIdx.x == zTadIndex : blockIdx.x == zTadIndex % gridDim.x;
if(!isOwner)
continue;
if(numOfZTads == 1) { // yTadLen == numOfXTads in this case
if(threadIdx.x != 0)
continue;
const auto yOffset = shape::getIndexOffset(i, yTadShapeInfo);
const auto zOffset = shape::getIndexOffset(zTadIndex, zTadShapeInfo);
switch (opCode) {
case pairwise::Add:
z[zOffset] += y[yOffset];
break;
case pairwise::Subtract:
z[zOffset] -= y[yOffset];
break;
case pairwise::Multiply:
z[zOffset] *= y[yOffset];
break;
case pairwise::Divide:
z[zOffset] /= y[yOffset];
break;
case pairwise::ReverseSubtract:
z[zOffset] = y[yOffset] - z[zOffset];
break;
case pairwise::ReverseDivide:
z[zOffset] = y[yOffset] / z[zOffset];
break;
case pairwise::CopyPws:
z[zOffset] = y[yOffset];
break;
case pairwise::MaxPairwise:
if(z[zOffset] < y[yOffset]) z[zOffset] = y[yOffset];
break;
case pairwise::MinPairwise:
if(z[zOffset] > y[yOffset]) z[zOffset] = y[yOffset];
break;
default:
continue;
}
}
else {
const auto yTad = y + yOffsets[i];
const auto zTad = z + zOffsets[zTadIndex];
for (sd::LongType j = threadIdx.x; j < yTadLen; j += blockDim.x) {
const auto yOffset = shape::getIndexOffset(j, yTadShapeInfo);
const auto zOffset = shape::getIndexOffset(j, zTadShapeInfo);
switch (opCode) {
case pairwise::Add:
zTad[zOffset] += yTad[yOffset];
break;
case pairwise::Subtract:
zTad[zOffset] -= yTad[yOffset];
break;
case pairwise::Multiply:
zTad[zOffset] *= yTad[yOffset];
break;
case pairwise::Divide:
zTad[zOffset] /= yTad[yOffset];
break;
case pairwise::ReverseSubtract:
zTad[zOffset] = yTad[yOffset] - zTad[zOffset];
break;
case pairwise::ReverseDivide:
zTad[zOffset] = yTad[yOffset] / zTad[zOffset];
break;
case pairwise::CopyPws:
zTad[zOffset] = yTad[yOffset];
break;
case pairwise::MaxPairwise:
if(zTad[zOffset] < yTad[yOffset]) zTad[zOffset] = yTad[yOffset];
break;
case pairwise::MinPairwise:
if(zTad[zOffset] > yTad[yOffset]) zTad[zOffset] = yTad[yOffset];
break;
default:
continue;
}
}
}
}
}
*/
// PointersManager manager(&context, "NativeOps::concat");
// PointersManager::printDevContentOnDev<int>(vx, 2);
// PointersManager::printDevContentOnDev<sd::LongType>(xShapeInfo, 8);
// PointersManager::printDevContentOnDev<float>(vy, 8);
// PointersManager::printDevContentOnDev<sd::LongType>(yShapeInfo, 8);
// PointersManager::printDevContentOnDev<sd::LongType>(zShapeInfo, 8);
// manager.printDevContentOnHost<int>(indices.specialBuffer(), indices.lengthOf());
// manager.printDevContentOnHost<sd::LongType>(indices.special(), shape::shapeInfoLength(indices.rankOf()));
// manager.printDevContentOnHost<float>(updates.specialBuffer(), updates.lengthOf());
// manager.printDevContentOnHost<sd::LongType>(updates.special(), shape::shapeInfoLength(updates.rankOf()));
// manager.printDevContentOnHost<sd::LongType>(output.special(), shape::shapeInfoLength(output.rankOf()));
// printf("!!!!!!!\n");
// manager.printDevContentOnHost<sd::LongType>(packX.special(), 2*shape::rank(packX.primary()) + 4);
// manager.printDevContentOnHost<sd::LongType>(packX.special(), packX.numberOfTads());
// manager.printDevContentOnHost<sd::LongType>(packY.special(), 2*shape::rank(packY.primary()) + 4);
// manager.printDevContentOnHost<sd::LongType>(packY.special(), packY.numberOfTads());
// manager.printDevContentOnHost<sd::LongType>(packZ.special(), 2*shape::rank(packZ.primary()) + 4);
// manager.printDevContentOnHost<sd::LongType>(packZ.special(), packZ.numberOfTads());
// printf("dddddddd\n");
// shape::printShapeInfoLinear(packY.primary());
|
e1b2b8d56b74a6f6ce49d6d167793124291f50e0.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//main.cu: error: possible read-write race
//however, this didn't happen in the tests
//although in CUDA providing the inline keyword should still keep a copy of the function around,
//this kind of access is considered an error by ESBMC
//ps: the values from A[N-1-offset) to A[N-1] will always receive unpredictable values,
//because those threads access memory positions that were not initialized
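//for example, a launch like hipLaunchKernelGGL(inline_test, dim3(1), dim3(N), 0, 0, A, 1) makes
//thread 0 read A[1] inside inlined() while thread 1 writes A[1] via "A[tid] += temp", with no
//synchronization between the two accesses, which is the reported read-write race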
#include <stdio.h>
#include <hip/hip_runtime.h>
#define tid threadIdx.x
#define N 2//1024
__device__ inline void inlined(int *A, int offset)
{
int temp = A[tid + offset];
A[tid] += temp;
}
__global__ void inline_test(int *A, int offset) {
inlined(A, offset);
}
| e1b2b8d56b74a6f6ce49d6d167793124291f50e0.cu | //xfail:BOOGIE_ERROR
//main.cu: error: possible read-write race
//however, this didn't happen in the tests
//although in CUDA providing the inline keyword should still keep a copy of the function around,
//this kind of access is considered an error by ESBMC
//ps: the values from A[N-1-offset) to A[N-1] will always receive unpredictable values,
//because those threads access memory positions that were not initialized
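//for example, a launch like inline_test<<<1, N>>>(A, 1) makes thread 0 read A[1] inside inlined()
//while thread 1 writes A[1] via "A[tid] += temp", with no synchronization between the two
//accesses, which is the reported read-write race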
#include <stdio.h>
#include <cuda.h>
#define tid threadIdx.x
#define N 2//1024
__device__ inline void inlined(int *A, int offset)
{
int temp = A[tid + offset];
A[tid] += temp;
}
__global__ void inline_test(int *A, int offset) {
inlined(A, offset);
}
|
03059bb42fd331c54362fec461ebec4def1318ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_slowKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *ptr = NULL;
hipMalloc(&ptr, XSIZE*YSIZE);
int sz = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
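// round the matrix dimensions up to the next multiple of the block size so the launched grid covers the whole XSIZE x YSIZE range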
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(_slowKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, ptr, sz);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(_slowKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, ptr, sz);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(_slowKernel, dim3(gridBlock), dim3(threadBlock), 0, 0, ptr, sz);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 03059bb42fd331c54362fec461ebec4def1318ae.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_slowKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
char *ptr = NULL;
cudaMalloc(&ptr, XSIZE*YSIZE);
int sz = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
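// round the matrix dimensions up to the next multiple of the block size so the launched grid covers the whole XSIZE x YSIZE range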
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_slowKernel<<<gridBlock,threadBlock>>>(ptr,sz);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_slowKernel<<<gridBlock,threadBlock>>>(ptr,sz);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_slowKernel<<<gridBlock,threadBlock>>>(ptr,sz);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
60c57fac6d7929d2b05591d57ad2ae4d03c2a920.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2023 www.open3d.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------
//
// Based on PVCNN Library (MIT License):
// https://github.com/mit-han-lab/pvcnn
//
// Copyright (c) 2018 Zhijian Liu, Haotian Tang, Yujun Lin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// ----------------------------------------------------------------------------
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ATen/hip/HIPContext.h"
#include "open3d/ml/contrib/TrilinearDevoxelize.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
#include "open3d/ml/pytorch/pvcnn/TrilinearDevoxelizeKernel.h"
using namespace open3d::ml::contrib;
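// Host-side wrappers below launch the contrib kernels on the current ATen stream with one block
// per batch element (b blocks) and OptNumThreads(n) threads per block, then check for launch errors.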
void TrilinearDevoxelize(int b,
int c,
int n,
int r,
int r2,
int r3,
bool training,
const float *coords,
const float *feat,
int *inds,
float *wgts,
float *outs) {
hipError_t err;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( TrilinearDevoxelizeKernel), dim3(b), dim3(OptNumThreads(n)), 0, stream,
b, c, n, r, r2, r3, training, coords, feat, inds, wgts, outs);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
void TrilinearDevoxelizeGrad(int b,
int c,
int n,
int r3,
const int *inds,
const float *wgts,
const float *grad_y,
float *grad_x) {
hipError_t err;
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( TrilinearDevoxelizeGradKernel), dim3(b), dim3(OptNumThreads(n)), 0, stream,
b, c, n, r3, inds, wgts, grad_y, grad_x);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 60c57fac6d7929d2b05591d57ad2ae4d03c2a920.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2023 www.open3d.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------
//
// Based on PVCNN Library (MIT License):
// https://github.com/mit-han-lab/pvcnn
//
// Copyright (c) 2018 Zhijian Liu, Haotian Tang, Yujun Lin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// ----------------------------------------------------------------------------
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "ATen/cuda/CUDAContext.h"
#include "open3d/ml/contrib/TrilinearDevoxelize.cuh"
#include "open3d/ml/contrib/cuda_utils.h"
#include "open3d/ml/pytorch/pvcnn/TrilinearDevoxelizeKernel.h"
using namespace open3d::ml::contrib;
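// Host-side wrappers below launch the contrib kernels on the current ATen stream with one block
// per batch element (b blocks) and OptNumThreads(n) threads per block, then check for launch errors.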
void TrilinearDevoxelize(int b,
int c,
int n,
int r,
int r2,
int r3,
bool training,
const float *coords,
const float *feat,
int *inds,
float *wgts,
float *outs) {
cudaError_t err;
auto stream = at::cuda::getCurrentCUDAStream();
TrilinearDevoxelizeKernel<<<b, OptNumThreads(n), 0, stream>>>(
b, c, n, r, r2, r3, training, coords, feat, inds, wgts, outs);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void TrilinearDevoxelizeGrad(int b,
int c,
int n,
int r3,
const int *inds,
const float *wgts,
const float *grad_y,
float *grad_x) {
cudaError_t err;
auto stream = at::cuda::getCurrentCUDAStream();
TrilinearDevoxelizeGradKernel<<<b, OptNumThreads(n), 0, stream>>>(
b, c, n, r3, inds, wgts, grad_y, grad_x);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
803b75460c3415f023f6434bff0f14317048f1ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2017 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "rocblas.h"
#include "./debug.h"
typedef float floatType_t;
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
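/* INDX assumes column-major storage: element (row, col) of a matrix with leading dimension ld
   sits at offset col * ld + row, which matches the layout hipBLAS expects */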
/* matrix size and thread dimensions */
#define SIZE 16384
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
/* naive GPU kernel where each element of C is computed by a single thread */
__global__ void GPU_naive( const int m, floatType_t const * const a,
floatType_t const * const b, floatType_t * const c )
{
/* determine my thread's row and col indices in the global C matrix */
const int myrow = blockDim.x * blockIdx.x + threadIdx.x;
const int mycol = blockDim.y * blockIdx.y + threadIdx.y;
/* if my row and col are in the C matrix, then calculate that value of C */
if( myrow < m && mycol < m )
{
register floatType_t temp = 0.0;
for( int k = 0; k < m; k++ )
temp += a[INDX( myrow, k, m )] * b[INDX( k, mycol, m )];
c[INDX( myrow, mycol, m )] = temp;
} /* end if */
return;
} /* end GPU_naive */
int main( int argc, char *argv[] )
{
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
floatType_t *h_a, *h_b, *h_c, *h_c1;
floatType_t *d_a, *d_b, *d_c;
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( floatType_t );
/* memory allocation on host */
h_a = (floatType_t *) malloc( numbytes );
h_b = (floatType_t *) malloc( numbytes );
h_c = (floatType_t *) malloc( numbytes );
h_c1 = (floatType_t *) malloc( numbytes );
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
checkCUDA( hipMalloc( (void **)&d_a, numbytes ) );
checkCUDA( hipMalloc( (void **)&d_b, numbytes ) );
checkCUDA( hipMalloc( (void **)&d_c, numbytes ) );
/* copy a and b to device */
checkCUDA( hipMemcpy( d_a, h_a, numbytes, hipMemcpyHostToDevice ) );
checkCUDA( hipMemcpy( d_b, h_b, numbytes, hipMemcpyHostToDevice ) );
hipblasHandle_t handle;
checkCUBLAS( hipblasCreate( &handle ) );
floatType_t alpha = 1.0;
floatType_t beta = 0.0;
/* start timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* call CUBLAS dgemm */
hipblasSgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
size, size, size,
(float *)&alpha,
(float *)d_a, size,
(float *)d_b, size,
(float *)&beta,
(float *)d_c, size );
/* stop timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU CUBLAS timing information */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
checkCUDA( hipMemcpy( h_c, d_c, numbytes, hipMemcpyDeviceToHost ) );
/* reset C on device to zero */
checkCUDA( hipMemset( d_c, 0, numbytes ) );
/* setup grid and block sizes */
dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 blocks( size / THREADS_PER_BLOCK_X + 1,
size / THREADS_PER_BLOCK_Y + 1, 1 );
/* start timers */
checkCUDA( hipEventRecord( start, 0 ) );
/* call GPU_naive */
hipLaunchKernelGGL(( GPU_naive), dim3(blocks), dim3(threads) , 0, 0, size, d_a, d_b, d_c );
checkKERNEL()
/* stop timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
/* print data for GPU naive */
fprintf(stdout, "Total time GPU NAIVE is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
checkCUDA( hipMemcpy( h_c1, d_c, numbytes, hipMemcpyDeviceToHost ) );
checkCUBLAS( hipblasDestroy( handle ) );
checkCUDA( hipEventDestroy( start ) );
checkCUDA( hipEventDestroy( stop ) );
/* check CUBLAS versus GPU NAIVE numerical results */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp = max( temp, abs( (double)h_c[i] - (double)h_c1[i] )/
abs((double)h_c[i]) );
} /* end for */
printf("Maximum error is %e percent \n",temp*100.0);
if( temp > 0.001 ) printf("FAIL\n");
else printf("PASS\n");
/* cleanup */
checkCUDA( hipFree( d_a ) );
checkCUDA( hipFree( d_b ) );
checkCUDA( hipFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
checkCUDA( hipDeviceReset() );
return 0;
}
| 803b75460c3415f023f6434bff0f14317048f1ab.cu | /*
* Copyright 2017 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "cublas_v2.h"
#include "./debug.h"
typedef float floatType_t;
/* macro for index calculations */
#define INDX( row, col, ld ) ( ( (col) * (ld) ) + (row) )
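/* INDX assumes column-major storage: element (row, col) of a matrix with leading dimension ld
   sits at offset col * ld + row, which matches the layout cuBLAS expects */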
/* matrix size and thread dimensions */
#define SIZE 16384
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
/* naive GPU kernel where each element of C is computed by a single thread */
__global__ void GPU_naive( const int m, floatType_t const * const a,
floatType_t const * const b, floatType_t * const c )
{
/* determine my threads's row and col indices in the global C matrix */
const int myrow = blockDim.x * blockIdx.x + threadIdx.x;
const int mycol = blockDim.y * blockIdx.y + threadIdx.y;
/* if my row and col are in the C matrix, then calculate that value of C */
if( myrow < m && mycol < m )
{
register floatType_t temp = 0.0;
for( int k = 0; k < m; k++ )
temp += a[INDX( myrow, k, m )] * b[INDX( k, mycol, m )];
c[INDX( myrow, mycol, m )] = temp;
} /* end if */
return;
} /* end GPU_naive */
int main( int argc, char *argv[] )
{
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
const int size = SIZE;
fprintf(stdout, "Matrix size is %d\n",size);
floatType_t *h_a, *h_b, *h_c, *h_c1;
floatType_t *d_a, *d_b, *d_c;
size_t numbytes = (size_t ) size * (size_t ) size * sizeof( floatType_t );
/* memory allocation on host */
h_a = (floatType_t *) malloc( numbytes );
h_b = (floatType_t *) malloc( numbytes );
h_c = (floatType_t *) malloc( numbytes );
h_c1 = (floatType_t *) malloc( numbytes );
/* zero out the host memory for C matrices */
memset( h_c, 0, numbytes );
memset( h_c1, 0, numbytes );
fprintf( stdout, "Total memory required is %lf MB\n",
3.0 * (double) numbytes / 1000000.0 );
/* initialize the A and B matrices */
for( int i = 0; i < size * size; i++ )
{
h_a[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
h_b[i] = double( rand() ) / ( double(RAND_MAX) + 1.0 );
}
/* allocate a, b, c in gpu memory */
checkCUDA( cudaMalloc( (void **)&d_a, numbytes ) );
checkCUDA( cudaMalloc( (void **)&d_b, numbytes ) );
checkCUDA( cudaMalloc( (void **)&d_c, numbytes ) );
/* copy a and b to device */
checkCUDA( cudaMemcpy( d_a, h_a, numbytes, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemcpy( d_b, h_b, numbytes, cudaMemcpyHostToDevice ) );
cublasHandle_t handle;
checkCUBLAS( cublasCreate( &handle ) );
floatType_t alpha = 1.0;
floatType_t beta = 0.0;
/* start timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* call CUBLAS dgemm */
cublasSgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N,
size, size, size,
(float *)&alpha,
(float *)d_a, size,
(float *)d_b, size,
(float *)&beta,
(float *)d_c, size );
/* stop timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print GPU CUBLAS timing information */
fprintf(stdout, "Total time GPU CUBLAS is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C from device to host for error checking */
checkCUDA( cudaMemcpy( h_c, d_c, numbytes, cudaMemcpyDeviceToHost ) );
/* reset C on device to zero */
checkCUDA( cudaMemset( d_c, 0, numbytes ) );
/* setup grid and block sizes */
dim3 threads( THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1 );
dim3 blocks( size / THREADS_PER_BLOCK_X + 1,
size / THREADS_PER_BLOCK_Y + 1, 1 );
/* start timers */
checkCUDA( cudaEventRecord( start, 0 ) );
/* call GPU_naive */
GPU_naive<<< blocks, threads >>> ( size, d_a, d_b, d_c );
checkKERNEL()
/* stop timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
/* print data for GPU naive */
fprintf(stdout, "Total time GPU NAIVE is %f sec\n", elapsedTime / 1000.0f );
fprintf(stdout, "Performance is %f GFlop/s\n",
2.0 * (double) size * (double) size * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9 );
/* copy C back to host */
checkCUDA( cudaMemcpy( h_c1, d_c, numbytes, cudaMemcpyDeviceToHost ) );
checkCUBLAS( cublasDestroy( handle ) );
checkCUDA( cudaEventDestroy( start ) );
checkCUDA( cudaEventDestroy( stop ) );
/* check CUBLAS versus GPU NAIVE numerical results */
double temp = 0.0;
for( int i = 0; i < size * size; i++ )
{
temp = max( temp, abs( (double)h_c[i] - (double)h_c1[i] )/
abs((double)h_c[i]) );
} /* end for */
printf("Maximum error is %e percent \n",temp*100.0);
if( temp > 0.001 ) printf("FAIL\n");
else printf("PASS\n");
/* cleanup */
checkCUDA( cudaFree( d_a ) );
checkCUDA( cudaFree( d_b ) );
checkCUDA( cudaFree( d_c ) );
free( h_a );
free( h_b );
free( h_c );
free( h_c1 );
checkCUDA( cudaDeviceReset() );
return 0;
}
|
cuMin.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <climits>
#define SERIAL_SCALE 2
#define SERIAL_PART (1<<SERIAL_SCALE)
extern "C" {
__global__
void kernelMain(int *input, int *output){
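// Block-level minimum: each thread first serially reduces SERIAL_PART consecutive
// inputs, a shared-memory sweep then combines the per-thread minima, and the last
// thread of the block writes the result to output[blockIdx.x].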
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ int mem[1024];
int m=input[thid*SERIAL_PART];
for(unsigned int i=1;i<SERIAL_PART;++i)
{
int t=input[thid*SERIAL_PART+i];
if(t<m)
m=t;
}
mem[threadIdx.x]=m;
__syncthreads();
for(unsigned int shift=1;shift<1024;shift*=2)
{
int val=mem[threadIdx.x];
if(threadIdx.x>=shift)
{
if(val>mem[threadIdx.x-shift])
val=mem[threadIdx.x-shift];
}
__syncthreads();
mem[threadIdx.x]=val;
}
if(threadIdx.x==1023)
output[blockIdx.x]=mem[1023];
}
__global__ void kernelPrepare(int *input, int *output, int* args)
{
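// Pre-reduction pass: each thread takes the minimum of up to 'count' consecutive
// input elements, guarding against reading past the n-element array.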
const unsigned int count=args[0];
const unsigned int n=args[1];
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(thid*count>=n)
return;
int m=input[thid*count];
for(unsigned int i=1;i<count && thid*count+i<n;++i)
{
if(m>input[thid*count+i])
m=input[thid*count+i];
}
output[thid]=m;
}
}
| cuMin.cu | #include <cstdio>
#include <climits>
#define SERIAL_SCALE 2
#define SERIAL_PART (1<<SERIAL_SCALE)
extern "C" {
__global__
void kernelMain(int *input, int *output){
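// Block-level minimum: each thread first serially reduces SERIAL_PART consecutive
// inputs, a shared-memory sweep then combines the per-thread minima, and the last
// thread of the block writes the result to output[blockIdx.x].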
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ int mem[1024];
int m=input[thid*SERIAL_PART];
for(unsigned int i=1;i<SERIAL_PART;++i)
{
int t=input[thid*SERIAL_PART+i];
if(t<m)
m=t;
}
mem[threadIdx.x]=m;
__syncthreads();
for(unsigned int shift=1;shift<1024;shift*=2)
{
int val=mem[threadIdx.x];
if(threadIdx.x>=shift)
{
if(val>mem[threadIdx.x-shift])
val=mem[threadIdx.x-shift];
}
__syncthreads();
mem[threadIdx.x]=val;
}
if(threadIdx.x==1023)
output[blockIdx.x]=mem[1023];
}
__global__ void kernelPrepare(int *input, int *output, int* args)
{
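// Pre-reduction pass: each thread takes the minimum of up to 'count' consecutive
// input elements, guarding against reading past the n-element array.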
const unsigned int count=args[0];
const unsigned int n=args[1];
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(thid*count>=n)
return;
int m=input[thid*count];
for(unsigned int i=1;i<count && thid*count+i<n;++i)
{
if(m>input[thid*count+i])
m=input[thid*count+i];
}
output[thid]=m;
}
}
|
cd25034bdb627d96dd3b3c0fafc8be67f4110773.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void vecAdd(float *A, float *B, float *C) {
int i;
i = blockIdx.x*blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
} | cd25034bdb627d96dd3b3c0fafc8be67f4110773.cu | #include "includes.h"
__global__ void vecAdd(float *A, float *B, float *C) {
int i;
i = blockIdx.x*blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
} |
3951ba425bfba93cb9a6635590dc735f678adf84.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/device_functions.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime_api.h>
#include "device_launch_parameters.h"
#include <math.h>
#define BLOCKSIZE 32
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
system("pause");
exit(1);
}
}
__shared__ float sharedMatM[BLOCKSIZE * BLOCKSIZE];
__shared__ float sharedMatm[BLOCKSIZE * BLOCKSIZE];
__global__ void calculateMinMax(const float* const d_logLuminancem, const float* const d_logLuminanceM,
float *min_logLum, float *max_logLum,
const size_t numRows,
const size_t numCols){
//Get the position of the pixel in the image that this thread will handle
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//Compute the thread's position within the block
const int posThreadBlock = threadIdx.x * BLOCKSIZE + threadIdx.y;
//If we are outside the image bounds, stop
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
//Store the value corresponding to each thread in shared memory
sharedMatm[posThreadBlock] = d_logLuminancem[thread_1D_pos];
sharedMatM[posThreadBlock] = d_logLuminanceM[thread_1D_pos];
__syncthreads();
//Now iterate over the shared-memory elements, comparing as we go to obtain the smallest (and largest) element.
for (int i = BLOCKSIZE * BLOCKSIZE / 2; i > 0; i /= 2){
if (posThreadBlock < i){
if (sharedMatm[posThreadBlock] > sharedMatm[posThreadBlock + i])
sharedMatm[posThreadBlock] = sharedMatm[posThreadBlock + i];
if (sharedMatM[posThreadBlock] < sharedMatM[posThreadBlock + i])
sharedMatM[posThreadBlock] = sharedMatM[posThreadBlock + i];
}
__syncthreads();
}
if (posThreadBlock == 0){
if (sharedMatm[0] < min_logLum[blockIdx.y * gridDim.x + blockIdx.x])
min_logLum[blockIdx.y * gridDim.x + blockIdx.x] = sharedMatm[0];
if (sharedMatM[0] > max_logLum[blockIdx.y * gridDim.x + blockIdx.x])
max_logLum[blockIdx.y * gridDim.x + blockIdx.x] = sharedMatM[0];
}
}
__global__ void histograma(const float* const d_logLuminance,
float min_logLum,
float max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins,
unsigned int *histo){
//Get the position of the pixel in the image that this thread will handle
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//If we are outside the image bounds, stop
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
int bin = (d_logLuminance[thread_1D_pos] - min_logLum) / (max_logLum - min_logLum) * numBins;
atomicAdd(&histo[bin], 1);
}
__global__ void exclusiveScan(unsigned int *histo, const size_t numBins){
__shared__ int tempArray[BLOCKSIZE * BLOCKSIZE * 2];
int id = blockIdx.x * blockDim.x + threadIdx.x;
int threadId = threadIdx.x;
int offset = 1, temp;
int ai = threadId;
int bi = threadId + numBins / 2;
tempArray[ai] = histo[id];
tempArray[bi] = histo[id + numBins / 2];
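// Up-sweep (reduce) phase of the Blelloch scan: build partial sums in place.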
for (int i = numBins >> 1; i > 0; i >>= 1){
__syncthreads();
if (threadId < i){
ai = offset * (2 * threadId + 1) - 1;
bi = offset * (2 * threadId + 2) - 1;
tempArray[bi] += tempArray[ai];
}
offset <<= 1;
}
if (threadId == 0){
tempArray[numBins - 1] = 0;
}
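// Down-sweep phase: traverse back down the tree to turn the partial sums into an exclusive prefix sum.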
for (int i = 1; i < numBins; i <<= 1){
offset >>= 1;
__syncthreads();
if (threadId < i){
ai = offset * (2 * threadId + 1) - 1;
bi = offset * (2 * threadId + 2) - 1;
temp = tempArray[ai];
tempArray[ai] = tempArray[bi];
tempArray[bi] += temp;
}
}
__syncthreads();
histo[id] = tempArray[threadId];
histo[id + numBins / 2] = tempArray[threadId + numBins / 2];
}
void calculate_cdf(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
/* TODO
1) Find the maximum and minimum luminance values in min_logLum and max_logLum from the logLuminance channel
2) Obtain the range to be represented
3) Generate a histogram of all the values of the logLuminance channel using the formula
bin = (Lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan on the histogram to obtain the cumulative distribution (cdf)
of the luminance values. It must be stored in the pointer d_cdf
*/
//TODO: Compute block sizes
const dim3 blockSize(BLOCKSIZE, BLOCKSIZE, 1);
dim3 gridSize((numCols / blockSize.x) + 1, (numRows / blockSize.y) + 1, 1);
int numBloques = ((numCols * numRows) / (BLOCKSIZE * BLOCKSIZE)) + 2;
//Vectors used to store maxima and minima between iterations.
float *myMin, *myMax;
hipMalloc((float **)&myMin, sizeof(float) * numBloques);
hipMalloc((float **)&myMax, sizeof(float) * numBloques);
//Initialized to the supplied value, to compare against the values the device extracts.
hipMemset(myMin, min_logLum, sizeof(float) * numBloques);
hipMemset(myMax, max_logLum, sizeof(float) * numBloques);
calculateMinMax << < gridSize, blockSize >> >(d_logLuminance, d_logLuminance, myMin, myMax, numRows, numCols);
//Launch kernels iteratively until only one value, the final one, remains.
for (int i = numBloques; i > 1; i /= BLOCKSIZE * BLOCKSIZE){
dim3 newGridSize((sqrt(numBloques) / blockSize.x) + 1, (sqrt(numBloques) / blockSize.y) + 1, 1);
calculateMinMax << < newGridSize, blockSize >> >(myMin, myMax, myMin, myMax, sqrt(numBloques) + 1, sqrt(numBloques) + 1);
numBloques /= (BLOCKSIZE * BLOCKSIZE);
}
hipDeviceSynchronize();
//Copy the result back to the host.
hipMemcpy(&min_logLum, myMin, sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&max_logLum, myMax, sizeof(float), hipMemcpyDeviceToHost);
//Launch the kernel that builds the histograms.
histograma << < gridSize, blockSize >> >(d_logLuminance, min_logLum, max_logLum, numRows, numCols, numBins, d_cdf);
hipDeviceSynchronize();
//Call the kernel in charge of performing the exclusive scan.
int nsize = numBins / (BLOCKSIZE * BLOCKSIZE);
exclusiveScan << <nsize, numBins >> >(d_cdf, numBins);
hipDeviceSynchronize();
//Free memory.
hipFree(myMin);
hipFree(myMax);
checkCudaErrors(hipGetLastError());
} | 3951ba425bfba93cb9a6635590dc735f678adf84.cu | #include <device_functions.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <iomanip>
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <math.h>
#define BLOCKSIZE 32
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
system("pause");
exit(1);
}
}
__shared__ float sharedMatM[BLOCKSIZE * BLOCKSIZE];
__shared__ float sharedMatm[BLOCKSIZE * BLOCKSIZE];
__global__ void calculateMinMax(const float* const d_logLuminancem, const float* const d_logLuminanceM,
float *min_logLum, float *max_logLum,
const size_t numRows,
const size_t numCols){
//Get the position of the pixel in the image that this thread will handle
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//Compute the thread's position within the block
const int posThreadBlock = threadIdx.x * BLOCKSIZE + threadIdx.y;
//If we are outside the image bounds, stop
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
//Store the value corresponding to each thread in shared memory
sharedMatm[posThreadBlock] = d_logLuminancem[thread_1D_pos];
sharedMatM[posThreadBlock] = d_logLuminanceM[thread_1D_pos];
__syncthreads();
//Now iterate over the shared-memory elements, comparing as we go to obtain the smallest (and largest) element.
for (int i = BLOCKSIZE * BLOCKSIZE / 2; i > 0; i /= 2){
if (posThreadBlock < i){
if (sharedMatm[posThreadBlock] > sharedMatm[posThreadBlock + i])
sharedMatm[posThreadBlock] = sharedMatm[posThreadBlock + i];
if (sharedMatM[posThreadBlock] < sharedMatM[posThreadBlock + i])
sharedMatM[posThreadBlock] = sharedMatM[posThreadBlock + i];
}
__syncthreads();
}
if (posThreadBlock == 0){
if (sharedMatm[0] < min_logLum[blockIdx.y * gridDim.x + blockIdx.x])
min_logLum[blockIdx.y * gridDim.x + blockIdx.x] = sharedMatm[0];
if (sharedMatM[0] > max_logLum[blockIdx.y * gridDim.x + blockIdx.x])
max_logLum[blockIdx.y * gridDim.x + blockIdx.x] = sharedMatM[0];
}
}
__global__ void histograma(const float* const d_logLuminance,
float min_logLum,
float max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins,
unsigned int *histo){
//Get the position of the pixel in the image that this thread will handle
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//If we are outside the image bounds, stop
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
int bin = (d_logLuminance[thread_1D_pos] - min_logLum) / (max_logLum - min_logLum) * numBins;
atomicAdd(&histo[bin], 1);
}
__global__ void exclusiveScan(unsigned int *histo, const size_t numBins){
__shared__ int tempArray[BLOCKSIZE * BLOCKSIZE * 2];
int id = blockIdx.x * blockDim.x + threadIdx.x;
int threadId = threadIdx.x;
int offset = 1, temp;
int ai = threadId;
int bi = threadId + numBins / 2;
tempArray[ai] = histo[id];
tempArray[bi] = histo[id + numBins / 2];
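// Up-sweep (reduce) phase of the Blelloch scan: build partial sums in place.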
for (int i = numBins >> 1; i > 0; i >>= 1){
__syncthreads();
if (threadId < i){
ai = offset * (2 * threadId + 1) - 1;
bi = offset * (2 * threadId + 2) - 1;
tempArray[bi] += tempArray[ai];
}
offset <<= 1;
}
if (threadId == 0){
tempArray[numBins - 1] = 0;
}
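// Down-sweep phase: traverse back down the tree to turn the partial sums into an exclusive prefix sum.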
for (int i = 1; i < numBins; i <<= 1){
offset >>= 1;
__syncthreads();
if (threadId < i){
ai = offset * (2 * threadId + 1) - 1;
bi = offset * (2 * threadId + 2) - 1;
temp = tempArray[ai];
tempArray[ai] = tempArray[bi];
tempArray[bi] += temp;
}
}
__syncthreads();
histo[id] = tempArray[threadId];
histo[id + numBins / 2] = tempArray[threadId + numBins / 2];
}
void calculate_cdf(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
/* TODO
1) Find the maximum and minimum luminance values in min_logLum and max_logLum from the logLuminance channel
2) Obtain the range to be represented
3) Generate a histogram of all the values of the logLuminance channel using the formula
bin = (Lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan on the histogram to obtain the cumulative distribution (cdf)
of the luminance values. It must be stored in the pointer d_cdf
*/
//TODO: Compute block sizes
const dim3 blockSize(BLOCKSIZE, BLOCKSIZE, 1);
dim3 gridSize((numCols / blockSize.x) + 1, (numRows / blockSize.y) + 1, 1);
int numBloques = ((numCols * numRows) / (BLOCKSIZE * BLOCKSIZE)) + 2;
//Vectors used to store maxima and minima between iterations.
float *myMin, *myMax;
cudaMalloc((float **)&myMin, sizeof(float) * numBloques);
cudaMalloc((float **)&myMax, sizeof(float) * numBloques);
//Initialized to the supplied value, to compare against the values the device extracts.
cudaMemset(myMin, min_logLum, sizeof(float) * numBloques);
cudaMemset(myMax, max_logLum, sizeof(float) * numBloques);
calculateMinMax << < gridSize, blockSize >> >(d_logLuminance, d_logLuminance, myMin, myMax, numRows, numCols);
//Launch kernels iteratively until only one value, the final one, remains.
for (int i = numBloques; i > 1; i /= BLOCKSIZE * BLOCKSIZE){
dim3 newGridSize((sqrt(numBloques) / blockSize.x) + 1, (sqrt(numBloques) / blockSize.y) + 1, 1);
calculateMinMax << < newGridSize, blockSize >> >(myMin, myMax, myMin, myMax, sqrt(numBloques) + 1, sqrt(numBloques) + 1);
numBloques /= (BLOCKSIZE * BLOCKSIZE);
}
cudaDeviceSynchronize();
//Copy the result back to the host.
cudaMemcpy(&min_logLum, myMin, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&max_logLum, myMax, sizeof(float), cudaMemcpyDeviceToHost);
//Launch the kernel that builds the histograms.
histograma << < gridSize, blockSize >> >(d_logLuminance, min_logLum, max_logLum, numRows, numCols, numBins, d_cdf);
cudaDeviceSynchronize();
//Call the kernel in charge of performing the exclusive scan.
int nsize = numBins / (BLOCKSIZE * BLOCKSIZE);
exclusiveScan << <nsize, numBins >> >(d_cdf, numBins);
cudaDeviceSynchronize();
//Free memory.
cudaFree(myMin);
cudaFree(myMax);
checkCudaErrors(cudaGetLastError());
} |
73a5b814d26348a0b79062a30030444e9d65575d.hip | // !!! This is a file automatically generated by hipify!!!
//********************************
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <random>
#include <set>
#include <list>
#include <vector>
#include <memory>
#include "PlatletSystemBuilder.h"
#include "PlatletSystem.h"
# define M_PI 3.14159265358979323846 /* pi */
//********************************
PlatletSystemBuilder::PlatletSystemBuilder(double _epsilon, double _dt):
epsilon(_epsilon), dt(_dt) {
generalParams.epsilon = epsilon;
generalParams.dt = dt;
};
PlatletSystemBuilder::~PlatletSystemBuilder() {
}
unsigned PlatletSystemBuilder::addMembraneNode(glm::dvec3 pos) {
// Keeping this makes it easy to calculate distances between nodes during initialization.
nodePosition.push_back(pos);
pos_x.push_back(pos.x);
pos_y.push_back(pos.y);
pos_z.push_back(pos.z);
// Include this part with create_system_on_device().
isFixed.push_back(false);
return pos_x.size();
}
unsigned PlatletSystemBuilder::addInteriorNode(glm::dvec3 pos) {
// Keeping this makes it easy to calculate distances between nodes during initialization.
// nodePosition.push_back(pos);
pos_x.push_back(pos.x);
pos_y.push_back(pos.y);
pos_z.push_back(pos.z);
return pos_x.size();
}
void PlatletSystemBuilder::printNodes() {
std::cout << "memNodeCount: " << memNodeCount << '\n';
std::cout << "intNodeCount: " << intNodeCount << '\n';
std::cout << "pos_x.size(): " << pos_x.size() << '\n';
std::cout << "Testing initialization of position vector:\n";
for(auto i = 0; i < pos_x.size(); ++i) {
std::cout << "Node " << i << ": ("
<< pos_x[i] << ", "
<< pos_y[i] << ", "
<< pos_z[i] << ")\n";
}
}
unsigned PlatletSystemBuilder::addMembraneEdge(unsigned n1, unsigned n2) {
double length = glm::length(nodePosition[n1] - nodePosition[n2]);
len_0.push_back(length);
nodeID_L.push_back(n1);
nodeID_R.push_back(n2);
return nodeID_L.size();
}
void PlatletSystemBuilder::printEdges() {
std::cout << "Testing initialization of edge connections:\n";
for(auto i = 0; i < nodeID_L.size(); ++i) {
std::cout << "Edge " << i << " connecting: "
<< nodeID_L[i] << " and "
<< nodeID_R[i] << '\n';
}
}
void PlatletSystemBuilder::fixNode(unsigned id) {
isFixed[id] = true;
}
std::shared_ptr<PlatletSystem> PlatletSystemBuilder::Create_Platlet_System_On_Device() {
// *****************************************************
// Create and initialize (the pointer to) the final system on device.
std::shared_ptr<PlatletSystem> ptr_Platlet_System_Host = std::make_shared<PlatletSystem>();
// *****************************************************
// Calculations of parameter values based on vector info (size, max, etc.)
if ( (pos_x.size() != pos_y.size()) || (pos_y.size() != pos_z.size()) || (pos_z.size() != memNodeCount + intNodeCount) ) {
std::cout << "ERROR: Position vectors not all the same size.\n";
return nullptr;
}
// std::cerr << "nodeID.size(): " << nodeID_L.size() << '\n';
if (nodeID_L.size() == nodeID_R.size()) {
springEdgeCount = nodeID_L.size();
ptr_Platlet_System_Host->springEdge.count = springEdgeCount;
} else {
std::cout << "ERROR: Missing entry on membrane edge connection.\n";
return nullptr;
}
// std::cerr << "springEdgeCount: " << springEdgeCount << '\n';
// Temporary value for 2D.
// Not sure what this should be in general.
ptr_Platlet_System_Host->node.maxConnectedSpringCount = 20;
ptr_Platlet_System_Host->node.membrane_count = memNodeCount;
ptr_Platlet_System_Host->node.membrane_mass = memNodeMass;
ptr_Platlet_System_Host->node.interior_count = intNodeCount;
ptr_Platlet_System_Host->node.interior_mass = intNodeMass;
ptr_Platlet_System_Host->springEdge.stiffness = memSpringStiffness;
ptr_Platlet_System_Host->node.total_count = memNodeCount + intNodeCount;
ptr_Platlet_System_Host->generalParams = generalParams;
// *****************************************************
ptr_Platlet_System_Host->initializePlatletSystem(
pos_x,
pos_y,
pos_z,
isFixed,
nodeID_L,
nodeID_R,
len_0);
return ptr_Platlet_System_Host;
// return nullptr;
}
| 73a5b814d26348a0b79062a30030444e9d65575d.cu | //********************************
#include <cuda.h>
#include <cstdlib>
#include <random>
#include <set>
#include <list>
#include <vector>
#include <memory>
#include "PlatletSystemBuilder.h"
#include "PlatletSystem.h"
# define M_PI 3.14159265358979323846 /* pi */
//********************************
PlatletSystemBuilder::PlatletSystemBuilder(double _epsilon, double _dt):
epsilon(_epsilon), dt(_dt) {
generalParams.epsilon = epsilon;
generalParams.dt = dt;
};
PlatletSystemBuilder::~PlatletSystemBuilder() {
}
unsigned PlatletSystemBuilder::addMembraneNode(glm::dvec3 pos) {
// Keeping this makes it easy to calculate distances between nodes during initialization.
nodePosition.push_back(pos);
pos_x.push_back(pos.x);
pos_y.push_back(pos.y);
pos_z.push_back(pos.z);
// Include this part with create_system_on_device().
isFixed.push_back(false);
return pos_x.size();
}
unsigned PlatletSystemBuilder::addInteriorNode(glm::dvec3 pos) {
// Keeping this makes it easy to calculate distances between nodes during initialization.
// nodePosition.push_back(pos);
pos_x.push_back(pos.x);
pos_y.push_back(pos.y);
pos_z.push_back(pos.z);
return pos_x.size();
}
void PlatletSystemBuilder::printNodes() {
std::cout << "memNodeCount: " << memNodeCount << '\n';
std::cout << "intNodeCount: " << intNodeCount << '\n';
std::cout << "pos_x.size(): " << pos_x.size() << '\n';
std::cout << "Testing initialization of position vector:\n";
for(auto i = 0; i < pos_x.size(); ++i) {
std::cout << "Node " << i << ": ("
<< pos_x[i] << ", "
<< pos_y[i] << ", "
<< pos_z[i] << ")\n";
}
}
unsigned PlatletSystemBuilder::addMembraneEdge(unsigned n1, unsigned n2) {
double length = glm::length(nodePosition[n1] - nodePosition[n2]);
len_0.push_back(length);
nodeID_L.push_back(n1);
nodeID_R.push_back(n2);
return nodeID_L.size();
}
void PlatletSystemBuilder::printEdges() {
std::cout << "Testing initialization of edge connections:\n";
for(auto i = 0; i < nodeID_L.size(); ++i) {
std::cout << "Edge " << i << " connecting: "
<< nodeID_L[i] << " and "
<< nodeID_R[i] << '\n';
}
}
void PlatletSystemBuilder::fixNode(unsigned id) {
isFixed[id] = true;
}
std::shared_ptr<PlatletSystem> PlatletSystemBuilder::Create_Platlet_System_On_Device() {
// *****************************************************
// Create and initialize (the pointer to) the final system on device.
std::shared_ptr<PlatletSystem> ptr_Platlet_System_Host = std::make_shared<PlatletSystem>();
// *****************************************************
// Calculations of parameter values based on vector info (size, max, etc.)
if ( (pos_x.size() != pos_y.size()) || (pos_y.size() != pos_z.size()) || (pos_z.size() != memNodeCount + intNodeCount) ) {
std::cout << "ERROR: Position vectors not all the same size.\n";
return nullptr;
}
// std::cerr << "nodeID.size(): " << nodeID_L.size() << '\n';
if (nodeID_L.size() == nodeID_R.size()) {
springEdgeCount = nodeID_L.size();
ptr_Platlet_System_Host->springEdge.count = springEdgeCount;
} else {
std::cout << "ERROR: Missing entry on membrane edge connection.\n";
return nullptr;
}
// std::cerr << "springEdgeCount: " << springEdgeCount << '\n';
// Temporary value for 2D.
// Not sure what this should be in general.
ptr_Platlet_System_Host->node.maxConnectedSpringCount = 20;
ptr_Platlet_System_Host->node.membrane_count = memNodeCount;
ptr_Platlet_System_Host->node.membrane_mass = memNodeMass;
ptr_Platlet_System_Host->node.interior_count = intNodeCount;
ptr_Platlet_System_Host->node.interior_mass = intNodeMass;
ptr_Platlet_System_Host->springEdge.stiffness = memSpringStiffness;
ptr_Platlet_System_Host->node.total_count = memNodeCount + intNodeCount;
ptr_Platlet_System_Host->generalParams = generalParams;
// *****************************************************
ptr_Platlet_System_Host->initializePlatletSystem(
pos_x,
pos_y,
pos_z,
isFixed,
nodeID_L,
nodeID_R,
len_0);
return ptr_Platlet_System_Host;
// return nullptr;
}
|
8fc5947a80fcdc45342dafbdd863256e354bd038.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parameters.h"
__device__ int32_t* accumulators;
int32_t* accumulators_host;
void accumulators_ini()
{
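// Allocate the sys_array_size x sys_array_size accumulator grid on the device and
// publish its address through the __device__ symbol 'accumulators'.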
hipMalloc(&accumulators_host, sizeof(int32_t) * sys_array_size * sys_array_size);
hipMemcpyToSymbol(accumulators, &accumulators_host, sizeof(int32_t*));
}
void flush_accumulators()
{
hipMemset(accumulators_host, 0, sizeof(int32_t)*sys_array_size*sys_array_size);
}
void accumulators_free()
{
hipFree(accumulators_host);
}
__device__ void accumulate(int x, int y, int16_t result)
{
accumulators[y*sys_array_size + x] += result;
}
__global__ void _result_activate()
{
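// Clamp negative accumulator values to zero (ReLU-style activation); indexing assumes a 2-D grid of single-thread blocks.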
if (accumulators[blockIdx.x + blockIdx.y*sys_array_size] < 0)
accumulators[blockIdx.x + blockIdx.y*sys_array_size] = 0;
}
void result_activate()
{
_result_activate << <grid, 1 >> > ();
} | 8fc5947a80fcdc45342dafbdd863256e354bd038.cu | #include "parameters.h"
__device__ int32_t* accumulators;
int32_t* accumulators_host;
void accumulators_ini()
{
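// Allocate the sys_array_size x sys_array_size accumulator grid on the device and
// publish its address through the __device__ symbol 'accumulators'.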
cudaMalloc(&accumulators_host, sizeof(int32_t) * sys_array_size * sys_array_size);
cudaMemcpyToSymbol(accumulators, &accumulators_host, sizeof(int32_t*));
}
void flush_accumulators()
{
cudaMemset(accumulators_host, 0, sizeof(int32_t)*sys_array_size*sys_array_size);
}
void accumulators_free()
{
cudaFree(accumulators_host);
}
__device__ void accumulate(int x, int y, int16_t result)
{
accumulators[y*sys_array_size + x] += result;
}
__global__ void _result_activate()
{
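// Clamp negative accumulator values to zero (ReLU-style activation); indexing assumes a 2-D grid of single-thread blocks.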
if (accumulators[blockIdx.x + blockIdx.y*sys_array_size] < 0)
accumulators[blockIdx.x + blockIdx.y*sys_array_size] = 0;
}
void result_activate()
{
_result_activate << <grid, 1 >> > ();
} |
91074f081127ffb49da15a11a7c6619aff08bddb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void histogram_kernel(int* PartialHist, int* DeviceData, int DataCount,int* timer)
{
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
clock_t start_atomic=0;
clock_t stop_atomic=0;
extern __shared__ int hist[];
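// Each thread builds a private H-bin histogram in shared memory via a grid-stride
// loop, then all per-thread histograms are merged into the global PartialHist with
// atomics; each thread merges bins tid and tid+blockDim.x, which assumes H == 2*blockDim.x.
// The clock() pair stores each block's elapsed cycles in timer[blockIdx.x].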
if(tid==0)
{
start_atomic = clock();
}
for(int i = 0; i< H; i++)
hist[tid * H + i] = 0;
for(int j = gid; j < DataCount; j += stride)
hist[tid * H + DeviceData[j]]++;
__syncthreads();
for(int t_hist = 0; t_hist < blockDim.x; t_hist++)
{
atomicAdd(&PartialHist[tid],hist[t_hist * H + tid]);
atomicAdd(&PartialHist[tid + blockDim.x],hist[t_hist * H + tid + blockDim.x]);
}
stop_atomic=clock();
if(tid==0)
{
timer[blockIdx.x] = stop_atomic - start_atomic;
}
} | 91074f081127ffb49da15a11a7c6619aff08bddb.cu | #include "includes.h"
__global__ void histogram_kernel(int* PartialHist, int* DeviceData, int DataCount,int* timer)
{
int tid = threadIdx.x;
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
clock_t start_atomic=0;
clock_t stop_atomic=0;
extern __shared__ int hist[];
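// Each thread builds a private H-bin histogram in shared memory via a grid-stride
// loop, then all per-thread histograms are merged into the global PartialHist with
// atomics; each thread merges bins tid and tid+blockDim.x, which assumes H == 2*blockDim.x.
// The clock() pair stores each block's elapsed cycles in timer[blockIdx.x].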
if(tid==0)
{
start_atomic = clock();
}
for(int i = 0; i< H; i++)
hist[tid * H + i] = 0;
for(int j = gid; j < DataCount; j += stride)
hist[tid * H + DeviceData[j]]++;
__syncthreads();
for(int t_hist = 0; t_hist < blockDim.x; t_hist++)
{
atomicAdd(&PartialHist[tid],hist[t_hist * H + tid]);
atomicAdd(&PartialHist[tid + blockDim.x],hist[t_hist * H + tid + blockDim.x]);
}
stop_atomic=clock();
if(tid==0)
{
timer[blockIdx.x] = stop_atomic - start_atomic;
}
} |
db7520081e02c7dc0fe8d0343a16a907e2a3f444.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
#include "compact_stream.h"
///////////////////////////////////////////////////////////////////////////////
//! Naive compute implementation of scan, one thread per element
//! Not work efficient: log(n) steps, but n * (log(n) - 1) adds.
//! Not shared storage efficient either -- this requires ping-ponging
//! arrays in shared memory due to hazards so 2 * n storage space.
//!
//! Pro: Simple
//! Con: Not work efficient
//!
//! @param g_odata output data in global memory
//! @param g_idata input data in global memory
//! @param n input number of elements to scan from input data
///////////////////////////////////////////////////////////////////////////////
__global__ void scan_naive(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float temp[];
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
// Cache the computational window in shared memory
temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
}
__syncthreads();
g_odata[thid] = temp[pout*n+thid];
}
// Add additional kernels here
__global__ void compact_stream_kernel(float *reference, float *idata, unsigned int len, int *n)
{
// index from block and thread
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < (int)len && idata[idx] > 0.0f) {
// reserve an output slot atomically and compact the positive value
// (note: output order is not preserved)
int pos = atomicAdd(n, 1);
reference[pos] = idata[idx];
}
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
| db7520081e02c7dc0fe8d0343a16a907e2a3f444.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifndef _SCAN_NAIVE_KERNEL_H_
#define _SCAN_NAIVE_KERNEL_H_
#include "compact_stream.h"
///////////////////////////////////////////////////////////////////////////////
//! Naive compute implementation of scan, one thread per element
//! Not work efficient: log(n) steps, but n * (log(n) - 1) adds.
//! Not shared storage efficient either -- this requires ping-ponging
//! arrays in shared memory due to hazards so 2 * n storage space.
//!
//! Pro: Simple
//! Con: Not work efficient
//!
//! @param g_odata output data in global memory
//! @param g_idata input data in global memory
//! @param n input number of elements to scan from input data
///////////////////////////////////////////////////////////////////////////////
__global__ void scan_naive(float *g_odata, float *g_idata, int n)
{
// Dynamically allocated shared memory for scan kernels
extern __shared__ float temp[];
int thid = threadIdx.x;
int pout = 0;
int pin = 1;
// Cache the computational window in shared memory
temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;
for (int offset = 1; offset < n; offset *= 2)
{
pout = 1 - pout;
pin = 1 - pout;
__syncthreads();
temp[pout*n+thid] = temp[pin*n+thid];
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
}
__syncthreads();
g_odata[thid] = temp[pout*n+thid];
}
// Add additional kernels here
__global__ void compact_stream_kernel(float *reference, float *idata, unsigned int len, int *n)
{
// index from block and thread
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < (int)len && idata[idx] > 0.0f) {
// reserve an output slot atomically and compact the positive value
// (note: output order is not preserved)
int pos = atomicAdd(n, 1);
reference[pos] = idata[idx];
}
}
#endif // #ifndef _SCAN_NAIVE_KERNEL_H_
|
8a29a9f4e5accc325b23e174dcda6a832f337a0d.hip | // !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "hip/hip_runtime.h"
#include "utility/src/csrmv.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void fillArray(T* x, const int n, const T v)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
x[i] = v;
}
}
template <typename T>
__global__ void computeExp(const T* x, const T* nx, const int n, const T* gamma, T* exp_x)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
exp_x[i] = exp(x[i]/(*gamma));
}
}
template <typename T>
__global__ void computeNegExp(const T* x, const T* nx, const int n, const T* gamma, T* exp_nx)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
exp_nx[i] = exp(-x[i]/(*gamma));
}
}
template <typename T>
__global__ void computeMaxAndExp(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* x_max,
T* exp_x
)
{
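// One thread per net: find the net's maximum pin coordinate, then exponentiate the
// shifted values (x - x_max)/gamma so the following log-sum-exp reduction stays numerically stable.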
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
x_max[i] = -FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
x_max[i] = max(x_max[i], xx);
}
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
exp_x[jj] = exp((xx-x_max[i])/(*gamma));
}
}
}
}
template <typename T>
__global__ void computeMinAndNegExp(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* x_min,
T* exp_nx
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
x_min[i] = FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
x_min[i] = min(x_min[i], xx);
}
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
exp_nx[jj] = exp(-(xx-x_min[i])/(*gamma));
}
}
}
}
template <typename T>
__global__ void computeLogSumExp(
const T* exp_x_sum,
const T* x_max,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_x_sum[i]) + x_max[i];
}
else
{
partial_wl[i] = 0;
}
}
}
template <typename T>
__global__ void computeLogSumNegExp(
const T* exp_nx_sum,
const T* x_min,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_nx_sum[i]) - x_min[i];
}
else
{
partial_wl[i] = 0;
}
}
}
template <typename T>
__global__ void sumArray(const T* x, const int n, T* output)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0)
{
*output = 0;
for (int j = 0; j < n; ++j)
{
*output += x[j];
}
}
}
template <typename T>
__global__ void computeLogSumExpWirelengthGrad(
const T* exp_x, const T* exp_nx,
const T* exp_x_sum, const T* exp_nx_sum,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
const T* grad_tensor,
T* grad_x_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
T reciprocal_exp_x_sum = 1.0/exp_x_sum[i];
T reciprocal_exp_nx_sum = 1.0/exp_nx_sum[i];
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
grad_x_tensor[jj] = (exp_x[jj]*reciprocal_exp_x_sum - exp_nx[jj]*reciprocal_exp_nx_sum)*(*grad_tensor);
//grad_x_tensor[jj] = (exp_x[jj]/exp_x_sum[i] - exp_nx[jj]/exp_nx_sum[i])*(*grad_tensor);
}
}
}
}
template <typename T>
int computeLogSumExpWirelengthCudaLauncher(
const T* x, const T* y,
const int* flat_netpin,
const int* netpin_start,
const T* netpin_values,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
T* exp_xy, T* exp_nxy,
T* exp_xy_sum, T* exp_nxy_sum,
T* partial_wl, // wirelength of each net
const T* grad_tensor,
T* grad_x_tensor, T* grad_y_tensor // the gradient is partial total wirelength to partial pin position
)
{
int thread_count = 512;
int block_count_nets = (num_nets + thread_count - 1) / thread_count; // separate x and y
hipError_t status;
hipStream_t stream_x_exp;
hipStream_t stream_nx_exp;
hipStream_t stream_y_exp;
hipStream_t stream_ny_exp;
status = hipStreamCreate(&stream_x_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_x_exp\n");
fflush(stdout);
return 1;
}
status = hipStreamCreate(&stream_y_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_y_exp\n");
fflush(stdout);
return 1;
}
if (grad_tensor)
{
hipLaunchKernelGGL(( computeLogSumExpWirelengthGrad), dim3(block_count_nets), dim3(thread_count), 0, stream_x_exp,
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
grad_tensor,
grad_x_tensor
);
hipLaunchKernelGGL(( computeLogSumExpWirelengthGrad), dim3(block_count_nets), dim3(thread_count), 0, stream_y_exp,
exp_xy+num_pins, exp_nxy+num_pins,
exp_xy_sum+num_nets, exp_nxy_sum+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
grad_tensor,
grad_y_tensor
);
}
else
{
status = hipStreamCreate(&stream_nx_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_nx_exp\n");
fflush(stdout);
return 1;
}
status = hipStreamCreate(&stream_ny_exp);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_ny_exp\n");
fflush(stdout);
return 1;
}
T* xy_max = nullptr;
status = hipMalloc((void**)&xy_max, 2*num_nets*sizeof(T));
if (status != hipSuccess)
{
printf("hipMalloc failed for xy_max\n");
fflush(stdout);
return 1;
}
T* xy_min = nullptr;
status = hipMalloc((void**)&xy_min, 2*num_nets*sizeof(T));
if (status != hipSuccess)
{
printf("hipMalloc failed for xy_min\n");
fflush(stdout);
return 1;
}
//T* partial_wl = nullptr;
//status = hipMalloc((void**)&partial_wl, 2*num_nets*sizeof(T));
//if (status != hipSuccess)
//{
// printf("hipMalloc failed for partial_wl\n");
// fflush(stdout);
// return 1;
//}
//// be careful, partial_wl is not initialized yet
T alpha = 1.0;
T beta = 0.0;
hipLaunchKernelGGL(( computeMaxAndExp), dim3(block_count_nets), dim3(thread_count), 0, stream_x_exp,
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_max,
exp_xy
);
hipLaunchKernelGGL(( computeMinAndNegExp), dim3(block_count_nets), dim3(thread_count), 0, stream_nx_exp,
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_min,
exp_nxy
);
hipLaunchKernelGGL(( computeMaxAndExp), dim3(block_count_nets), dim3(thread_count), 0, stream_y_exp,
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_max+num_nets,
exp_xy+num_pins
);
hipLaunchKernelGGL(( computeMinAndNegExp), dim3(block_count_nets), dim3(thread_count), 0, stream_ny_exp,
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_min+num_nets,
exp_nxy+num_pins
);
hipsparseStatus_t sparse_status;
hipsparseHandle_t handle_x_exp = 0;
hipsparseHandle_t handle_nx_exp = 0;
hipsparseHandle_t handle_y_exp = 0;
hipsparseHandle_t handle_ny_exp = 0;
hipsparseMatDescr_t descr = 0;
/* initialize cusparse library */
sparse_status= hipsparseCreate(&handle_x_exp);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= hipsparseCreate(&handle_nx_exp);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= hipsparseCreate(&handle_y_exp);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= hipsparseCreate(&handle_ny_exp);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
/* create and setup matrix descriptor */
sparse_status= hipsparseCreateMatDescr(&descr);
if (sparse_status != HIPSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization failed\n");
fflush(stdout);
return 1;
}
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetStream(handle_x_exp, stream_x_exp);
hipsparseSetStream(handle_nx_exp, stream_nx_exp);
hipsparseSetStream(handle_y_exp, stream_y_exp);
hipsparseSetStream(handle_ny_exp, stream_ny_exp);
csrmv(
handle_x_exp,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_xy,
&beta,
exp_xy_sum
);
csrmv(
handle_y_exp,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_xy+num_pins,
&beta,
exp_xy_sum+num_nets
);
csrmv(
handle_nx_exp,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_nxy,
&beta,
exp_nxy_sum
);
csrmv(
handle_ny_exp,
HIPSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_nxy+num_pins,
&beta,
exp_nxy_sum+num_nets
);
hipLaunchKernelGGL(( computeLogSumExp), dim3(block_count_nets), dim3(thread_count), 0, stream_x_exp,
exp_xy_sum,
xy_max,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl
);
hipLaunchKernelGGL(( computeLogSumNegExp), dim3(block_count_nets), dim3(thread_count), 0, stream_nx_exp,
exp_nxy_sum,
xy_min,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+num_nets
);
hipLaunchKernelGGL(( computeLogSumExp), dim3(block_count_nets), dim3(thread_count), 0, stream_y_exp,
exp_xy_sum+num_nets,
xy_max+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+2*num_nets
);
hipLaunchKernelGGL(( computeLogSumNegExp), dim3(block_count_nets), dim3(thread_count), 0, stream_ny_exp,
exp_nxy_sum+num_nets,
xy_min+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+3*num_nets
);
/* destroy matrix descriptor */
sparse_status = hipsparseDestroyMatDescr(descr);
descr = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("Matrix descriptor destruction failed\n");
fflush(stdout);
return 1;
}
/* destroy handle */
sparse_status = hipsparseDestroy(handle_x_exp);
handle_x_exp = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = hipsparseDestroy(handle_nx_exp);
handle_nx_exp = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = hipsparseDestroy(handle_y_exp);
handle_y_exp = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = hipsparseDestroy(handle_ny_exp);
handle_ny_exp = 0;
if (sparse_status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
// I move out the summation to use ATen
// significant speedup is observed
//sumArray<<<1, 1>>>(partial_wl, 2*num_nets, wl);
if (xy_max)
{
hipFree(xy_max);
xy_max = nullptr;
}
if (xy_min)
{
hipFree(xy_min);
xy_min = nullptr;
}
//if (partial_wl)
//{
// hipFree(partial_wl);
// partial_wl = nullptr;
//}
fflush(stdout);
status = hipStreamDestroy(stream_nx_exp);
if (status != hipSuccess)
{
printf("stream_nx_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = hipStreamDestroy(stream_ny_exp);
if (status != hipSuccess)
{
printf("stream_ny_exp destroy failed\n");
fflush(stdout);
return 1;
}
}
/* destroy stream */
status = hipStreamDestroy(stream_x_exp);
if (status != hipSuccess)
{
printf("stream_x_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = hipStreamDestroy(stream_y_exp);
if (status != hipSuccess)
{
printf("stream_y_exp destroy failed\n");
fflush(stdout);
return 1;
}
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputeLogSumExpWirelengthLauncher(\
const T* x, const T* y, \
const int* flat_netpin, \
const int* netpin_start, \
const T* netpin_values, \
const unsigned char* net_mask, \
int num_nets,\
int num_pins,\
const T* gamma, \
T* exp_xy, T* exp_nxy, \
T* exp_xy_sum, T* exp_nxy_sum, \
T* partial_wl, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
)\
{\
return computeLogSumExpWirelengthCudaLauncher(\
x, y, \
flat_netpin, \
netpin_start, \
netpin_values, \
net_mask, \
num_nets,\
num_pins,\
gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor \
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| 8a29a9f4e5accc325b23e174dcda6a832f337a0d.cu | #include <cfloat>
#include <stdio.h>
#include "assert.h"
#include "cuda_runtime.h"
#include "utility/src/csrmv.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void fillArray(T* x, const int n, const T v)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
x[i] = v;
}
}
template <typename T>
__global__ void computeExp(const T* x, const T* nx, const int n, const T* gamma, T* exp_x)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
exp_x[i] = exp(x[i]/(*gamma));
}
}
template <typename T>
__global__ void computeNegExp(const T* x, const T* nx, const int n, const T* gamma, T* exp_nx)
{
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
{
exp_nx[i] = exp(-x[i]/(*gamma));
}
}
template <typename T>
__global__ void computeMaxAndExp(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* x_max,
T* exp_x
)
{
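// One thread per net: find the net's maximum pin coordinate, then exponentiate the
// shifted values (x - x_max)/gamma so the following log-sum-exp reduction stays numerically stable.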
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
x_max[i] = -FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
x_max[i] = max(x_max[i], xx);
}
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
exp_x[jj] = exp((xx-x_max[i])/(*gamma));
}
}
}
}
template <typename T>
__global__ void computeMinAndNegExp(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* x_min,
T* exp_nx
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
x_min[i] = FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
x_min[i] = min(x_min[i], xx);
}
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
T xx = x[jj];
exp_nx[jj] = exp(-(xx-x_min[i])/(*gamma));
}
}
}
}
template <typename T>
__global__ void computeLogSumExp(
const T* exp_x_sum,
const T* x_max,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_x_sum[i]) + x_max[i];
}
else
{
partial_wl[i] = 0;
}
}
}
template <typename T>
__global__ void computeLogSumNegExp(
const T* exp_nx_sum,
const T* x_min,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
T* partial_wl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
partial_wl[i] = (*gamma)*log(exp_nx_sum[i]) - x_min[i];
}
else
{
partial_wl[i] = 0;
}
}
}
template <typename T>
__global__ void sumArray(const T* x, const int n, T* output)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i == 0)
{
*output = 0;
for (int j = 0; j < n; ++j)
{
*output += x[j];
}
}
}
template <typename T>
__global__ void computeLogSumExpWirelengthGrad(
const T* exp_x, const T* exp_nx,
const T* exp_x_sum, const T* exp_nx_sum,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
const T* gamma,
const T* grad_tensor,
T* grad_x_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
if (net_mask[i])
{
T reciprocal_exp_x_sum = 1.0/exp_x_sum[i];
T reciprocal_exp_nx_sum = 1.0/exp_nx_sum[i];
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
int jj = flat_netpin[j];
grad_x_tensor[jj] = (exp_x[jj]*reciprocal_exp_x_sum - exp_nx[jj]*reciprocal_exp_nx_sum)*(*grad_tensor);
//grad_x_tensor[jj] = (exp_x[jj]/exp_x_sum[i] - exp_nx[jj]/exp_nx_sum[i])*(*grad_tensor);
}
}
}
}
template <typename T>
int computeLogSumExpWirelengthCudaLauncher(
const T* x, const T* y,
const int* flat_netpin,
const int* netpin_start,
const T* netpin_values,
const unsigned char* net_mask,
int num_nets,
int num_pins,
const T* gamma,
T* exp_xy, T* exp_nxy,
T* exp_xy_sum, T* exp_nxy_sum,
T* partial_wl, // wirelength of each net
const T* grad_tensor,
T* grad_x_tensor, T* grad_y_tensor // the gradient is partial total wirelength to partial pin position
)
{
int thread_count = 512;
int block_count_nets = (num_nets + thread_count - 1) / thread_count; // separate x and y
cudaError_t status;
cudaStream_t stream_x_exp;
cudaStream_t stream_nx_exp;
cudaStream_t stream_y_exp;
cudaStream_t stream_ny_exp;
status = cudaStreamCreate(&stream_x_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_x_exp\n");
fflush(stdout);
return 1;
}
status = cudaStreamCreate(&stream_y_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_y_exp\n");
fflush(stdout);
return 1;
}
if (grad_tensor)
{
computeLogSumExpWirelengthGrad<<<block_count_nets, thread_count, 0, stream_x_exp>>>(
exp_xy, exp_nxy,
exp_xy_sum, exp_nxy_sum,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
grad_tensor,
grad_x_tensor
);
computeLogSumExpWirelengthGrad<<<block_count_nets, thread_count, 0, stream_y_exp>>>(
exp_xy+num_pins, exp_nxy+num_pins,
exp_xy_sum+num_nets, exp_nxy_sum+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
grad_tensor,
grad_y_tensor
);
}
else
{
status = cudaStreamCreate(&stream_nx_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_nx_exp\n");
fflush(stdout);
return 1;
}
status = cudaStreamCreate(&stream_ny_exp);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_ny_exp\n");
fflush(stdout);
return 1;
}
T* xy_max = nullptr;
status = cudaMalloc((void**)&xy_max, 2*num_nets*sizeof(T));
if (status != cudaSuccess)
{
printf("cudaMalloc failed for xy_max\n");
fflush(stdout);
return 1;
}
T* xy_min = nullptr;
status = cudaMalloc((void**)&xy_min, 2*num_nets*sizeof(T));
if (status != cudaSuccess)
{
printf("cudaMalloc failed for xy_min\n");
fflush(stdout);
return 1;
}
//T* partial_wl = nullptr;
//status = cudaMalloc((void**)&partial_wl, 2*num_nets*sizeof(T));
//if (status != cudaSuccess)
//{
// printf("cudaMalloc failed for partial_wl\n");
// fflush(stdout);
// return 1;
//}
//// be careful, partial_wl is not initialized yet
T alpha = 1.0;
T beta = 0.0;
computeMaxAndExp<<<block_count_nets, thread_count, 0, stream_x_exp>>>(
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_max,
exp_xy
);
computeMinAndNegExp<<<block_count_nets, thread_count, 0, stream_nx_exp>>>(
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_min,
exp_nxy
);
computeMaxAndExp<<<block_count_nets, thread_count, 0, stream_y_exp>>>(
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_max+num_nets,
exp_xy+num_pins
);
computeMinAndNegExp<<<block_count_nets, thread_count, 0, stream_ny_exp>>>(
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
xy_min+num_nets,
exp_nxy+num_pins
);
cusparseStatus_t sparse_status;
cusparseHandle_t handle_x_exp = 0;
cusparseHandle_t handle_nx_exp = 0;
cusparseHandle_t handle_y_exp = 0;
cusparseHandle_t handle_ny_exp = 0;
cusparseMatDescr_t descr = 0;
/* initialize cusparse library */
sparse_status= cusparseCreate(&handle_x_exp);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= cusparseCreate(&handle_nx_exp);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= cusparseCreate(&handle_y_exp);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
sparse_status= cusparseCreate(&handle_ny_exp);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("CUSPARSE Library initialization failed\n");
fflush(stdout);
return 1;
}
/* create and setup matrix descriptor */
sparse_status= cusparseCreateMatDescr(&descr);
if (sparse_status != CUSPARSE_STATUS_SUCCESS) {
printf("Matrix descriptor initialization failed\n");
fflush(stdout);
return 1;
}
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
cusparseSetStream(handle_x_exp, stream_x_exp);
cusparseSetStream(handle_nx_exp, stream_nx_exp);
cusparseSetStream(handle_y_exp, stream_y_exp);
cusparseSetStream(handle_ny_exp, stream_ny_exp);
csrmv(
handle_x_exp,
CUSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_xy,
&beta,
exp_xy_sum
);
csrmv(
handle_y_exp,
CUSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_xy+num_pins,
&beta,
exp_xy_sum+num_nets
);
csrmv(
handle_nx_exp,
CUSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_nxy,
&beta,
exp_nxy_sum
);
csrmv(
handle_ny_exp,
CUSPARSE_OPERATION_NON_TRANSPOSE,
num_nets,
num_pins,
num_pins,
&alpha,
descr,
netpin_values,
netpin_start, flat_netpin,
exp_nxy+num_pins,
&beta,
exp_nxy_sum+num_nets
);
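// The four csrmv calls above treat the net-to-pin map as a (num_nets x num_pins) sparse matrix in
// CSR form (netpin_start = row offsets, flat_netpin = column indices, netpin_values = entries);
// with alpha = 1 and beta = 0 each call reduces the per-pin exp terms into per-net sums, e.g.
// exp_xy_sum[i] = sum over pins j of net i of netpin_values[j] * exp_xy[j]. netpin_values is
// presumably all ones here (an assumption), which makes this a plain segmented sum per net.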
computeLogSumExp<<<block_count_nets, thread_count, 0, stream_x_exp>>>(
exp_xy_sum,
xy_max,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl
);
computeLogSumNegExp<<<block_count_nets, thread_count, 0, stream_nx_exp>>>(
exp_nxy_sum,
xy_min,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+num_nets
);
computeLogSumExp<<<block_count_nets, thread_count, 0, stream_y_exp>>>(
exp_xy_sum+num_nets,
xy_max+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+2*num_nets
);
computeLogSumNegExp<<<block_count_nets, thread_count, 0, stream_ny_exp>>>(
exp_nxy_sum+num_nets,
xy_min+num_nets,
flat_netpin,
netpin_start,
net_mask,
num_nets,
gamma,
partial_wl+3*num_nets
);
/* destroy matrix descriptor */
sparse_status = cusparseDestroyMatDescr(descr);
descr = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("Matrix descriptor destruction failed\n");
fflush(stdout);
return 1;
}
/* destroy handle */
sparse_status = cusparseDestroy(handle_x_exp);
handle_x_exp = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = cusparseDestroy(handle_nx_exp);
handle_nx_exp = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = cusparseDestroy(handle_y_exp);
handle_y_exp = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
sparse_status = cusparseDestroy(handle_ny_exp);
handle_ny_exp = 0;
if (sparse_status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE Library release of resources failed\n");
fflush(stdout);
return 1;
}
// The summation over partial_wl is moved out to ATen;
// a significant speedup was observed compared with the in-kernel sumArray
//sumArray<<<1, 1>>>(partial_wl, 2*num_nets, wl);
if (xy_max)
{
cudaFree(xy_max);
xy_max = nullptr;
}
if (xy_min)
{
cudaFree(xy_min);
xy_min = nullptr;
}
//if (partial_wl)
//{
// cudaFree(partial_wl);
// partial_wl = nullptr;
//}
fflush(stdout);
status = cudaStreamDestroy(stream_nx_exp);
if (status != cudaSuccess)
{
printf("stream_nx_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = cudaStreamDestroy(stream_ny_exp);
if (status != cudaSuccess)
{
printf("stream_ny_exp destroy failed\n");
fflush(stdout);
return 1;
}
}
/* destroy stream */
status = cudaStreamDestroy(stream_x_exp);
if (status != cudaSuccess)
{
printf("stream_x_exp destroy failed\n");
fflush(stdout);
return 1;
}
status = cudaStreamDestroy(stream_y_exp);
if (status != cudaSuccess)
{
printf("stream_y_exp destroy failed\n");
fflush(stdout);
return 1;
}
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputeLogSumExpWirelengthLauncher(\
const T* x, const T* y, \
const int* flat_netpin, \
const int* netpin_start, \
const T* netpin_values, \
const unsigned char* net_mask, \
int num_nets,\
int num_pins,\
const T* gamma, \
T* exp_xy, T* exp_nxy, \
T* exp_xy_sum, T* exp_nxy_sum, \
T* partial_wl, \
const T* grad_tensor, \
T* grad_x_tensor, T* grad_y_tensor \
)\
{\
return computeLogSumExpWirelengthCudaLauncher(\
x, y, \
flat_netpin, \
netpin_start, \
netpin_values, \
net_mask, \
num_nets,\
num_pins,\
gamma, \
exp_xy, exp_nxy, \
exp_xy_sum, exp_nxy_sum, \
partial_wl, \
grad_tensor, \
grad_x_tensor, grad_y_tensor \
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
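// The macro above forces instantiation of the launcher template for float and double so that the
// definition can stay in this .cu file. A hedged, hypothetical host-side call (the d_* names are
// illustrative, not part of this file) would look like:
// computeLogSumExpWirelengthCudaLauncher<float>(d_x, d_y, d_flat_netpin, d_netpin_start,
// d_netpin_values, d_net_mask, num_nets, num_pins, d_gamma,
// d_exp_xy, d_exp_nxy, d_exp_xy_sum, d_exp_nxy_sum,
// d_partial_wl, /*grad_tensor=*/nullptr, nullptr, nullptr);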
DREAMPLACE_END_NAMESPACE
|
387a96ad24f5d521e2a69ef2a5d5b5a394ce646f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _KERNEL_CU_
#define _KERNEL_CU_
__global__ void testKernel(double* g_idata, double* g_odata){
extern __shared__ double sdata[];
const unsigned int bid = blockIdx.x;
const unsigned int tid_in_block = threadIdx.x;
const unsigned int tid_in_grid = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid_in_block] = g_idata[tid_in_grid];
__syncthreads();
sdata[tid_in_block] *= bid;
__syncthreads();
g_odata[tid_in_grid] = sdata[tid_in_block];
}
#endif
| 387a96ad24f5d521e2a69ef2a5d5b5a394ce646f.cu | #ifndef _KERNEL_CU_
#define _KERNEL_CU_
__global__ void testKernel(double* g_idata, double* g_odata){
extern __shared__ double sdata[];
const unsigned int bid = blockIdx.x;
const unsigned int tid_in_block = threadIdx.x;
const unsigned int tid_in_grid = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid_in_block] = g_idata[tid_in_grid];
__syncthreads();
sdata[tid_in_block] *= bid;
__syncthreads();
g_odata[tid_in_grid] = sdata[tid_in_block];
}
#endif
|
e58c820ff7799c8bbaa1f40ee0a6f01cbab76d85.hip | // !!! This is a file automatically generated by hipify!!!
//
// Program to solve Laplace equation on a regular 3D grid
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 16
#define BLOCK_Y 4
#define BLOCK_Z 4
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <laplace3d_kernel_new.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=256, NY=256, NZ=256, REPEAT=10,
bx, by, bz, i, j, k, ind;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( hipMalloc((void **)&d_u1, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( hipMalloc((void **)&d_u2, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
hipEventRecord(start);
checkCudaErrors( hipMemcpy(d_u1, h_u1, sizeof(float)*NX*NY*NZ,
hipMemcpyHostToDevice) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %.1f (ms) \n", milli);
// Set up the execution configuration
bx = 1 + (NX-1)/BLOCK_X;
by = 1 + (NY-1)/BLOCK_Y;
bz = 1 + (NZ-1)/BLOCK_Z;
dim3 dimGrid(bx,by,bz);
dim3 dimBlock(BLOCK_X,BLOCK_Y,BLOCK_Z);
// printf("\n dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z);
// printf(" dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z);
// Execute GPU kernel
hipEventRecord(start);
for (i = 1; i <= REPEAT; ++i) {
hipLaunchKernelGGL(( GPU_laplace3d), dim3(dimGrid), dim3(dimBlock), 0, 0, NX, NY, NZ, d_u1, d_u2);
getLastCudaError("GPU_laplace3d execution failed\n");
d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u2
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_laplace3d_naive: %.1f (ms) \n", REPEAT, milli);
// Read back GPU results
hipEventRecord(start);
checkCudaErrors( hipMemcpy(h_u2, d_u1, sizeof(float)*NX*NY*NZ,
hipMemcpyDeviceToHost) );
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %.1f (ms) \n", milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
hipEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3
}
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_laplace3d: %.1f (ms) \n \n", REPEAT, milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( hipFree(d_u1) );
checkCudaErrors( hipFree(d_u2) );
free(h_u1);
free(h_u2);
free(h_u3);
hipDeviceReset();
}
| e58c820ff7799c8bbaa1f40ee0a6f01cbab76d85.cu | //
// Program to solve Laplace equation on a regular 3D grid
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////
// define kernel block size
////////////////////////////////////////////////////////////////////////
#define BLOCK_X 16
#define BLOCK_Y 4
#define BLOCK_Z 4
////////////////////////////////////////////////////////////////////////
// include kernel function
////////////////////////////////////////////////////////////////////////
#include <laplace3d_kernel_new.h>
////////////////////////////////////////////////////////////////////////
// declare Gold routine
////////////////////////////////////////////////////////////////////////
void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2);
////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////
int main(int argc, const char **argv){
// 'h_' prefix - CPU (host) memory space
int NX=256, NY=256, NZ=256, REPEAT=10,
bx, by, bz, i, j, k, ind;
float *h_u1, *h_u2, *h_u3, *h_foo, err;
// 'd_' prefix - GPU (device) memory space
float *d_u1, *d_u2, *d_foo;
printf("\nGrid dimensions: %d x %d x %d\n", NX, NY, NZ);
// initialise card
findCudaDevice(argc, argv);
// initialise CUDA timing
float milli;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// allocate memory for arrays
h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ);
h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ);
checkCudaErrors( cudaMalloc((void **)&d_u1, sizeof(float)*NX*NY*NZ) );
checkCudaErrors( cudaMalloc((void **)&d_u2, sizeof(float)*NX*NY*NZ) );
// initialise u1
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1)
h_u1[ind] = 1.0f; // Dirichlet b.c.'s
else
h_u1[ind] = 0.0f;
}
}
}
// copy u1 to device
cudaEventRecord(start);
checkCudaErrors( cudaMemcpy(d_u1, h_u1, sizeof(float)*NX*NY*NZ,
cudaMemcpyHostToDevice) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u1 to device: %.1f (ms) \n", milli);
// Set up the execution configuration
bx = 1 + (NX-1)/BLOCK_X;
by = 1 + (NY-1)/BLOCK_Y;
bz = 1 + (NZ-1)/BLOCK_Z;
dim3 dimGrid(bx,by,bz);
dim3 dimBlock(BLOCK_X,BLOCK_Y,BLOCK_Z);
// printf("\n dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z);
// printf(" dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z);
// Execute GPU kernel
cudaEventRecord(start);
for (i = 1; i <= REPEAT; ++i) {
GPU_laplace3d<<<dimGrid, dimBlock>>>(NX, NY, NZ, d_u1, d_u2);
getLastCudaError("GPU_laplace3d execution failed\n");
d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u2
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx GPU_laplace3d_naive: %.1f (ms) \n", REPEAT, milli);
// Read back GPU results
cudaEventRecord(start);
checkCudaErrors( cudaMemcpy(h_u2, d_u1, sizeof(float)*NX*NY*NZ,
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\nCopy u2 to host: %.1f (ms) \n", milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u2[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// Gold treatment
cudaEventRecord(start);
for (int i = 1; i <= REPEAT; ++i) {
Gold_laplace3d(NX, NY, NZ, h_u1, h_u3);
h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milli, start, stop);
printf("\n%dx Gold_laplace3d: %.1f (ms) \n \n", REPEAT, milli);
// print out corner of array
/*
for (k=0; k<3; k++) {
for (j=0; j<8; j++) {
for (i=0; i<8; i++) {
ind = i + j*NX + k*NX*NY;
printf(" %5.2f ", h_u1[ind]);
}
printf("\n");
}
printf("\n");
}
*/
// error check
err = 0.0;
for (k=0; k<NZ; k++) {
for (j=0; j<NY; j++) {
for (i=0; i<NX; i++) {
ind = i + j*NX + k*NX*NY;
err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]);
}
}
}
printf("rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ)));
// Release GPU and CPU memory
checkCudaErrors( cudaFree(d_u1) );
checkCudaErrors( cudaFree(d_u2) );
free(h_u1);
free(h_u2);
free(h_u3);
cudaDeviceReset();
}
|
b5b14a5d952e1fac667f48e1bb50b1f1552facac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//#include <stdio.h>
#include "RayTracer.h"
//#include "helper_cuda.h"
__device__ float SphereIntersection(float4 rayOrigin, float4 rayDirection, float4 spherePosition, float sphereRadius);
__device__ float QuadatricSolver(float A, float B, float C);
__device__ float4 PointLightContribution(float4 position, float4 normal, float4 color, float4 lightPosition, float4 cameraPosition);
__global__ void RayTracer(uchar4* dest, const int imageW, const int imageH, float4 cameraPosition, float4 cameraUp, float4 cameraForward, float4 cameraRight, float nearPlaneDistance, float2 viewSize)
{
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
// Compute the location in the dest array that will be written to
const int pixelIndex = imageW * iy + ix;
float4 pixelColor;
// Compute the center of the near plane. All rays will be computed as an offset from this point
const float4 lookAt = cameraPosition + cameraForward * nearPlaneDistance;
// Find where the ray intersects the near plane and create the vector portion of the ray from that
const float4 rayMidPoint = lookAt + cameraRight * ((float(ix) / float(imageW) - 0.5f) * viewSize.x) + cameraUp * ((float(iy) / float(imageH) - 0.5f) * viewSize.y);
const float4 ray = normalize(rayMidPoint - cameraPosition);
// Hardcoded sphere
const float4 sphereCenter = make_float4(0, 0, 50, 1);
const float4 sphereColor = make_float4(0.4f, 0, 0.4f, 1.0f);
const float radius = 10.0f;
const float4 otherSphereCenter = make_float4(5, 0, 30, 1);
const float4 otherSphereColor = make_float4(0, 0.4f, 0.4f, 1.0f);
const float otherRadius = 1.0f;
// Hardcoded light
const float4 lightPosition = make_float4(10, 0, 20, 1);
float t = SphereIntersection(cameraPosition, ray, sphereCenter, radius);
float otherT = SphereIntersection(cameraPosition, ray, otherSphereCenter, otherRadius);
float4 intersectionPoint;
float4 intersectionNormal;
if(t > 0 && (t < otherT || otherT == -1.0f))
{
intersectionPoint = cameraPosition + t * ray;
intersectionNormal = normalize(intersectionPoint - sphereCenter);
float lightT = SphereIntersection(intersectionPoint, normalize(lightPosition - intersectionPoint), otherSphereCenter, otherRadius);
if(lightT <= 0)
{
pixelColor = PointLightContribution(intersectionPoint, intersectionNormal, sphereColor, lightPosition, cameraPosition);
}
else
{
pixelColor = sphereColor * AMBIENT_STRENGTH;
pixelColor.w = 1.0f;
}
}
else if(otherT > 0)
{
intersectionPoint = cameraPosition + otherT * ray;
intersectionNormal = normalize(intersectionPoint - otherSphereCenter);
pixelColor = PointLightContribution(intersectionPoint, intersectionNormal, otherSphereColor, lightPosition, cameraPosition);
}
else
{
pixelColor = make_float4(BACKGROUND_COLOR);
}
dest[pixelIndex] = make_uchar4((unsigned char)(pixelColor.x * 255), (unsigned char)(pixelColor.y * 255), (unsigned char)(pixelColor.z * 255), 255);
}
__device__ float4 PointLightContribution(float4 position, float4 normal, float4 color, float4 lightPosition, float4 cameraPosition)
{
const float4 lightDirection = normalize(lightPosition - position);
const float4 halfVector = normalize(lightDirection + normalize(cameraPosition - position));
float diffuseStrength = dot(normal, lightDirection);
float specularStrength = dot(normal, halfVector);
diffuseStrength = clamp(diffuseStrength, 0.0f, 1.0f);
specularStrength = clamp(specularStrength, 0.0f, 1.0f);
specularStrength = pow(specularStrength, 15);
float lightCoefficient = diffuseStrength + AMBIENT_STRENGTH;
const float4 litColor = make_float4(clamp(color.x * lightCoefficient + specularStrength, 0.0f, 1.0f),
clamp(color.y * lightCoefficient + specularStrength, 0.0f, 1.0f),
clamp(color.z * lightCoefficient + specularStrength, 0.0f, 1.0f),
1.0f);
return litColor;
}
__device__ float SphereIntersection(float4 rayOrigin, float4 rayDirection, float4 spherePosition, float sphereRadius)
{
// Calculate the three coefficients in the quadratic equation
const float4 rayOriginMinusSphereCenter = rayOrigin - spherePosition;
const float A = dot(rayDirection, rayDirection);
const float B = 2 * dot(rayOriginMinusSphereCenter, rayDirection);
const float C = dot(rayOriginMinusSphereCenter, rayOriginMinusSphereCenter) - sphereRadius * sphereRadius;
return QuadatricSolver(A, B, C);
}
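// Geometry note: substituting the ray p(t) = rayOrigin + t * rayDirection into
// |p - spherePosition|^2 = sphereRadius^2 yields the quadratic A*t^2 + B*t + C = 0 with exactly
// the A, B, C built above; since the callers pass a normalized direction, A is ~1, and a negative
// discriminant (handled in QuadatricSolver) means the ray misses the sphere entirely.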
__device__ float QuadatricSolver(float A, float B, float C)
{
//Calculate the discriminant
const float disc = B * B - 4 * A * C;
float t = -1.0f;
if(disc >= 0)
{
const float discSqrt = sqrtf(disc);
float q;
if(B < 0)
{
q = (-B - discSqrt) / 2.0f;
}
else
{
q = (-B + discSqrt) / 2.0f;
}
float t0 = q / A;
float t1 = C / q;
if(t0 > t1)
{
float temp = t0;
t0 = t1;
t1 = temp;
}
if(t1 < 0)
{
// both roots are negative: the sphere lies behind the ray origin, so keep t = -1 (no hit)
}
else if(t0 < 0)
{
t = t1;
}
else
{
t = t0;
}
}
return t;
}
void RunRayTracer(uchar4* dest, const int imageW, const int imageH, const int xThreadsPerBlock, const float4 a_vCameraPosition, const float4 a_vCameraForward, const float4 a_vCameraUp, const float4 a_vCameraRight, const float a_fNearPlaneDistance)
{
dim3 numThreads(20, 20);
dim3 numBlocks(64, 36);
float2 viewSize;
viewSize = make_float2(imageW, imageH);
hipLaunchKernelGGL(( RayTracer), dim3(numBlocks), dim3(numThreads), 0, 0, dest, imageW, imageH, a_vCameraPosition, a_vCameraUp, a_vCameraForward, a_vCameraRight, a_fNearPlaneDistance, viewSize);
} | b5b14a5d952e1fac667f48e1bb50b1f1552facac.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//#include <stdio.h>
#include "RayTracer.h"
//#include "helper_cuda.h"
__device__ float SphereIntersection(float4 rayOrigin, float4 rayDirection, float4 spherePosition, float sphereRadius);
__device__ float QuadatricSolver(float A, float B, float C);
__device__ float4 PointLightContribution(float4 position, float4 normal, float4 color, float4 lightPosition, float4 cameraPosition);
__global__ void RayTracer(uchar4* dest, const int imageW, const int imageH, float4 cameraPosition, float4 cameraUp, float4 cameraForward, float4 cameraRight, float nearPlaneDistance, float2 viewSize)
{
const int ix = blockIdx.x * blockDim.x + threadIdx.x;
const int iy = blockIdx.y * blockDim.y + threadIdx.y;
// Compute the location in the dest array that will be written to
const int pixelIndex = imageW * iy + ix;
float4 pixelColor;
// Compute the center of the near plane. All rays will be computed as an offset from this point
const float4 lookAt = cameraPosition + cameraForward * nearPlaneDistance;
// Find where the ray intersects the near plane and create the vector portion of the ray from that
const float4 rayMidPoint = lookAt + cameraRight * ((float(ix) / float(imageW) - 0.5f) * viewSize.x) + cameraUp * ((float(iy) / float(imageH) - 0.5f) * viewSize.y);
const float4 ray = normalize(rayMidPoint - cameraPosition);
// Hardcoded sphere
const float4 sphereCenter = make_float4(0, 0, 50, 1);
const float4 sphereColor = make_float4(0.4f, 0, 0.4f, 1.0f);
const float radius = 10.0f;
const float4 otherSphereCenter = make_float4(5, 0, 30, 1);
const float4 otherSphereColor = make_float4(0, 0.4f, 0.4f, 1.0f);
const float otherRadius = 1.0f;
// Hardcoded light
const float4 lightPosition = make_float4(10, 0, 20, 1);
float t = SphereIntersection(cameraPosition, ray, sphereCenter, radius);
float otherT = SphereIntersection(cameraPosition, ray, otherSphereCenter, otherRadius);
float4 intersectionPoint;
float4 intersectionNormal;
if(t > 0 && (t < otherT || otherT == -1.0f))
{
intersectionPoint = cameraPosition + t * ray;
intersectionNormal = normalize(intersectionPoint - sphereCenter);
float lightT = SphereIntersection(intersectionPoint, normalize(lightPosition - intersectionPoint), otherSphereCenter, otherRadius);
if(lightT <= 0)
{
pixelColor = PointLightContribution(intersectionPoint, intersectionNormal, sphereColor, lightPosition, cameraPosition);
}
else
{
pixelColor = sphereColor * AMBIENT_STRENGTH;
pixelColor.w = 1.0f;
}
}
else if(otherT > 0)
{
intersectionPoint = cameraPosition + otherT * ray;
intersectionNormal = normalize(intersectionPoint - otherSphereCenter);
pixelColor = PointLightContribution(intersectionPoint, intersectionNormal, otherSphereColor, lightPosition, cameraPosition);
}
else
{
pixelColor = make_float4(BACKGROUND_COLOR);
}
dest[pixelIndex] = make_uchar4((unsigned char)(pixelColor.x * 255), (unsigned char)(pixelColor.y * 255), (unsigned char)(pixelColor.z * 255), 255);
}
__device__ float4 PointLightContribution(float4 position, float4 normal, float4 color, float4 lightPosition, float4 cameraPosition)
{
const float4 lightDirection = normalize(lightPosition - position);
const float4 halfVector = normalize(lightDirection + normalize(cameraPosition - position));
float diffuseStrength = dot(normal, lightDirection);
float specularStrength = dot(normal, halfVector);
diffuseStrength = clamp(diffuseStrength, 0.0f, 1.0f);
specularStrength = clamp(specularStrength, 0.0f, 1.0f);
specularStrength = pow(specularStrength, 15);
float lightCoefficient = diffuseStrength + AMBIENT_STRENGTH;
const float4 litColor = make_float4(clamp(color.x * lightCoefficient + specularStrength, 0.0f, 1.0f),
clamp(color.y * lightCoefficient + specularStrength, 0.0f, 1.0f),
clamp(color.z * lightCoefficient + specularStrength, 0.0f, 1.0f),
1.0f);
return litColor;
}
__device__ float SphereIntersection(float4 rayOrigin, float4 rayDirection, float4 spherePosition, float sphereRadius)
{
// Calculate the three coefficients in the quadratic equation
const float4 rayOriginMinusSphereCenter = rayOrigin - spherePosition;
const float A = dot(rayDirection, rayDirection);
const float B = 2 * dot(rayOriginMinusSphereCenter, rayDirection);
const float C = dot(rayOriginMinusSphereCenter, rayOriginMinusSphereCenter) - sphereRadius * sphereRadius;
return QuadatricSolver(A, B, C);
}
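// Geometry note: substituting the ray p(t) = rayOrigin + t * rayDirection into
// |p - spherePosition|^2 = sphereRadius^2 yields the quadratic A*t^2 + B*t + C = 0 with exactly
// the A, B, C built above; since the callers pass a normalized direction, A is ~1, and a negative
// discriminant (handled in QuadatricSolver) means the ray misses the sphere entirely.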
__device__ float QuadatricSolver(float A, float B, float C)
{
//Calculate the discriminant
const float disc = B * B - 4 * A * C;
float t = -1.0f;
if(disc >= 0)
{
const float discSqrt = sqrtf(disc);
float q;
if(B < 0)
{
q = (-B - discSqrt) / 2.0f;
}
else
{
q = (-B + discSqrt) / 2.0f;
}
float t0 = q / A;
float t1 = C / q;
if(t0 > t1)
{
float temp = t0;
t0 = t1;
t1 = temp;
}
if(t1 < 0)
{
// both roots are negative: the sphere lies behind the ray origin, so keep t = -1 (no hit)
}
else if(t0 < 0)
{
t = t1;
}
else
{
t = t0;
}
}
return t;
}
void RunRayTracer(uchar4* dest, const int imageW, const int imageH, const int xThreadsPerBlock, const float4 a_vCameraPosition, const float4 a_vCameraForward, const float4 a_vCameraUp, const float4 a_vCameraRight, const float a_fNearPlaneDistance)
{
dim3 numThreads(20, 20);
dim3 numBlocks(64, 36);
float2 viewSize;
viewSize = make_float2(imageW, imageH);
RayTracer<<<numBlocks, numThreads>>>(dest, imageW, imageH, a_vCameraPosition, a_vCameraUp, a_vCameraForward, a_vCameraRight, a_fNearPlaneDistance, viewSize);
} |
31b3a59420fc7c4714f2699c9f0e9682a374b085.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include "image.h"
#include "resize.h"
#include "error_check.h"
int main(int argc, char **argv) {
if (argc != 3) {
std::cout << "Expected 2 filenames to be provided: 1st is input, 2nd is output" << std::endl;
return 1;
}
std::string filename_input(argv[1]);
std::cout << "Loading " << filename_input << std::endl;
unsigned char *input;
int width;
int height;
int channels;
const int scale = 8;
int allocation_size_source = 0;
auto input_image_allocation_function = [&allocation_size_source, &input](const int &allocation_size) -> void {
if (allocation_size_source < allocation_size) {
// If a previous, smaller allocation exists, free it before reallocating
if (allocation_size_source > 0) {
gpuErrchk(hipFree(input));
}
gpuErrchk(hipMallocManaged(&input, allocation_size));
allocation_size_source = allocation_size;
}
};
auto get_ptr_function = [&input]() -> unsigned char ** {
return &input;
};
read_jpeg_image_cu(filename_input, width, height, channels, input_image_allocation_function, get_ptr_function);
std::cout << "Read image: " << width << " x " << height << " x " << channels << std::endl;
int target_width = width / scale;
int target_height = height / scale;
const int allocation_size_target = width * height * channels;
std::cout << "Target size: " << target_width << " x " << target_height << std::endl;
// Allocate managed memory for the resized output image
unsigned char *output;
gpuErrchk(hipMallocManaged(&output, allocation_size_target));
// Call resize kernel
{
resize_uchar(input, width, height, scale, output);
}
hipDeviceSynchronize();
// Dump output
std::string filename_output(argv[2]);
std::cout << "Saving " << filename_output << std::endl;
write_jpeg_image(filename_output, output, target_width, target_height, channels);
hipFree(input);
hipFree(output);
std::cout << "Done!" << std::endl;
return 0;
}
| 31b3a59420fc7c4714f2699c9f0e9682a374b085.cu | #include <iostream>
#include <vector>
#include "image.h"
#include "resize.h"
#include "error_check.h"
int main(int argc, char **argv) {
if (argc != 3) {
std::cout << "Expected 2 filenames to be provided: 1st is input, 2nd is output" << std::endl;
return 1;
}
std::string filename_input(argv[1]);
std::cout << "Loading " << filename_input << std::endl;
unsigned char *input;
int width;
int height;
int channels;
const int scale = 8;
int allocation_size_source = 0;
auto input_image_allocation_function = [&allocation_size_source, &input](const int &allocation_size) -> void {
if (allocation_size_source < allocation_size) {
// If a previous, smaller allocation exists, free it before reallocating
if (allocation_size_source > 0) {
gpuErrchk(cudaFree(input));
}
gpuErrchk(cudaMallocManaged(&input, allocation_size));
allocation_size_source = allocation_size;
}
};
auto get_ptr_function = [&input]() -> unsigned char ** {
return &input;
};
read_jpeg_image_cu(filename_input, width, height, channels, input_image_allocation_function, get_ptr_function);
std::cout << "Read image: " << width << " x " << height << " x " << channels << std::endl;
int target_width = width / scale;
int target_height = height / scale;
const int allocation_size_target = width * height * channels;
std::cout << "Target size: " << target_width << " x " << target_height << std::endl;
// Allocate managed memory for the resized output image
unsigned char *output;
gpuErrchk(cudaMallocManaged(&output, allocation_size_target));
// Call resize kernel
{
resize_uchar(input, width, height, scale, output);
}
cudaDeviceSynchronize();
// Dump output
std::string filename_output(argv[2]);
std::cout << "Saving " << filename_output << std::endl;
write_jpeg_image(filename_output, output, target_width, target_height, channels);
cudaFree(input);
cudaFree(output);
std::cout << "Done!" << std::endl;
return 0;
}
|
70719e000109f0fec2c5d5b10665363e3ef5c839.hip | // !!! This is a file automatically generated by hipify!!!
#include "CUDA_WRAPPER.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
bool checkCudaErrorStatus(hipError_t status, const std::string& functionName)
{
if (status != hipSuccess)
{
std::cerr << "ERROR [ " << functionName << " ]: " << hipGetErrorString(status) << std::endl;
return false;
}
return true;
}
bool getCudaDeviceProperties(int device)
{
hipError_t deviceStatus;
hipDeviceProp_t properties;
deviceStatus = hipGetDeviceProperties(&properties, device);
if (!checkCudaErrorStatus(deviceStatus, "hipGetDeviceProperties"))
{
return false;
}
std::cout << "Compute Capabilities for " << properties.name << " : " << std::endl;
std::cout << "Major: " << properties.major << ", Minor: " << properties.minor << std::endl;
std::cout << "Details: " << std::endl;
std::cout << " Num of SM : " << properties.multiProcessorCount << std::endl;
std::cout << " Mem per Block: " << properties.sharedMemPerBlock << std::endl;
std::cout << " Mem per SM : " << properties.sharedMemPerMultiprocessor << std::endl;
return true;
}
bool queryCUDADevices()
{
hipError_t deviceStatus;
int deviceCount = 0;
deviceStatus = hipGetDeviceCount(&deviceCount);
if (!checkCudaErrorStatus(deviceStatus, "hipGetDeviceCount"))
{
return false;
}
std::cout << "Num CUDA Devices Found: " << deviceCount << std::endl;
return true;
}
bool startCUDAApplication(int device)
{
hipError_t deviceStatus;
deviceStatus = hipSetDevice(device);
if (!checkCudaErrorStatus(deviceStatus, "hipSetDevice"))
{
return false;
}
else
{
return true;
}
}
bool quiteCUDAApplication()
{
hipError_t deviceStatus;
deviceStatus = hipDeviceReset();
if (!checkCudaErrorStatus(deviceStatus, "hipDeviceReset"))
{
return false;
}
else
{
return true;
}
} | 70719e000109f0fec2c5d5b10665363e3ef5c839.cu | #include "CUDA_WRAPPER.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
bool checkCudaErrorStatus(cudaError_t status, const std::string& functionName)
{
if (status != cudaSuccess)
{
std::cerr << "ERROR [ " << functionName << " ]: " << cudaGetErrorString(status) << std::endl;
return false;
}
return true;
}
bool getCudaDeviceProperties(int device)
{
cudaError_t deviceStatus;
cudaDeviceProp properties;
deviceStatus = cudaGetDeviceProperties(&properties, device);
if (!checkCudaErrorStatus(deviceStatus, "cudaGetDeviceProperties"))
{
return false;
}
std::cout << "Compute Capabilities for " << properties.name << " : " << std::endl;
std::cout << "Major: " << properties.major << ", Minor: " << properties.minor << std::endl;
std::cout << "Details: " << std::endl;
std::cout << " Num of SM : " << properties.multiProcessorCount << std::endl;
std::cout << " Mem per Block: " << properties.sharedMemPerBlock << std::endl;
std::cout << " Mem per SM : " << properties.sharedMemPerMultiprocessor << std::endl;
return true;
}
bool queryCUDADevices()
{
cudaError_t deviceStatus;
int deviceCount = 0;
deviceStatus = cudaGetDeviceCount(&deviceCount);
if (!checkCudaErrorStatus(deviceStatus, "cudaGetDeviceCount"))
{
return false;
}
std::cout << "Num CUDA Devices Found: " << deviceCount << std::endl;
return true;
}
bool startCUDAApplication(int device)
{
cudaError_t deviceStatus;
deviceStatus = cudaSetDevice(device);
if (!checkCudaErrorStatus(deviceStatus, "cudaSetDevice"))
{
return false;
}
else
{
return true;
}
}
bool quiteCUDAApplication()
{
cudaError_t deviceStatus;
deviceStatus = cudaDeviceReset();
if (!checkCudaErrorStatus(deviceStatus, "cudaDeviceReset"))
{
return false;
}
else
{
return true;
}
} |
881f66828311b6f798baac0b68703855e76af252.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// input: iou_matrix, [n, n], points_sampling, [n, npoint], merge function, 0:union, 1: intersection
// min_keep_num
// output: keep_inds [n, n], 0/1
// nmsed_points_sample: [n, npoint], 0/1
#include <stdio.h>
#include <iostream>
#include <vector>
#include <time.h>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
const int block_num = 512;
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
const int threadsPerBlock = sizeof(unsigned long long) * 8;
__global__ void points_inside_boxes(const int n, const int npoint, const float *points, const float* anchors, int* points_sample_mask){
// n: boxes_num, npoint: points_num, points: points_num x 3, anchors: boxes_num, 6
// return: points_sample_mask: boxes_num x npoint
for (int batch_idx=blockIdx.x; batch_idx < n; batch_idx += gridDim.x){
// xmin, ymin, zmin, xmax, ymax, zmax
const float* cur_anchors = anchors + batch_idx * 6;
int *cur_points_sample_mask = points_sample_mask + batch_idx * npoint;
int x_index = threadIdx.x;
int x_stride = blockDim.x;
const float cur_anchors_xmin = cur_anchors[0] - cur_anchors[3] / 2.;
const float cur_anchors_ymin = cur_anchors[1] - cur_anchors[4];
const float cur_anchors_zmin = cur_anchors[2] - cur_anchors[5] / 2.;
const float cur_anchors_xmax = cur_anchors[0] + cur_anchors[3] / 2.;
const float cur_anchors_ymax = cur_anchors[1];
const float cur_anchors_zmax = cur_anchors[2] + cur_anchors[5] / 2.;
for (int points_idx = x_index; points_idx < npoint; points_idx += x_stride){
const float* cur_points = points + points_idx * 3;
const float cur_points_x = cur_points[0];
const float cur_points_y = cur_points[1];
const float cur_points_z = cur_points[2];
int _x = (cur_points_x >= cur_anchors_xmin) * (cur_points_x <= cur_anchors_xmax);
int _y = (cur_points_y >= cur_anchors_ymin) * (cur_points_y <= cur_anchors_ymax);
int _z = (cur_points_z >= cur_anchors_zmin) * (cur_points_z <= cur_anchors_zmax);
cur_points_sample_mask[points_idx] = _x * _y * _z;
}
}
}
__global__ void points_iou_kernel(const int n, const int npoint, const int* points_sample_mask, float* iou_matrix){
// points_sample_mask, [n, npoint], 0/1
// iou_matrix, [n, n]
for (int x_num_idx=blockIdx.x; x_num_idx<n; x_num_idx+=gridDim.x){
for(int y_num_idx=blockIdx.y; y_num_idx<n; y_num_idx+=gridDim.y){
const int* x_points_sample_mask = points_sample_mask + x_num_idx * npoint;
const int* y_points_sample_mask = points_sample_mask + y_num_idx * npoint;
int x_index = threadIdx.x;
int x_stride = blockDim.x;
__shared__ float intersect_list[threadsPerBlock];
__shared__ float union_list[threadsPerBlock];
// first initialize intersect_list and union_list by zero
intersect_list[x_index] = 0;
union_list[x_index] = 0;
__syncthreads();
for(int i_x=x_index; i_x<npoint; i_x+= x_stride){
intersect_list[x_index] = intersect_list[x_index] + float(x_points_sample_mask[i_x] && y_points_sample_mask[i_x]);
union_list[x_index] = union_list[x_index] + float(x_points_sample_mask[i_x] || y_points_sample_mask[i_x]);
}
__syncthreads();
// after calc the intersect
// then get the sum
__shared__ float intersect_sum;
__shared__ float union_sum;
intersect_sum = 0;
union_sum = 0;
__syncthreads();
atomicAdd(&intersect_sum, intersect_list[x_index]);
atomicAdd(&union_sum, union_list[x_index]);
__syncthreads();
float iou = intersect_sum / max(union_sum, 1.);
iou_matrix[x_num_idx * n + y_num_idx] = iou;
}
}
}
__device__ inline float devIou(const int *a, const int *b, int npoint) {
// a:[npoint], b[npoint], then calc the iou
float intersect = 0;
float union_sect = 0;
for (int i = 0; i < npoint; i ++){
intersect += a[i] && b[i];
union_sect += a[i] || b[i];
}
return intersect / union_sect;
}
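// devIou computes the Jaccard index of two binary point-membership masks: |a AND b| / |a OR b|.
// Unlike the reduction-based points_iou_kernel it runs serially within one thread, which is fine
// because the block NMS kernel below calls it once per (row box, column box) pair.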
__global__ void points_nms_block_kernel(const int n, const int npoint, const int merge_function, const float iou_thresh, const int*points_sample, unsigned long long *keep_inds, int *nmsed_points_sample){
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int row_size = min(n - row_start * threadsPerBlock, threadsPerBlock);
const int col_size = min(n - col_start * threadsPerBlock, threadsPerBlock);
const int* col_points_sample = points_sample + (threadsPerBlock * col_start) * npoint;
if (threadIdx.x < row_size){
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const int *cur_points_sample = points_sample + cur_box_idx * npoint;
int *cur_nmsed_points_sample = nmsed_points_sample + cur_box_idx * npoint;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start){
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i ++){
if (devIou(cur_points_sample, col_points_sample + i * npoint, npoint) > iou_thresh) {
// merge the nmsed_points_sample
const int *merged_col_points_sample = col_points_sample + i * npoint;
if (merge_function == 0){
for (int j = 0; j < npoint; j++){
atomicOr(&cur_nmsed_points_sample[j], merged_col_points_sample[j]);
}
}
else if (merge_function == 1){
for (int j = 0; j < npoint; j++){
atomicAnd(&cur_nmsed_points_sample[j], merged_col_points_sample[j]);
}
}
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n, threadsPerBlock);
// keep_inds stores one 64-bit suppression bitmask per (box, column block): shape [n, col_blocks]
keep_inds[cur_box_idx * col_blocks + col_start] = t;
}
}
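// Scheme note: points_nms_block_kernel follows the classic CUDA NMS layout -- boxes are tiled into
// 64-wide column blocks (threadsPerBlock = 64 bits of an unsigned long long), each thread owns one
// row box, and bit i of t records that column box i of this block overlaps the row box above the
// IoU threshold; the host later walks these bitmasks to pick the surviving boxes. In addition,
// the atomicOr / atomicAnd calls fold the point mask of every overlapping column box into the row
// box's mask (merge_function 0 = union, 1 = intersection), so a kept box ends up carrying the
// merged point set, which plain box NMS does not do.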
__global__ void points_nms_kernel(const int n, const int npoint, const int merge_function, float iou_thresh, const float *iou_matrix, const int *points_sample, int *keep_inds, int *nmsed_points_sample) {
// nmsed_points_sample [n, npoint]
for (int x_num_idx=blockIdx.x; x_num_idx<n; x_num_idx+=gridDim.x){
for(int y_num_idx=blockIdx.y; y_num_idx<n; y_num_idx+=gridDim.y){
if (x_num_idx == y_num_idx)
continue;
// const int* x_points_sample = points_sample + x_num_idx * npoint;
const int* y_points_sample = points_sample + y_num_idx * npoint;
const float* x_iou_matrix = iou_matrix + x_num_idx * n;
int *x_keep_inds = keep_inds + x_num_idx * n;
int* x_nmsed_points_sample = nmsed_points_sample + x_num_idx * npoint;
int index = threadIdx.x;
int stride = blockDim.x;
float cur_iou = x_iou_matrix[y_num_idx];
if (cur_iou > iou_thresh){
// merge them togethor
x_keep_inds[y_num_idx] = 1;
for (int i=index;i<npoint;i+=stride){
// merge the result
if (merge_function == 0){
// union the two vector
atomicOr(&x_nmsed_points_sample[i], y_points_sample[i]);
}
else if(merge_function == 1){
atomicAnd(&x_nmsed_points_sample[i], y_points_sample[i]);
}
else{
continue;
}
}
}
}
}
}
__global__ void points_nms_sample(const int n, const int npoint, int merge_function, int* nmsed_points_sample_media, int* nmsed_points_sample){
for (int num_idx=blockIdx.x; num_idx<n; num_idx+=gridDim.x){
int *batch_nmsed_points_sample_media = nmsed_points_sample_media + num_idx * n *npoint;
int *batch_nmsed_points_sample = nmsed_points_sample + num_idx * npoint;
int index = threadIdx.x;
int stride = blockDim.x;
for (int i=index; i<n; i+=stride){
for(int j=0; j < npoint; j++){
if (merge_function == 0 || merge_function == 2){
// union or keep the origin
atomicOr(&batch_nmsed_points_sample[j], batch_nmsed_points_sample_media[i * npoint + j]);
// batch_nmsed_points_sample[j] = batch_nmsed_points_sample[j] + batch_nmsed_points_sample_media[i * npoint + j];
}
else if (merge_function == 1){
atomicAnd(&batch_nmsed_points_sample[j], batch_nmsed_points_sample_media[i * npoint + j]);
// batch_nmsed_points_sample[j] = batch_nmsed_points_sample[j] && batch_nmsed_points_sample_media[i * npoint + j];
}
}
}
}
}
void points_iou_gpu(const int n, const int npoint, const int* points_sample_mask, float* iou_matrix){
dim3 blocks(512, 512);
hipLaunchKernelGGL(( points_iou_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, n, npoint, points_sample_mask, iou_matrix);
// std::cout << "Iou Caluculating Done!!" << std::endl;
}
void points_inside_boxes_gpu(const int n, const int npoint, const float *points, const float* anchors, int* points_sample_mask){
CUDA_CHECK(hipMemset(points_sample_mask, 1, n * npoint * sizeof(int)));
hipLaunchKernelGGL(( points_inside_boxes), dim3(512), dim3(threadsPerBlock), 0, 0, n, npoint, points, anchors, points_sample_mask);
}
void points_nms_block_gpu(const int n, const int npoint, const int merge_function, const float iou_thresh, const int num_to_keep, const int *points_sample, int *keep_inds, int *nmsed_points_sample){
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(n, threadsPerBlock);
CUDA_CHECK(hipMalloc(&mask_dev, n * col_blocks * sizeof(unsigned long long)));
CUDA_CHECK(hipMemcpy(nmsed_points_sample, points_sample, sizeof(int) * n * npoint, hipMemcpyDeviceToDevice));
time_t c_start, c_end;
c_start = clock();
hipEvent_t start, stop; // variables that holds 2 events
float time; // Variable that will hold the time
hipEventCreate(&start); // creating the event 1
hipEventCreate(&stop); // creating the event 2
hipEventRecord(start, 0); // start measuring the time
dim3 blocks(DIVUP(n, threadsPerBlock),
DIVUP(n, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( points_nms_block_kernel), dim3(blocks), dim3(threads), 0, 0, n, npoint, merge_function, iou_thresh, points_sample, mask_dev, nmsed_points_sample);
c_end = clock();
hipEventRecord(stop, 0); // Stop time measuring
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
std::cout << double(c_end - c_start) / CLOCKS_PER_SEC << std::endl; // clock() ticks converted to seconds
std::cout << time << std::endl;
std::cout << "Finished main working !!!" << std::endl;
c_start = clock();
std::vector<unsigned long long> mask_host(n * col_blocks);
hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * n * col_blocks,
hipMemcpyDeviceToHost);
c_end = clock();
std::cout << double(c_end - c_start) / CLOCKS_PER_SEC << std::endl; // clock() ticks converted to seconds
std::cout << "Finished copying" << std::endl;
c_start = clock();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
std::vector<int> cpu_keep_inds(n);
memset(&cpu_keep_inds[0], -1, sizeof(int) * num_to_keep);
std::cout << "setting the output to -1" << std::endl;
int keeping_num = 0;
for (int i=0; i < n; i ++){
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))){
cpu_keep_inds[keeping_num++] = i;
if (keeping_num >= num_to_keep)
break;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j ++){
remv[j] |= p[j];
}
}
}
c_end = clock();
std::cout << double(c_end - c_start) / CLOCKS_PER_SEC << std::endl; // clock() ticks converted to seconds
CUDA_CHECK(hipFree(mask_dev));
CUDA_CHECK(hipMemcpy(keep_inds, &cpu_keep_inds[0], sizeof(int) * num_to_keep, hipMemcpyHostToDevice));
std::cout << "Finished!!!" << std::endl;
}
void points_nms_gpu(const int n, const int npoint, const int merge_function, float iou_thresh, const float *iou_matrix, const int *points_sample, int *keep_inds, int *nmsed_points_sample) {
// std::cout << "Beginning points nms !!!" << std::endl;
int *remove_inds = NULL;
CUDA_CHECK(hipMalloc(&remove_inds, n * n * sizeof(int)));
CUDA_CHECK(hipMemset(remove_inds, 0, n * n * sizeof(int)));
std::vector<int> cpu_keep_inds(n, 1);
// First initialize the nmsed_points_sample by the points_sample
CUDA_CHECK(hipMemcpy(nmsed_points_sample, points_sample, sizeof(int) * n * npoint, hipMemcpyDeviceToDevice));
dim3 blocks(block_num, block_num);
hipLaunchKernelGGL(( points_nms_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, n, npoint, merge_function, iou_thresh, iou_matrix, points_sample, remove_inds, nmsed_points_sample);
// Using for Debug
// std::vector<int> debug(n * npoint);
// CUDA_CHECK(hipMemcpy(&debug[0], media_nmsed_points_sample, sizeof(int) * n * npoint, hipMemcpyDeviceToHost));
// for (int i=0; i<n; i++){
// for (int j=0; j< npoint; j++)
// std::cout << debug[i * npoint + j] << " ";
// std::cout << std::endl;
// }
// std::cout << std::endl;
std::vector<int> cpu_remove_inds(n * n);
CUDA_CHECK(hipMemcpy(&cpu_remove_inds[0], remove_inds, sizeof(int) * n * n, hipMemcpyDeviceToHost));
// std::cout << "points nms_remove inds Done !!!" << std::endl;
// finally get the keep_inds
for (int i=0; i<n; i++){
// std::cout << 1 << std::endl;
if (cpu_keep_inds[i] == 0){
continue;
}
for(int j=i+1; j<n; j++){
if (cpu_remove_inds[i * n + j] == 1){
// remove this point
cpu_keep_inds[j] = 0;
}
}
}
// at last, make it back
CUDA_CHECK(hipMemcpy(keep_inds, &cpu_keep_inds[0], sizeof(int) * n, hipMemcpyHostToDevice));
CUDA_CHECK(hipFree(remove_inds));
// std::cout << "points nms Done !!!" << std::endl;
}
| 881f66828311b6f798baac0b68703855e76af252.cu | // input: iou_matrix, [n, n], points_sampling, [n, npoint], merge function, 0:union, 1: intersection
// min_keep_num
// output: keep_inds [n, n], 0/1
// nmsed_points_sample: [n, npoint], 0/1
#include <stdio.h>
#include <iostream>
#include <vector>
#include <time.h>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
const int block_num = 512;
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
const int threadsPerBlock = sizeof(unsigned long long) * 8;
__global__ void points_inside_boxes(const int n, const int npoint, const float *points, const float* anchors, int* points_sample_mask){
// n: boxes_num, npoint: points_num, points: points_num x 3, anchors: boxes_num, 6
// return: points_sample_mask: boxes_num x npoint
for (int batch_idx=blockIdx.x; batch_idx < n; batch_idx += gridDim.x){
// xmin, ymin, zmin, xmax, ymax, zmax
const float* cur_anchors = anchors + batch_idx * 6;
int *cur_points_sample_mask = points_sample_mask + batch_idx * npoint;
int x_index = threadIdx.x;
int x_stride = blockDim.x;
const float cur_anchors_xmin = cur_anchors[0] - cur_anchors[3] / 2.;
const float cur_anchors_ymin = cur_anchors[1] - cur_anchors[4];
const float cur_anchors_zmin = cur_anchors[2] - cur_anchors[5] / 2.;
const float cur_anchors_xmax = cur_anchors[0] + cur_anchors[3] / 2.;
const float cur_anchors_ymax = cur_anchors[1];
const float cur_anchors_zmax = cur_anchors[2] + cur_anchors[5] / 2.;
for (int points_idx = x_index; points_idx < npoint; points_idx += x_stride){
const float* cur_points = points + points_idx * 3;
const float cur_points_x = cur_points[0];
const float cur_points_y = cur_points[1];
const float cur_points_z = cur_points[2];
int _x = (cur_points_x >= cur_anchors_xmin) * (cur_points_x <= cur_anchors_xmax);
int _y = (cur_points_y >= cur_anchors_ymin) * (cur_points_y <= cur_anchors_ymax);
int _z = (cur_points_z >= cur_anchors_zmin) * (cur_points_z <= cur_anchors_zmax);
cur_points_sample_mask[points_idx] = _x * _y * _z;
}
}
}
__global__ void points_iou_kernel(const int n, const int npoint, const int* points_sample_mask, float* iou_matrix){
// points_sample_mask, [n, npoint], 0/1
// iou_matrix, [n, n]
for (int x_num_idx=blockIdx.x; x_num_idx<n; x_num_idx+=gridDim.x){
for(int y_num_idx=blockIdx.y; y_num_idx<n; y_num_idx+=gridDim.y){
const int* x_points_sample_mask = points_sample_mask + x_num_idx * npoint;
const int* y_points_sample_mask = points_sample_mask + y_num_idx * npoint;
int x_index = threadIdx.x;
int x_stride = blockDim.x;
__shared__ float intersect_list[threadsPerBlock];
__shared__ float union_list[threadsPerBlock];
// first initialize intersect_list and union_list by zero
intersect_list[x_index] = 0;
union_list[x_index] = 0;
__syncthreads();
for(int i_x=x_index; i_x<npoint; i_x+= x_stride){
intersect_list[x_index] = intersect_list[x_index] + float(x_points_sample_mask[i_x] && y_points_sample_mask[i_x]);
union_list[x_index] = union_list[x_index] + float(x_points_sample_mask[i_x] || y_points_sample_mask[i_x]);
}
__syncthreads();
// after calc the intersect
// then get the sum
__shared__ float intersect_sum;
__shared__ float union_sum;
intersect_sum = 0;
union_sum = 0;
__syncthreads();
atomicAdd(&intersect_sum, intersect_list[x_index]);
atomicAdd(&union_sum, union_list[x_index]);
__syncthreads();
float iou = intersect_sum / max(union_sum, 1.);
iou_matrix[x_num_idx * n + y_num_idx] = iou;
}
}
}
__device__ inline float devIou(const int *a, const int *b, int npoint) {
// a:[npoint], b[npoint], then calc the iou
float intersect = 0;
float union_sect = 0;
for (int i = 0; i < npoint; i ++){
intersect += a[i] && b[i];
union_sect += a[i] || b[i];
}
return intersect / union_sect;
}
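// devIou computes the Jaccard index of two binary point-membership masks: |a AND b| / |a OR b|.
// Unlike the reduction-based points_iou_kernel it runs serially within one thread, which is fine
// because the block NMS kernel below calls it once per (row box, column box) pair.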
__global__ void points_nms_block_kernel(const int n, const int npoint, const int merge_function, const float iou_thresh, const int*points_sample, unsigned long long *keep_inds, int *nmsed_points_sample){
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
const int row_size = min(n - row_start * threadsPerBlock, threadsPerBlock);
const int col_size = min(n - col_start * threadsPerBlock, threadsPerBlock);
const int* col_points_sample = points_sample + (threadsPerBlock * col_start) * npoint;
if (threadIdx.x < row_size){
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const int *cur_points_sample = points_sample + cur_box_idx * npoint;
int *cur_nmsed_points_sample = nmsed_points_sample + cur_box_idx * npoint;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start){
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i ++){
if (devIou(cur_points_sample, col_points_sample + i * npoint, npoint) > iou_thresh) {
// merge the nmsed_points_sample
const int *merged_col_points_sample = col_points_sample + i * npoint;
if (merge_function == 0){
for (int j = 0; j < npoint; j++){
atomicOr(&cur_nmsed_points_sample[j], merged_col_points_sample[j]);
}
}
else if (merge_function == 1){
for (int j = 0; j < npoint; j++){
atomicAnd(&cur_nmsed_points_sample[j], merged_col_points_sample[j]);
}
}
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n, threadsPerBlock);
// keep_inds stores one 64-bit suppression bitmask per (box, column block): shape [n, col_blocks]
keep_inds[cur_box_idx * col_blocks + col_start] = t;
}
}
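// Scheme note: points_nms_block_kernel follows the classic CUDA NMS layout -- boxes are tiled into
// 64-wide column blocks (threadsPerBlock = 64 bits of an unsigned long long), each thread owns one
// row box, and bit i of t records that column box i of this block overlaps the row box above the
// IoU threshold; the host later walks these bitmasks to pick the surviving boxes. In addition,
// the atomicOr / atomicAnd calls fold the point mask of every overlapping column box into the row
// box's mask (merge_function 0 = union, 1 = intersection), so a kept box ends up carrying the
// merged point set, which plain box NMS does not do.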
__global__ void points_nms_kernel(const int n, const int npoint, const int merge_function, float iou_thresh, const float *iou_matrix, const int *points_sample, int *keep_inds, int *nmsed_points_sample) {
// nmsed_points_sample [n, npoint]
for (int x_num_idx=blockIdx.x; x_num_idx<n; x_num_idx+=gridDim.x){
for(int y_num_idx=blockIdx.y; y_num_idx<n; y_num_idx+=gridDim.y){
if (x_num_idx == y_num_idx)
continue;
// const int* x_points_sample = points_sample + x_num_idx * npoint;
const int* y_points_sample = points_sample + y_num_idx * npoint;
const float* x_iou_matrix = iou_matrix + x_num_idx * n;
int *x_keep_inds = keep_inds + x_num_idx * n;
int* x_nmsed_points_sample = nmsed_points_sample + x_num_idx * npoint;
int index = threadIdx.x;
int stride = blockDim.x;
float cur_iou = x_iou_matrix[y_num_idx];
if (cur_iou > iou_thresh){
// merge them togethor
x_keep_inds[y_num_idx] = 1;
for (int i=index;i<npoint;i+=stride){
// merge the result
if (merge_function == 0){
// union the two vector
atomicOr(&x_nmsed_points_sample[i], y_points_sample[i]);
}
else if(merge_function == 1){
atomicAnd(&x_nmsed_points_sample[i], y_points_sample[i]);
}
else{
continue;
}
}
}
}
}
}
__global__ void points_nms_sample(const int n, const int npoint, int merge_function, int* nmsed_points_sample_media, int* nmsed_points_sample){
for (int num_idx=blockIdx.x; num_idx<n; num_idx+=gridDim.x){
int *batch_nmsed_points_sample_media = nmsed_points_sample_media + num_idx * n *npoint;
int *batch_nmsed_points_sample = nmsed_points_sample + num_idx * npoint;
int index = threadIdx.x;
int stride = blockDim.x;
for (int i=index; i<n; i+=stride){
for(int j=0; j < npoint; j++){
if (merge_function == 0 || merge_function == 2){
// union or keep the origin
atomicOr(&batch_nmsed_points_sample[j], batch_nmsed_points_sample_media[i * npoint + j]);
// batch_nmsed_points_sample[j] = batch_nmsed_points_sample[j] + batch_nmsed_points_sample_media[i * npoint + j];
}
else if (merge_function == 1){
atomicAnd(&batch_nmsed_points_sample[j], batch_nmsed_points_sample_media[i * npoint + j]);
// batch_nmsed_points_sample[j] = batch_nmsed_points_sample[j] && batch_nmsed_points_sample_media[i * npoint + j];
}
}
}
}
}
void points_iou_gpu(const int n, const int npoint, const int* points_sample_mask, float* iou_matrix){
dim3 blocks(512, 512);
points_iou_kernel<<<blocks, threadsPerBlock>>>(n, npoint, points_sample_mask, iou_matrix);
// std::cout << "Iou Caluculating Done!!" << std::endl;
}
void points_inside_boxes_gpu(const int n, const int npoint, const float *points, const float* anchors, int* points_sample_mask){
CUDA_CHECK(cudaMemset(points_sample_mask, 1, n * npoint * sizeof(int)));
points_inside_boxes<<<512, threadsPerBlock>>>(n, npoint, points, anchors, points_sample_mask);
}
void points_nms_block_gpu(const int n, const int npoint, const int merge_function, const float iou_thresh, const int num_to_keep, const int *points_sample, int *keep_inds, int *nmsed_points_sample){
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(n, threadsPerBlock);
CUDA_CHECK(cudaMalloc(&mask_dev, n * col_blocks * sizeof(unsigned long long)));
CUDA_CHECK(cudaMemcpy(nmsed_points_sample, points_sample, sizeof(int) * n * npoint, cudaMemcpyDeviceToDevice));
time_t c_start, c_end;
c_start = clock();
  cudaEvent_t start, stop; // CUDA events used for timing
  float time; // elapsed time in milliseconds
cudaEventCreate(&start); // creating the event 1
cudaEventCreate(&stop); // creating the event 2
cudaEventRecord(start, 0); // start measuring the time
dim3 blocks(DIVUP(n, threadsPerBlock),
DIVUP(n, threadsPerBlock));
dim3 threads(threadsPerBlock);
points_nms_block_kernel<<<blocks, threads>>>(n, npoint, merge_function, iou_thresh, points_sample, mask_dev, nmsed_points_sample);
c_end = clock();
cudaEventRecord(stop, 0); // Stop time measuring
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
std::cout << difftime(c_end,c_start) << std::endl;
std::cout << time << std::endl;
std::cout << "Finished main working !!!" << std::endl;
c_start = clock();
std::vector<unsigned long long> mask_host(n * col_blocks);
cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * n * col_blocks,
cudaMemcpyDeviceToHost);
c_end = clock();
std::cout << difftime(c_end,c_start) << std::endl;
std::cout << "Finished copying" << std::endl;
c_start = clock();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
std::vector<int> cpu_keep_inds(n);
memset(&cpu_keep_inds[0], -1, sizeof(int) * num_to_keep);
std::cout << "setting the output to -1" << std::endl;
int keeping_num = 0;
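  // Greedy suppression on the host: boxes are visited in index order; a box is kept
  // only if no previously kept box has set its bit in remv, and a kept box then ORs
  // its own mask row into remv so that later overlapping boxes are suppressed.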
for (int i=0; i < n; i ++){
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))){
cpu_keep_inds[keeping_num++] = i;
if (keeping_num >= num_to_keep)
break;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j ++){
remv[j] |= p[j];
}
}
}
c_end = clock();
std::cout << difftime(c_end,c_start) << std::endl;
CUDA_CHECK(cudaFree(mask_dev));
CUDA_CHECK(cudaMemcpy(keep_inds, &cpu_keep_inds[0], sizeof(int) * num_to_keep, cudaMemcpyHostToDevice));
std::cout << "Finished!!!" << std::endl;
}
void points_nms_gpu(const int n, const int npoint, const int merge_function, float iou_thresh, const float *iou_matrix, const int *points_sample, int *keep_inds, int *nmsed_points_sample) {
// std::cout << "Beginning points nms !!!" << std::endl;
int *remove_inds = NULL;
CUDA_CHECK(cudaMalloc(&remove_inds, n * n * sizeof(int)));
CUDA_CHECK(cudaMemset(remove_inds, 0, n * n * sizeof(int)));
std::vector<int> cpu_keep_inds(n, 1);
// First initialize the nmsed_points_sample by the points_sample
CUDA_CHECK(cudaMemcpy(nmsed_points_sample, points_sample, sizeof(int) * n * npoint, cudaMemcpyDeviceToDevice));
dim3 blocks(block_num, block_num);
points_nms_kernel<<<blocks, threadsPerBlock>>>(n, npoint, merge_function, iou_thresh, iou_matrix, points_sample, remove_inds, nmsed_points_sample);
// Using for Debug
// std::vector<int> debug(n * npoint);
// CUDA_CHECK(cudaMemcpy(&debug[0], media_nmsed_points_sample, sizeof(int) * n * npoint, cudaMemcpyDeviceToHost));
// for (int i=0; i<n; i++){
// for (int j=0; j< npoint; j++)
// std::cout << debug[i * npoint + j] << " ";
// std::cout << std::endl;
// }
// std::cout << std::endl;
std::vector<int> cpu_remove_inds(n * n);
CUDA_CHECK(cudaMemcpy(&cpu_remove_inds[0], remove_inds, sizeof(int) * n * n, cudaMemcpyDeviceToHost));
// std::cout << "points nms_remove inds Done !!!" << std::endl;
// finally get the keep_inds
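  // Greedy pass on the host: if box i is still kept, every later box j that i marked
  // in remove_inds is dropped (the corresponding merges were already done on the GPU).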
for (int i=0; i<n; i++){
// std::cout << 1 << std::endl;
if (cpu_keep_inds[i] == 0){
continue;
}
for(int j=i+1; j<n; j++){
if (cpu_remove_inds[i * n + j] == 1){
// remove this point
cpu_keep_inds[j] = 0;
}
}
}
// at last, make it back
CUDA_CHECK(cudaMemcpy(keep_inds, &cpu_keep_inds[0], sizeof(int) * n, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaFree(remove_inds));
// std::cout << "points nms Done !!!" << std::endl;
}
|
d35c30a150cadf0b8afb640aafd086cceb9ebd89.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GpuGaussNewtonSolver.h"
#include "device_utils.h"
#include "cudpp\thrust_wrapper.h"
#include "cudpp\ModerGpuWrapper.h"
#include <iostream>
#include "GpuCholeSky.h"
namespace dfusion
{
//#define DEFINE_USE_HALF_GRAPH_EDGE
//#define CALC_DATA_TERM_NUMERIC
//#define CALC_REG_TERM_NUMERIC
//#define DEBUG_ASSIGN_10M_TO_NO_CORR
//#define DEBUG_ASSIGN_BIG_ENERGY_TO_NO_CORR
//#define ENABLE_ANTI_PODALITY
#ifdef DEFINE_USE_HALF_GRAPH_EDGE
enum{RowPerNode_RegTerm = 3};
#else
enum{ RowPerNode_RegTerm = 6 };
#endif
//#define USE_L2_NORM_DATA_TERM
//#define USE_L2_NORM_REG_TERM
#define CHECK(a, msg){if(!(a)) throw std::exception(msg);}
#define CHECK_LE(a, b){if((a) > (b)) {std::cout << "" << #a << "(" << a << ")<=" << #b << "(" << b << ")";throw std::exception(" ###error!");}}
texture<KnnIdx, hipTextureType1D, hipReadModeElementType> g_nodesKnnTex;
texture<float4, hipTextureType1D, hipReadModeElementType> g_nodesVwTex;
texture<float, hipTextureType1D, hipReadModeElementType> g_twistTex;
__device__ __forceinline__ float4 get_nodesVw(int i)
{
return tex1Dfetch(g_nodesVwTex, i);
}
__device__ __forceinline__ KnnIdx get_nodesKnn(int i)
{
return tex1Dfetch(g_nodesKnnTex, i);
}
__device__ __forceinline__ void get_twist(int i, Tbx::Vec3& r, Tbx::Vec3& t)
{
int i6 = i * 6;
r.x = tex1Dfetch(g_twistTex, i6++);
r.y = tex1Dfetch(g_twistTex, i6++);
r.z = tex1Dfetch(g_twistTex, i6++);
t.x = tex1Dfetch(g_twistTex, i6++);
t.y = tex1Dfetch(g_twistTex, i6++);
t.z = tex1Dfetch(g_twistTex, i6++);
}
__device__ __forceinline__ float3 read_float3_4(float4 a)
{
return make_float3(a.x, a.y, a.z);
}
__device__ __forceinline__ float sqr(float a)
{
return a*a;
}
__device__ __forceinline__ float pow3(float a)
{
return a*a*a;
}
__device__ __forceinline__ float sign(float a)
{
return (a>0.f) - (a<0.f);
}
__device__ __forceinline__ void sort_knn(KnnIdx& knn)
{
for (int i = 1; i < KnnK; i++)
{
KnnIdxType x = knn_k(knn,i);
int j = i;
while (j > 0 && knn_k(knn, j - 1) > x)
{
knn_k(knn, j) = knn_k(knn, j - 1);
j = j - 1;
}
knn_k(knn, j) = x;
}
}
#pragma region --bind textures
void GpuGaussNewtonSolver::bindTextures()
{
if (1)
{
size_t offset;
hipChannelFormatDesc desc = hipCreateChannelDesc<KnnIdx>();
hipBindTexture(&offset, &g_nodesKnnTex, m_nodesKnn.ptr(), &desc,
m_nodesKnn.size() * sizeof(KnnIdx));
if (offset != 0)
throw std::exception("GpuGaussNewtonSolver::bindTextures(): non-zero-offset error1!");
}
if (1)
{
size_t offset;
hipChannelFormatDesc desc = hipCreateChannelDesc<float4>();
hipBindTexture(&offset, &g_nodesVwTex, m_nodesVw.ptr(), &desc,
m_nodesVw.size() * sizeof(float4));
if (offset != 0)
throw std::exception("GpuGaussNewtonSolver::bindTextures(): non-zero-offset error2!");
}
if (1)
{
size_t offset;
hipChannelFormatDesc desc = hipCreateChannelDesc<float>();
hipBindTexture(&offset, &g_twistTex, m_twist.ptr(), &desc,
m_twist.size() * sizeof(float));
if (offset != 0)
throw std::exception("GpuGaussNewtonSolver::bindTextures(): non-zero-offset error3!");
}
}
void GpuGaussNewtonSolver::unBindTextures()
{
hipUnbindTexture(g_twistTex);
hipUnbindTexture(g_nodesVwTex);
hipUnbindTexture(g_nodesKnnTex);
}
#pragma endregion
#pragma region --calc data term
struct DataTermCombined
{
enum
{
CTA_SIZE_X = GpuGaussNewtonSolver::CTA_SIZE_X,
CTA_SIZE_Y = GpuGaussNewtonSolver::CTA_SIZE_Y,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
VarPerNode = GpuGaussNewtonSolver::VarPerNode,
VarPerNode2 = VarPerNode*VarPerNode,
LowerPartNum = GpuGaussNewtonSolver::LowerPartNum,
};
PtrStep<float4> vmap_live;
PtrStep<float4> nmap_live;
PtrStep<float4> vmap_warp;
PtrStep<float4> nmap_warp;
PtrStep<float4> vmap_cano;
PtrStep<float4> nmap_cano;
PtrStep<KnnIdx> vmapKnn;
float* Hd_;
float* g_;
Intr intr;
Tbx::Transfo Tlw_inv;
Tbx::Transfo Tlw;
int imgWidth;
int imgHeight;
int nNodes;
float distThres;
float angleThres;
float psi_data;
float* totalEnergy;
__device__ __forceinline__ float data_term_energy(float f)const
{
#ifdef USE_L2_NORM_DATA_TERM
return 0.5f*f*f;
#else
		// the robust Tukey penalty energy
if (abs(f) <= psi_data)
return psi_data*psi_data / 6.f *(1 - pow(1 - sqr(f / psi_data), 3));
else
return psi_data*psi_data / 6.f;
#endif
}
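	// For reference, the Tukey robust estimator implemented above:
	//   energy : rho(f)  = psi^2/6 * (1 - (1 - (f/psi)^2)^3)  for |f| <= psi, else psi^2/6
	//   penalty: rho'(f) = f * (1 - (f/psi)^2)^2              for |f| <= psi, else 0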
__device__ __forceinline__ float data_term_penalty(float f)const
{
#ifdef USE_L2_NORM_DATA_TERM
return f;
#else
return f * sqr(max(0.f, 1.f - sqr(f / psi_data)));
		//// the robust Tukey penalty gradient
//if (abs(f) <= psi_data)
// return f * sqr(1 - sqr(f / psi_data));
//else
// return 0;
#endif
}
__device__ __forceinline__ float trace_AtB(Tbx::Transfo A, Tbx::Transfo B)const
{
float sum = 0;
for (int i = 0; i < 16; i++)
sum += A[i] * B[i];
return sum;
}
__device__ __forceinline__ Tbx::Transfo compute_p_f_p_T(const Tbx::Vec3& n,
const Tbx::Point3& v, const Tbx::Point3& vl, const Tbx::Dual_quat_cu& dq)const
{
//Tbx::Transfo T = Tlw*dq.to_transformation_after_normalize();
//Tbx::Transfo nvt = outer_product(n, v);
//Tbx::Transfo vlnt = outer_product(n, vl).transpose();
//Tbx::Transfo p_f_p_T = T*(nvt + nvt.transpose()) - vlnt;
Tbx::Vec3 Tn = dq.rotate(n);
Tbx::Point3 Tv(dq.transform(v) - vl);
return Tbx::Transfo(
Tn.x*v.x + n.x*Tv.x, Tn.x*v.y + n.y*Tv.x, Tn.x*v.z + n.z*Tv.x, Tn.x,
Tn.y*v.x + n.x*Tv.y, Tn.y*v.y + n.y*Tv.y, Tn.y*v.z + n.z*Tv.y, Tn.y,
Tn.z*v.x + n.x*Tv.z, Tn.z*v.y + n.y*Tv.z, Tn.z*v.z + n.z*Tv.z, Tn.z,
n.x, n.y, n.z, 0
);
}
__device__ __forceinline__ Tbx::Transfo p_T_p_alphak_func(const Tbx::Dual_quat_cu& p_qk_p_alpha,
const Tbx::Dual_quat_cu& dq_bar, const Tbx::Dual_quat_cu& dq, float inv_norm_dq_bar, float wk_k)const
{
Tbx::Transfo p_T_p_alphak = Tbx::Transfo::empty();
float pdot = dq_bar.get_non_dual_part().dot(p_qk_p_alpha.get_non_dual_part())
* sqr(inv_norm_dq_bar);
		//// evaluate p_dqi_p_alphak, heavily hard-coded here
		//// this hard-coding is crucial for performance
// 0:
// (0, -z0, y0, x1,
// z0, 0, -x0, y1,
//-y0, x0, 0, z1,
// 0, 0, 0, 0) * 2;
float p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[0] - dq_bar[0] * pdot
);
p_T_p_alphak[1] += -dq[3] * p_dqi_p_alphak;
p_T_p_alphak[2] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[3] += dq[5] * p_dqi_p_alphak;
p_T_p_alphak[4] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[6] += -dq[1] * p_dqi_p_alphak;
p_T_p_alphak[7] += dq[6] * p_dqi_p_alphak;
p_T_p_alphak[8] += -dq[2] * p_dqi_p_alphak;
p_T_p_alphak[9] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[11] += dq[7] * p_dqi_p_alphak;
// 1
//( 0, y0, z0, -w1,
// y0, -2 * x0, -w0, -z1,
// z0, w0, -2 * x0, y1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[1] - dq_bar[1] * pdot
);
p_T_p_alphak[1] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[2] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[3] += -dq[4] * p_dqi_p_alphak;
p_T_p_alphak[4] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[5] += -dq[1] * p_dqi_p_alphak * 2;
p_T_p_alphak[6] += -dq[0] * p_dqi_p_alphak;
p_T_p_alphak[7] += -dq[7] * p_dqi_p_alphak;
p_T_p_alphak[8] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[9] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[10] += -dq[1] * p_dqi_p_alphak * 2;
p_T_p_alphak[11] += dq[6] * p_dqi_p_alphak;
// 2.
// (-2 * y0, x0, w0, z1,
// x0, 0, z0, -w1,
// -w0, z0, -2 * y0, -x1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[2] - dq_bar[2] * pdot
);
p_T_p_alphak[0] += -dq[2] * p_dqi_p_alphak * 2;
p_T_p_alphak[1] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[2] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[3] += dq[7] * p_dqi_p_alphak;
p_T_p_alphak[4] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[6] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[7] += -dq[4] * p_dqi_p_alphak;
p_T_p_alphak[8] += -dq[0] * p_dqi_p_alphak;
p_T_p_alphak[9] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[10] += -dq[2] * p_dqi_p_alphak * 2;
p_T_p_alphak[11] += -dq[5] * p_dqi_p_alphak;
// 3.
// (-2 * z0, -w0, x0, -y1,
// w0, -2 * z0, y0, x1,
// x0, y0, 0, -w1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[3] - dq_bar[3] * pdot
);
p_T_p_alphak[0] += -dq[3] * p_dqi_p_alphak * 2;
p_T_p_alphak[1] += -dq[0] * p_dqi_p_alphak;
p_T_p_alphak[2] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[3] += -dq[6] * p_dqi_p_alphak;
p_T_p_alphak[4] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[5] += -dq[3] * p_dqi_p_alphak * 2;
p_T_p_alphak[6] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[7] += dq[5] * p_dqi_p_alphak;
p_T_p_alphak[8] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[9] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[11] += -dq[4] * p_dqi_p_alphak;
// 4.
//( 0, 0, 0, -x0,
// 0, 0, 0, -y0,
// 0, 0, 0, -z0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[4] - dq_bar[4] * pdot
);
p_T_p_alphak[3] += -dq[1] * p_dqi_p_alphak;
p_T_p_alphak[7] += -dq[2] * p_dqi_p_alphak;
p_T_p_alphak[11] += -dq[3] * p_dqi_p_alphak;
// 5.
// (0, 0, 0, w0,
// 0, 0, 0, z0,
// 0, 0, 0, -y0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[5] - dq_bar[5] * pdot
);
p_T_p_alphak[3] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[7] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[11] += -dq[2] * p_dqi_p_alphak;
// 6.
// (0, 0, 0, -z0,
// 0, 0, 0, w0,
// 0, 0, 0, x0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[6] - dq_bar[6] * pdot
);
p_T_p_alphak[3] += -dq[3] * p_dqi_p_alphak;
p_T_p_alphak[7] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[11] += dq[1] * p_dqi_p_alphak;
// 7.
// (0, 0, 0, y0,
// 0, 0, 0, -x0,
// 0, 0, 0, w0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[7] - dq_bar[7] * pdot
);
p_T_p_alphak[3] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[7] += -dq[1] * p_dqi_p_alphak;
p_T_p_alphak[11] += dq[0] * p_dqi_p_alphak;
return p_T_p_alphak;
}
__device__ __forceinline__ bool search(int x, int y, Tbx::Point3& vl) const
{
float3 vwarp = read_float3_4(vmap_warp(y, x));
float3 nwarp = read_float3_4(nmap_warp(y, x));
return search(vwarp, nwarp, vl);
}
__device__ __forceinline__ bool search(float3 vwarp, float3 nwarp, Tbx::Point3& vl) const
{
if (isnan(nwarp.x) || isnan(vwarp.x))
return false;
float3 uvd = intr.xyz2uvd(vwarp);
int2 ukr = make_int2(__float2int_rn(uvd.x), __float2int_rn(uvd.y));
			// we use the OpenGL coordinate convention, thus world.z should be < 0
if (ukr.x < 0 || ukr.y < 0 || ukr.x >= imgWidth || ukr.y >= imgHeight || vwarp.z >= 0)
return false;
float3 vlive = read_float3_4(vmap_live[ukr.y*imgWidth + ukr.x]);
float3 nlive = read_float3_4(nmap_live[ukr.y*imgWidth + ukr.x]);
if (isnan(nlive.x) || isnan(vlive.x))
return false;
#ifndef DEBUG_ASSIGN_10M_TO_NO_CORR
float dist = norm(vwarp - vlive);
if (!(dist <= distThres))
return false;
float sine = norm(cross(nwarp, nlive));
if (!(sine < angleThres))
return false;
#endif
vl = Tbx::Point3(vlive.x, vlive.y, vlive.z);
return true;
}
__device__ __forceinline__ void calc_dataterm () const
{
const int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
const int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
Tbx::Point3 vl;
bool found_coresp = false;
if (x < imgWidth && y < imgHeight)
found_coresp = search(x, y, vl);
vl = Tlw_inv * vl;
if (found_coresp)
{
Tbx::Point3 v(convert(read_float3_4(vmap_cano(y, x))));
Tbx::Vec3 n(convert(read_float3_4(nmap_cano(y, x))));
const KnnIdx knn = vmapKnn(y, x);
Tbx::Dual_quat_cu dq(Tbx::Quat_cu(0, 0, 0, 0), Tbx::Quat_cu(0, 0, 0, 0));
Tbx::Dual_quat_cu dqk_0;
float wk[KnnK];
// dqk_0
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, 0), r, t);
float4 nodeVw = get_nodesVw(knn_k(knn, 0));
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
dqk_0.from_twist(r, t);
float expIn = nodesV.dot(nodesV) * nodeVw.w * nodeVw.w;
wk[0] = __expf(-0.5f * expIn);
dq = dq + dqk_0 * wk[0];
}
// other dqk_k
#pragma unroll
for (int k = 1; k < KnnK; k++)
{
int knnNodeId = knn_k(knn, k);
if (knnNodeId >= nNodes)
break;
Tbx::Vec3 r, t;
get_twist(knnNodeId, r, t);
float4 nodeVw = get_nodesVw(knnNodeId);
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw))-v);
Tbx::Dual_quat_cu dqk_k;
dqk_k.from_twist(r, t);
#ifdef ENABLE_ANTI_PODALITY
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w)
*sign(dqk_0.get_non_dual_part().dot(dqk_k.get_non_dual_part()));
#else
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w);
#endif
dq = dq + dqk_k * wk[k];
}
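			// dq is now the (unnormalized) dual-quaternion blend (DQB) of the KNN node
			// transforms, weighted by the Gaussian weights wk; it is normalized below.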
Tbx::Dual_quat_cu dq_bar = dq;
float norm_dq_bar = dq_bar.norm();
if (norm_dq_bar < Tbx::Dual_quat_cu::epsilon())
return;
float inv_norm_dq_bar = 1.f / norm_dq_bar;
dq = dq * inv_norm_dq_bar; // normalize
			// the robust penalty derivative psi'(r) of the point-to-plane residual r
const float f = data_term_penalty(dq.rotate(n).dot(dq.transform(v) - vl));
			// partial_f_partial_T
const Tbx::Transfo p_f_p_T = compute_p_f_p_T(n, v, vl, dq);
for (int knnK = 0; knnK < KnnK; knnK++)
{
int knnNodeId = knn_k(knn, knnK);
if (knnNodeId >= nNodes)
break;
float p_f_p_alpha[VarPerNode];
float wk_k = wk[knnK] * inv_norm_dq_bar * 2;
				//// compute partial_T_partial_alphak, hard-coded here.
Tbx::Dual_quat_cu p_qk_p_alpha;
Tbx::Transfo p_T_p_alphak;
Tbx::Vec3 t, r;
float b, c;
Tbx::Quat_cu q1;
get_twist(knnNodeId, r, t);
{
float n = r.norm();
float sin_n, cos_n;
sincos(n, &sin_n, &cos_n);
b = n > Tbx::Dual_quat_cu::epsilon() ? sin_n / n : 1;
c = n > Tbx::Dual_quat_cu::epsilon() ? (cos_n - b) / (n*n) : 0;
q1 = Tbx::Quat_cu(cos_n*0.5f, r.x*b*0.5f, r.y*b*0.5f, r.z*b*0.5f);
}
// alpha0
p_qk_p_alpha[0] = -r[0] * b;
p_qk_p_alpha[1] = b + r[0] * r[0] * c;
p_qk_p_alpha[2] = r[0] * r[1] * c;
p_qk_p_alpha[3] = r[0] * r[2] * c;
p_qk_p_alpha = Tbx::Dual_quat_cu::dual_quat_from(p_qk_p_alpha.get_non_dual_part(), t);
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[0] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha1
p_qk_p_alpha[0] = -r[1] * b;
p_qk_p_alpha[1] = r[1] * r[0] * c;
p_qk_p_alpha[2] = b + r[1] * r[1] * c;
p_qk_p_alpha[3] = r[1] * r[2] * c;
p_qk_p_alpha = Tbx::Dual_quat_cu::dual_quat_from(p_qk_p_alpha.get_non_dual_part(), t);
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[1] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha2
p_qk_p_alpha[0] = -r[2] * b;
p_qk_p_alpha[1] = r[2] * r[0] * c;
p_qk_p_alpha[2] = r[2] * r[1] * c;
p_qk_p_alpha[3] = b + r[2] * r[2] * c;
p_qk_p_alpha = Tbx::Dual_quat_cu::dual_quat_from(p_qk_p_alpha.get_non_dual_part(), t);
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[2] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha3
p_qk_p_alpha = Tbx::Dual_quat_cu(Tbx::Quat_cu(0, 0, 0, 0),
Tbx::Quat_cu(-q1[1], q1[0], -q1[3], q1[2]));
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[3] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha4
p_qk_p_alpha = Tbx::Dual_quat_cu(Tbx::Quat_cu(0, 0, 0, 0),
Tbx::Quat_cu(-q1[2], q1[3], q1[0], -q1[1]));
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[4] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha5
p_qk_p_alpha = Tbx::Dual_quat_cu(Tbx::Quat_cu(0, 0, 0, 0),
Tbx::Quat_cu(-q1[3], -q1[2], q1[1], q1[0]));
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[5] = trace_AtB(p_f_p_T, p_T_p_alphak);
//// reduce--------------------------------------------------
int shift = knnNodeId * VarPerNode2;
int shift_g = knnNodeId * VarPerNode;
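				// accumulate the lower triangle of the 6x6 J^T J block into the block-
				// diagonal Hessian Hd_ and J^T * f into the gradient g_ (atomics, since
				// many pixels can share the same node)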
for (int i = 0; i < VarPerNode; ++i)
{
#pragma unroll
for (int j = 0; j <= i; ++j)
atomicAdd(&Hd_[shift + j], p_f_p_alpha[i] * p_f_p_alpha[j]);
atomicAdd(&g_[shift_g + i], p_f_p_alpha[i] * f);
shift += VarPerNode;
}// end for i
}// end for knnK
}// end if found corr
}// end function ()
__device__ __forceinline__ Tbx::Dual_quat_cu calc_pixel_dq(KnnIdx knn,
Tbx::Point3 v, float* wk)const
{
Tbx::Dual_quat_cu dqk_0;
Tbx::Dual_quat_cu dq(Tbx::Quat_cu(0,0,0,0), Tbx::Quat_cu(0,0,0,0));
// dqk_0
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, 0), r, t);
float4 nodeVw = get_nodesVw(knn_k(knn, 0));
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
dqk_0.from_twist(r, t);
wk[0] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w);
dq += dqk_0 * wk[0];
}
// other dqk_k
#pragma unroll
for (int k = 1; k < KnnK; k++)
{
if (knn_k(knn, k) >= nNodes)
break;
float4 nodeVw = get_nodesVw(knn_k(knn, k));
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
Tbx::Dual_quat_cu dqk_k;
Tbx::Vec3 r, t;
get_twist(knn_k(knn, k), r, t);
dqk_k.from_twist(r, t);
#ifdef ENABLE_ANTI_PODALITY
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w)
*sign(dqk_0.get_non_dual_part().dot(dqk_k.get_non_dual_part()));
#else
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w);
#endif
dq += dqk_k * wk[k];
}
return dq;
}
__device__ __forceinline__ void exchange_ri_k(KnnIdx knn,
const float* wk, int k, int i, Tbx::Dual_quat_cu& dq, float& inc)const
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, k), r, t);
Tbx::Dual_quat_cu old_dqk, new_dqk;
old_dqk.from_twist(r, t);
inc = get_numeric_inc(r[i]);
r[i] += inc;
new_dqk.from_twist(r, t);
dq -= old_dqk * wk[k];
dq += new_dqk * wk[k] * sign(old_dqk.get_non_dual_part().dot(new_dqk.get_non_dual_part()));
}
__device__ __forceinline__ void exchange_ti_k(KnnIdx knn,
const float* wk, int k, int i, Tbx::Dual_quat_cu& dq, float& inc)const
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, k), r, t);
Tbx::Dual_quat_cu old_dqk, new_dqk;
old_dqk.from_twist(r, t);
inc = get_numeric_inc(t[i]);
t[i] += inc;
new_dqk.from_twist(r, t);
dq -= old_dqk * wk[k];
dq += new_dqk * wk[k] * sign(old_dqk.get_non_dual_part().dot(new_dqk.get_non_dual_part()));
}
__device__ __forceinline__ float get_numeric_inc(float v) const
{
return max( 1e-5f, v* 1e-3f);
}
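	// Finite-difference variant of the data-term Jacobian: each twist component of
	// every KNN node is perturbed by a relative increment and the residual is
	// re-evaluated; used to cross-check the analytic derivatives above.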
__device__ __forceinline__ void calc_dataterm_numeric() const
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= imgWidth || y >= imgHeight)
return;
const KnnIdx knn = vmapKnn(y, x);
Tbx::Point3 v(convert(read_float3_4(vmap_cano(y, x))));
Tbx::Vec3 n(convert(read_float3_4(nmap_cano(y, x))));
if (isnan(n.x) || isnan(v.x))
return;
// 1. get all nodes params
// 2. compute function=================================================
float wk[KnnK];
Tbx::Dual_quat_cu dq = calc_pixel_dq(knn, v, wk);
float norm_dq = dq.norm();
if (norm_dq < Tbx::Dual_quat_cu::epsilon())
return;
Tbx::Dual_quat_cu dq_not_normalized = dq;
dq = dq * (1.f / norm_dq); // normalize
// find corr
Tbx::Vec3 nwarp = Tlw*dq.rotate(n);
Tbx::Point3 vwarp = Tlw*dq.transform(v);
Tbx::Point3 vl;
//bool corr_found = search(convert(vwarp), convert(nwarp), vl);
bool corr_found = search(x, y, vl);
if (!corr_found)
return;
			// the point-to-plane residual f and its robust penalty derivative psi'(f)
const float f = nwarp.dot(vwarp - vl);
const float psi_f = data_term_penalty(f);
// 3. compute jacobi
for (int knnK = 0; knnK < KnnK; knnK++)
{
if (knn_k(knn, knnK) >= nNodes)
break;
float df[6];
// 3.0 p_r[0:2]
for (int i = 0; i < 3; i++)
{
float inc;
Tbx::Dual_quat_cu dq1 = dq_not_normalized;
exchange_ri_k(knn, wk, knnK, i, dq1, inc);
dq1 *= (1.f / dq1.norm());
nwarp = Tlw*dq1.rotate(n);
vwarp = Tlw*dq1.transform(v);
Tbx::Point3 vl1 = vl;
//corr_found = search(convert(vwarp), convert(nwarp), vl1);
//if (!corr_found)
// return;
float f1 = nwarp.dot(vwarp - vl1);
df[i] = (f1 - f) / inc;
}// i=0:3
// 3.1 p_t[0:2]
for (int i = 0; i < 3; i++)
{
float inc;
Tbx::Dual_quat_cu dq1 = dq_not_normalized;
exchange_ti_k(knn, wk, knnK, i, dq1, inc);
dq1 *= (1.f / dq1.norm());
nwarp = Tlw*dq1.rotate(n);
vwarp = Tlw*dq1.transform(v);
Tbx::Point3 vl1 = vl;
//corr_found = search(convert(vwarp), convert(nwarp), vl1);
//if (!corr_found)
// return;
float f1 = nwarp.dot(vwarp - vl1);
df[i+3] = (f1 - f) / inc;
}// i=0:3
//// reduce--------------------------------------------------
int shift = knn_k(knn, knnK) * VarPerNode2;
int shift_g = knn_k(knn, knnK) * VarPerNode;
for (int i = 0; i < VarPerNode; ++i)
{
#pragma unroll
for (int j = 0; j <= i; ++j)
atomicAdd(&Hd_[shift + j], df[i] * df[j]);
atomicAdd(&g_[shift_g + i], df[i] * psi_f);
shift += VarPerNode;
}// end for i
}// end for knnK
}// end function ()
__device__ __forceinline__ void calcTotalEnergy()const
{
const int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
const int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
Tbx::Point3 vl;
bool found_coresp = false;
if (x < imgWidth && y < imgHeight)
found_coresp = search(x, y, vl);
if (found_coresp)
{
Tbx::Point3 v(convert(read_float3_4(vmap_cano(y, x))));
Tbx::Vec3 n(convert(read_float3_4(nmap_cano(y, x))));
const KnnIdx knn = vmapKnn(y, x);
Tbx::Dual_quat_cu dq(Tbx::Quat_cu(0, 0, 0, 0), Tbx::Quat_cu(0, 0, 0, 0));
Tbx::Dual_quat_cu dqk_0;
float wk[KnnK];
// dqk_0
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, 0), r, t);
float4 nodeVw = get_nodesVw(knn_k(knn, 0));
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
dqk_0.from_twist(r, t);
float expIn = nodesV.dot(nodesV) * nodeVw.w * nodeVw.w;
wk[0] = __expf(-0.5f * expIn);
dq = dq + dqk_0 * wk[0];
}
// other dqk_k
#pragma unroll
for (int k = 1; k < KnnK; k++)
{
int knnNodeId = knn_k(knn, k);
if (knnNodeId >= nNodes)
break;
Tbx::Vec3 r, t;
get_twist(knnNodeId, r, t);
float4 nodeVw = get_nodesVw(knnNodeId);
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
Tbx::Dual_quat_cu dqk_k;
dqk_k.from_twist(r, t);
#ifdef ENABLE_ANTI_PODALITY
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w)
*sign(dqk_0.get_non_dual_part().dot(dqk_k.get_non_dual_part()));
#else
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w);
#endif
dq = dq + dqk_k * wk[k];
}
float norm_dq = dq.norm();
if (norm_dq < Tbx::Dual_quat_cu::epsilon())
return;
dq = dq * (1.f / norm_dq); // normalize
			// the data-term energy f
const float f = data_term_energy((Tlw*dq.rotate(n)).dot(Tlw*dq.transform(v) - vl));
//atomicAdd(totalEnergy, f);
totalEnergy[y*imgWidth + x] = f;
}//end if find corr
#ifdef DEBUG_ASSIGN_BIG_ENERGY_TO_NO_CORR
else // debug: add constant penalty
{
totalEnergy[y*imgWidth + x] = data_term_energy(psi_data);
}
#endif
}
};
__global__ void dataTermCombinedKernel(const DataTermCombined cs)
{
#ifdef CALC_DATA_TERM_NUMERIC
cs.calc_dataterm_numeric();
#else
cs.calc_dataterm();
#endif
}
void GpuGaussNewtonSolver::calcDataTerm()
{
DataTermCombined cs;
cs.angleThres = m_param->fusion_nonRigid_angleThreSin;
cs.distThres = m_param->fusion_nonRigid_distThre;
cs.Hd_ = m_Hd.value();
cs.g_ = m_g;
cs.imgHeight = m_vmap_cano->rows();
cs.imgWidth = m_vmap_cano->cols();
cs.intr = m_intr;
cs.nmap_cano = *m_nmap_cano;
cs.nmap_live = *m_nmap_live;
cs.nmap_warp = *m_nmap_warp;
cs.vmap_cano = *m_vmap_cano;
cs.vmap_live = *m_vmap_live;
cs.vmap_warp = *m_vmap_warp;
cs.vmapKnn = m_vmapKnn;
cs.nNodes = m_numNodes;
cs.Tlw = m_pWarpField->get_rigidTransform();
cs.Tlw_inv = m_pWarpField->get_rigidTransform().fast_invert();
cs.psi_data = m_param->fusion_psi_data;
//////////////////////////////
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(1, 1, 1);
grid.x = divUp(cs.imgWidth, block.x);
grid.y = divUp(cs.imgHeight, block.y);
dataTermCombinedKernel<< <grid, block >> >(cs);
cudaSafeCall(hipGetLastError(), "dataTermCombinedKernel");
}
__global__ void calcDataTermTotalEnergyKernel(const DataTermCombined cs)
{
cs.calcTotalEnergy();
}
#pragma endregion
#pragma region --define sparse structure
__global__ void count_Jr_rows_kernel(int* rctptr, int nMaxNodes)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= nMaxNodes)
return;
KnnIdx knn = get_nodesKnn(i);
int numK = -1;
for (int k = 0; k < KnnK; ++k)
{
if (knn_k(knn, k) < nMaxNodes)
numK = k;
}
		// each node contributes one block row per valid knn neighbor
		// (RowPerNode_RegTerm scalar rows each)
rctptr[i] = (numK + 1);
if (i == 0)
rctptr[nMaxNodes] = 0;
}
__global__ void compute_row_map_kernel(GpuGaussNewtonSolver::JrRow2NodeMapper* row2nodeId,
const int* rctptr, int nMaxNodes)
{
int iNode = threadIdx.x + blockIdx.x*blockDim.x;
if (iNode < nMaxNodes)
{
int row_b = rctptr[iNode];
int row_e = rctptr[iNode+1];
for (int r = row_b; r < row_e; r++)
{
GpuGaussNewtonSolver::JrRow2NodeMapper mp;
mp.nodeId = iNode;
mp.k = r - row_b;
mp.ixyz = 0;
row2nodeId[r] = mp;
}
}
}
__global__ void compute_Jr_rowPtr_kernel(
int* rptr, const GpuGaussNewtonSolver::JrRow2NodeMapper* row2nodeId,
int nMaxNodes, int nBlockRows)
{
enum{
BlocksPerRow = 2
};
const int iBlockRow = threadIdx.x + blockIdx.x*blockDim.x;
if (iBlockRow >= nBlockRows)
return;
const int iNode = row2nodeId[iBlockRow].nodeId;
if (iNode < nMaxNodes)
{
KnnIdx knn = get_nodesKnn(iNode);
if (knn_k(knn, row2nodeId[iBlockRow].k) < nMaxNodes)
rptr[iBlockRow] = iBlockRow * BlocksPerRow;
}
		// the 1st thread also writes the last value
if (iBlockRow == 0)
rptr[nBlockRows] = nBlockRows * BlocksPerRow;
}
__global__ void compute_Jr_colIdx_kernel(
int* colIdx, const GpuGaussNewtonSolver::JrRow2NodeMapper* row2nodeId,
int nMaxNodes, int nBlockRows)
{
enum{
ColPerRow = 2
};
const int iBlockRow = threadIdx.x + blockIdx.x*blockDim.x;
if (iBlockRow >= nBlockRows)
return;
const int iNode = row2nodeId[iBlockRow].nodeId;
if (iNode < nMaxNodes)
{
KnnIdx knn = get_nodesKnn(iNode);
int knnNodeId = knn_k(knn, row2nodeId[iBlockRow].k);
if (knnNodeId < nMaxNodes)
{
int col_b = iBlockRow*ColPerRow;
// each row 2 blocks
// 1. self
colIdx[col_b] = iNode;
// 2. neighbor
colIdx[col_b + 1] = knnNodeId;
}// end if knnNodeId
}
}
__global__ void calc_B_cidx_kernel(int* B_cidx,
const int* B_rptr, int nBlockInRows, int nMaxNodes, int nLv0Nodes)
{
int iBlockRow = threadIdx.x + blockIdx.x*blockDim.x;
if (iBlockRow < nBlockInRows)
{
KnnIdx knn = get_nodesKnn(iBlockRow);
int col_b = B_rptr[iBlockRow];
for (int k = 0; k < KnnK; ++k)
{
int knnNodeId = knn_k(knn, k);
if (knnNodeId < nMaxNodes)
B_cidx[col_b++] = knnNodeId-nLv0Nodes;
}
}
}
void GpuGaussNewtonSolver::initSparseStructure()
{
// 1. compute Jr structure ==============================================
// 1.0. decide the total rows we have for each nodes
{
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_numNodes, block.x));
count_Jr_rows_kernel << <grid, block >> >(m_Jr_RowCounter.ptr(), m_numNodes);
cudaSafeCall(hipGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::count_Jr_rows_kernel");
thrust_wrapper::exclusive_scan(m_Jr_RowCounter.ptr(), m_Jr_RowCounter.ptr(), m_numNodes + 1);
int jrRows = 0;
cudaSafeCall(hipMemcpy(&jrRows, m_Jr_RowCounter.ptr() + m_numNodes,
sizeof(int), hipMemcpyDeviceToHost), "copy Jr rows to host");
m_Jr->resize(jrRows, m_numNodes, RowPerNode_RegTerm, VarPerNode);
}
		// 1.1. collect node edge info:
		// each low-level node is connected to k higher-level nodes,
		// but the connections are not stored for the higher-level nodes;
		// thus when processing each node we add 2*k edges, i.e., 2*k*3 rows: each (x,y,z) is a row
// for each row, there are exactly 2*VarPerNode values
// after this step, we can get the CSR/COO structure
if (m_Jr->rows() > 0)
{
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_numNodes, block.x));
compute_row_map_kernel << <grid, block >> >(m_Jr_RowMap2NodeId.ptr(), m_Jr_RowCounter.ptr(), m_numNodes);
cudaSafeCall(hipGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::compute_row_map_kernel");
}
if (m_Jr->rows() > 0)
{
m_Jr->beginConstructRowPtr();
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_Jr->blocksInRow(), block.x));
compute_Jr_rowPtr_kernel << <grid, block >> >(m_Jr->bsrRowPtr(),
m_Jr_RowMap2NodeId.ptr(), m_numNodes, m_Jr->blocksInRow());
cudaSafeCall(hipGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::compute_Jr_rowPtr_kernel");
m_Jr->endConstructRowPtr();
compute_Jr_colIdx_kernel << <grid, block >> >(m_Jr->bsrColIdx(),
m_Jr_RowMap2NodeId.ptr(), m_numNodes, m_Jr->blocksInRow());
cudaSafeCall(hipGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::compute_Jr_colIdx_kernel");
}
// 2. compute Jrt structure ==============================================
// 2.1. fill (row, col) as (col, row) from Jr and sort.
m_Jr->transposeStructureTo(*m_Jrt);
m_Jrt->subRows_structure(*m_Jrt13_structure, m_numLv0Nodes, m_numNodes);
m_Jrt13_structure->transposeStructureTo(*m_Jr13_structure);
m_Jrt13_structure->multBsr_structure(*m_Jr13_structure, *m_Hr);
// 3. compute B structure ==============================================
// 3.1 the row ptr of B is the same CSR info with the first L0 rows of Jrt.
m_B->resize(m_numLv0Nodes, m_Jr->blocksInCol() - m_numLv0Nodes, VarPerNode, VarPerNode);
m_B->setRowFromBsrRowPtr(m_Jrt->bsrRowPtr());
// 3.2 the col-idx of B
if (m_B->rows() > 0)
{
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_B->blocksInRow(), block.x));
calc_B_cidx_kernel << <grid, block >> >(
m_B->bsrColIdx(), m_B->bsrRowPtr(), m_B->blocksInRow(), m_numNodes, m_numLv0Nodes);
cudaSafeCall(hipGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::calc_B_cidx_kernel");
}
// 3.3 sort to compute Bt
m_B->transposeStructureTo(*m_Bt);
m_Hd.resize(m_numLv0Nodes, VarPerNode);
m_Hd_Linv.resize(m_numLv0Nodes, VarPerNode);
m_Hd_LLtinv.resize(m_numLv0Nodes, VarPerNode);
m_Bt->rightMultDiag_structure(m_Hd_Linv, *m_Bt_Ltinv);
// 4. single level Hessian
if (m_param->graph_single_level)
{
m_Jrt->multBsr_structure(*m_Jr, *m_H_singleLevel);
m_singleLevel_solver->analysis(m_H_singleLevel, true);
}
else
{
			// solve Q on the CPU; prepare for it
m_Bt->multBsr_structure(*m_B, *m_Q, m_Hr);
m_singleLevel_solver->analysis(m_Q, true);
}
}
#pragma endregion
#pragma region --calc reg term
struct RegTermJacobi
{
typedef GpuGaussNewtonSolver::JrRow2NodeMapper Mapper;
enum
{
VarPerNode = GpuGaussNewtonSolver::VarPerNode,
VarPerNode2 = VarPerNode*VarPerNode,
ColPerRow = VarPerNode * 2
};
int nNodes;
int nBlockRows;
const Mapper* rows2nodeIds;
const int* rptr;
mutable float* vptr;
mutable float* fptr;
int nNodesEachLevel[WarpField::GraphLevelNum];
float dw_scale_each_level;
float dw_softness;
float psi_reg;
float lambda;
float* totalEnergy;
__device__ __forceinline__ int getNodeLevel(int nodeId)const
{
for (int k = 0; k < WarpField::GraphLevelNum; k++)
if (nodeId < nNodesEachLevel[k])
return k;
return WarpField::GraphLevelNum;
}
__device__ __forceinline__ float calc_alpha_reg(int nodeId, int k, int nMaxNodes)const
{
KnnIdx knn = get_nodesKnn(nodeId);
float4 nodeVwi = get_nodesVw(nodeId);
Tbx::Point3 vi(convert(read_float3_4(nodeVwi)));
float4 nodeVwj = get_nodesVw(knn_k(knn, k));
float invW = min(nodeVwi.w, nodeVwj.w);
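			// alpha_ij is the Gaussian affinity of neighbor k within node i's KNN set,
			// normalized over the neighborhood and scaled per hierarchy level below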
float wk = 0.f, sum_w = 0.f;
for (int knn_idx = 0; knn_idx < KnnK; knn_idx++)
{
if (knn_idx < nMaxNodes)
{
float4 nodeVwj = get_nodesVw(knn_k(knn, knn_idx));
Tbx::Point3 vj(convert(read_float3_4(nodeVwj)));
float w = __expf(-dw_softness * (vi - vj).dot(vi - vj) * invW * invW);
sum_w += w;
if (knn_idx == k)
wk = w;
}
}
// if all neighbors are too far to give valid weightings,
// we just take an average.
if (sum_w < 1e-6f)
wk = 0.25f;
else
wk /= sum_w;
return wk * __powf(dw_scale_each_level, getNodeLevel(nodeId));
}
__device__ __forceinline__ Tbx::Dual_quat_cu p_qk_p_alpha_func(Tbx::Dual_quat_cu dq, int i)const
{
Tbx::Vec3 t, r;
float b, c, n;
Tbx::Quat_cu q0(0, 0, 0, 0), q1 = dq.get_non_dual_part();
switch (i)
{
case 0:
dq.to_twist(r, t);
n = r.norm();
if (n > Tbx::Dual_quat_cu::epsilon())
{
b = sin(n) / n;
c = (cos(n) - b) / (n*n);
q0.coeff0 = -r.x * b;
q0.coeff1 = b + r.x*r.x*c;
q0.coeff2 = r.x*r.y*c;
q0.coeff3 = r.x*r.z*c;
}
else
{
q0.coeff0 = 0;
q0.coeff1 = 1;
q0.coeff2 = 0;
q0.coeff3 = 0;
}
q1.coeff0 = (t.x * q0.coeff1 + t.y * q0.coeff2 + t.z * q0.coeff3) * (-0.5);
q1.coeff1 = (t.x * q0.coeff0 + t.y * q0.coeff3 - t.z * q0.coeff2) * 0.5;
q1.coeff2 = (-t.x * q0.coeff3 + t.y * q0.coeff0 + t.z * q0.coeff1) * 0.5;
q1.coeff3 = (t.x * q0.coeff2 - t.y * q0.coeff1 + t.z * q0.coeff0) * 0.5;
return Tbx::Dual_quat_cu(q0, q1);
case 1:
dq.to_twist(r, t);
n = r.norm();
if (n > Tbx::Dual_quat_cu::epsilon())
{
b = sin(n) / n;
c = (cos(n) - b) / (n*n);
q0.coeff0 = -r.y * b;
q0.coeff1 = r.y*r.x*c;
q0.coeff2 = b + r.y*r.y*c;
q0.coeff3 = r.y*r.z*c;
}
else
{
q0.coeff0 = 0;
q0.coeff1 = 0;
q0.coeff2 = 1;
q0.coeff3 = 0;
}
q1.coeff0 = (t.x * q0.coeff1 + t.y * q0.coeff2 + t.z * q0.coeff3) * (-0.5);
q1.coeff1 = (t.x * q0.coeff0 + t.y * q0.coeff3 - t.z * q0.coeff2) * 0.5;
q1.coeff2 = (-t.x * q0.coeff3 + t.y * q0.coeff0 + t.z * q0.coeff1) * 0.5;
q1.coeff3 = (t.x * q0.coeff2 - t.y * q0.coeff1 + t.z * q0.coeff0) * 0.5;
return Tbx::Dual_quat_cu(q0, q1);
case 2:
dq.to_twist(r, t);
n = r.norm();
if (n > Tbx::Dual_quat_cu::epsilon())
{
b = sin(n) / n;
c = (cos(n) - b) / (n*n);
q0.coeff0 = -r.z * b;
q0.coeff1 = r.z*r.x*c;
q0.coeff2 = r.z*r.y*c;
q0.coeff3 = b + r.z*r.z*c;
}
else
{
q0.coeff0 = 0;
q0.coeff1 = 0;
q0.coeff2 = 0;
q0.coeff3 = 1;
}
q1.coeff0 = (t.x * q0.coeff1 + t.y * q0.coeff2 + t.z * q0.coeff3) * (-0.5);
q1.coeff1 = (t.x * q0.coeff0 + t.y * q0.coeff3 - t.z * q0.coeff2) * 0.5;
q1.coeff2 = (-t.x * q0.coeff3 + t.y * q0.coeff0 + t.z * q0.coeff1) * 0.5;
q1.coeff3 = (t.x * q0.coeff2 - t.y * q0.coeff1 + t.z * q0.coeff0) * 0.5;
return Tbx::Dual_quat_cu(q0, q1);
case 3:
return Tbx::Dual_quat_cu(q0, Tbx::Quat_cu(-q1.coeff1, q1.coeff0, -q1.coeff3, q1.coeff2))*0.5;
case 4:
return Tbx::Dual_quat_cu(q0, Tbx::Quat_cu(-q1.coeff2, q1.coeff3, q1.coeff0, -q1.coeff1))*0.5;
case 5:
return Tbx::Dual_quat_cu(q0, Tbx::Quat_cu(-q1.coeff3, -q1.coeff2, q1.coeff1, q1.coeff0))*0.5;
default:
return Tbx::Dual_quat_cu();
}
}
__device__ __forceinline__ float reg_term_energy(Tbx::Vec3 f)const
{
#ifdef USE_L2_NORM_REG_TERM
return 0.5f*f.dot(f);
#else
			// the robust Huber penalty energy
float s = 0;
float norm = f.norm();
if (norm < psi_reg)
s = norm * norm * 0.5f;
else
s = psi_reg*(norm - psi_reg*0.5f);
return s;
#endif
}
__device__ __forceinline__ Tbx::Vec3 reg_term_penalty(Tbx::Vec3 f)const
{
#ifdef USE_L2_NORM_REG_TERM
return f;
#else
			// the robust Huber penalty gradient
Tbx::Vec3 df;
float norm = f.norm();
if (norm < psi_reg)
df = f;
else
for (int k = 0; k < 3; k++)
df[k] = f[k]*psi_reg / norm;
return df;
#endif
}
__device__ __forceinline__ Tbx::Transfo p_SE3_p_alpha_func(Tbx::Dual_quat_cu dq, int i)const
{
Tbx::Transfo T = Tbx::Transfo::empty();
Tbx::Dual_quat_cu p_dq_p_alphai = p_qk_p_alpha_func(dq, i) * 2.f;
			//// evaluate p_dqi_p_alphak, heavily hard-coded here
			//// this hard-coding is crucial for performance
// 0:
// (0, -z0, y0, x1,
// z0, 0, -x0, y1,
//-y0, x0, 0, z1,
// 0, 0, 0, 0) * 2;
float p_dqi_p_alphak = p_dq_p_alphai[0];
T[1] += -dq[3] * p_dqi_p_alphak;
T[2] += dq[2] * p_dqi_p_alphak;
T[3] += dq[5] * p_dqi_p_alphak;
T[4] += dq[3] * p_dqi_p_alphak;
T[6] += -dq[1] * p_dqi_p_alphak;
T[7] += dq[6] * p_dqi_p_alphak;
T[8] += -dq[2] * p_dqi_p_alphak;
T[9] += dq[1] * p_dqi_p_alphak;
T[11] += dq[7] * p_dqi_p_alphak;
// 1
//( 0, y0, z0, -w1,
// y0, -2 * x0, -w0, -z1,
// z0, w0, -2 * x0, y1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[1];
T[1] += dq[2] * p_dqi_p_alphak;
T[2] += dq[3] * p_dqi_p_alphak;
T[3] += -dq[4] * p_dqi_p_alphak;
T[4] += dq[2] * p_dqi_p_alphak;
T[5] += -dq[1] * p_dqi_p_alphak * 2;
T[6] += -dq[0] * p_dqi_p_alphak;
T[7] += -dq[7] * p_dqi_p_alphak;
T[8] += dq[3] * p_dqi_p_alphak;
T[9] += dq[0] * p_dqi_p_alphak;
T[10] += -dq[1] * p_dqi_p_alphak * 2;
T[11] += dq[6] * p_dqi_p_alphak;
// 2.
// (-2 * y0, x0, w0, z1,
// x0, 0, z0, -w1,
// -w0, z0, -2 * y0, -x1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[2];
T[0] += -dq[2] * p_dqi_p_alphak * 2;
T[1] += dq[1] * p_dqi_p_alphak;
T[2] += dq[0] * p_dqi_p_alphak;
T[3] += dq[7] * p_dqi_p_alphak;
T[4] += dq[1] * p_dqi_p_alphak;
T[6] += dq[3] * p_dqi_p_alphak;
T[7] += -dq[4] * p_dqi_p_alphak;
T[8] += -dq[0] * p_dqi_p_alphak;
T[9] += dq[3] * p_dqi_p_alphak;
T[10] += -dq[2] * p_dqi_p_alphak * 2;
T[11] += -dq[5] * p_dqi_p_alphak;
// 3.
// (-2 * z0, -w0, x0, -y1,
// w0, -2 * z0, y0, x1,
// x0, y0, 0, -w1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[3];
T[0] += -dq[3] * p_dqi_p_alphak * 2;
T[1] += -dq[0] * p_dqi_p_alphak;
T[2] += dq[1] * p_dqi_p_alphak;
T[3] += -dq[6] * p_dqi_p_alphak;
T[4] += dq[0] * p_dqi_p_alphak;
T[5] += -dq[3] * p_dqi_p_alphak * 2;
T[6] += dq[2] * p_dqi_p_alphak;
T[7] += dq[5] * p_dqi_p_alphak;
T[8] += dq[1] * p_dqi_p_alphak;
T[9] += dq[2] * p_dqi_p_alphak;
T[11] += -dq[4] * p_dqi_p_alphak;
// 4.
//( 0, 0, 0, -x0,
// 0, 0, 0, -y0,
// 0, 0, 0, -z0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[4];
T[3] += -dq[1] * p_dqi_p_alphak;
T[7] += -dq[2] * p_dqi_p_alphak;
T[11] += -dq[3] * p_dqi_p_alphak;
// 5.
// (0, 0, 0, w0,
// 0, 0, 0, z0,
// 0, 0, 0, -y0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[5];
T[3] += dq[0] * p_dqi_p_alphak;
T[7] += dq[3] * p_dqi_p_alphak;
T[11] += -dq[2] * p_dqi_p_alphak;
// 6.
// (0, 0, 0, -z0,
// 0, 0, 0, w0,
// 0, 0, 0, x0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[6];
T[3] += -dq[3] * p_dqi_p_alphak;
T[7] += dq[0] * p_dqi_p_alphak;
T[11] += dq[1] * p_dqi_p_alphak;
// 7.
// (0, 0, 0, y0,
// 0, 0, 0, -x0,
// 0, 0, 0, w0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[7];
T[3] += dq[2] * p_dqi_p_alphak;
T[7] += -dq[1] * p_dqi_p_alphak;
T[11] += dq[0] * p_dqi_p_alphak;
return T;
}
__device__ __forceinline__ void operator () () const
{
const int iBlockRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iBlockRow >= nBlockRows)
return;
Mapper mapper = rows2nodeIds[iBlockRow];
int knnNodeId = knn_k(get_nodesKnn(mapper.nodeId), mapper.k);
if (knnNodeId >= nNodes)
return;
Tbx::Dual_quat_cu dqi, dqj;
Tbx::Vec3 ri, ti, rj, tj;
get_twist(mapper.nodeId, ri, ti);
get_twist(knnNodeId, rj, tj);
dqi.from_twist(ri, ti);
dqj.from_twist(rj, tj);
float4 nodeVwi = get_nodesVw(mapper.nodeId);
float4 nodeVwj = get_nodesVw(knnNodeId);
Tbx::Point3 vi(convert(read_float3_4(nodeVwi)));
Tbx::Point3 vj(convert(read_float3_4(nodeVwj)));
float alpha_ij = calc_alpha_reg(mapper.nodeId, mapper.k, nNodes);
float ww = sqrt(lambda * alpha_ij);
//if (isinf(nodeVwj.w))
// printf("inf found: %d %d %f %f %f %f\n", mapper.nodeId, knnNodeId,
// nodeVwj.w, 1.f / nodeVwj.w, alpha_ij, ww);
// energy=============================================
Tbx::Vec3 val = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
val = reg_term_penalty(val);
const int iRow = iBlockRow * RowPerNode_RegTerm;
fptr[iRow + 0] = val.x * ww;
fptr[iRow + 1] = val.y * ww;
fptr[iRow + 2] = val.z * ww;
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
Tbx::Vec3 val1 = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
val1 = reg_term_penalty(val1);
fptr[iRow + 3] = val1.x * ww;
fptr[iRow + 4] = val1.y * ww;
fptr[iRow + 5] = val1.z * ww;
#endif
// jacobi=============================================
int cooPos0 = rptr[iBlockRow] * RowPerNode_RegTerm * VarPerNode;
int cooPos1 = cooPos0 + RowPerNode_RegTerm * VarPerNode;
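			// two VarPerNode-wide blocks per block row: cooPos0 addresses the block w.r.t.
			// node i (self) and cooPos1 the block w.r.t. its knn neighbor j, matching the
			// column layout written by compute_Jr_colIdx_kernel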
for (int ialpha = 0; ialpha < VarPerNode; ialpha++)
{
Tbx::Transfo p_Ti_p_alpha = p_SE3_p_alpha_func(dqi, ialpha);
Tbx::Transfo p_Tj_p_alpha = p_SE3_p_alpha_func(dqj, ialpha);
// partial_psi_partial_alpha
Tbx::Vec3 p_psi_p_alphai_j = (p_Ti_p_alpha * vj) * ww;
Tbx::Vec3 p_psi_p_alphaj_j = (p_Tj_p_alpha * vj) * (-ww);
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
Tbx::Vec3 p_psi_p_alphai_i = (p_Ti_p_alpha * vi) * (-ww);
Tbx::Vec3 p_psi_p_alphaj_i = (p_Tj_p_alpha * vi) * ww;
#endif
for (int ixyz = 0; ixyz < 3; ixyz++)
{
vptr[cooPos0 + ixyz*VarPerNode + ialpha] = p_psi_p_alphai_j[ixyz];
vptr[cooPos1 + ixyz*VarPerNode + ialpha] = p_psi_p_alphaj_j[ixyz];
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
vptr[cooPos0 + (3 + ixyz)*VarPerNode + ialpha] = p_psi_p_alphai_i[ixyz];
vptr[cooPos1 + (3 + ixyz)*VarPerNode + ialpha] = p_psi_p_alphaj_i[ixyz];
#endif
}
}// end for ialpha
}// end function ()
__device__ __forceinline__ float get_numeric_inc(float v) const
{
return max(1e-5f, v* 1e-3f);
}
__device__ __forceinline__ void calc_reg_numeric () const
{
const int iBlockRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iBlockRow >= nBlockRows)
return;
Mapper mapper = rows2nodeIds[iBlockRow];
int knnNodeId = knn_k(get_nodesKnn(mapper.nodeId), mapper.k);
if (knnNodeId >= nNodes)
return;
Tbx::Dual_quat_cu dqi, dqj;
Tbx::Vec3 ri, ti, rj, tj;
get_twist(mapper.nodeId, ri, ti);
get_twist(knnNodeId, rj, tj);
dqi.from_twist(ri, ti);
dqj.from_twist(rj, tj);
float4 nodeVwi = get_nodesVw(mapper.nodeId);
float4 nodeVwj = get_nodesVw(knnNodeId);
Tbx::Point3 vi(convert(read_float3_4(nodeVwi)));
Tbx::Point3 vj(convert(read_float3_4(nodeVwj)));
float alpha_ij = calc_alpha_reg(mapper.nodeId, mapper.k, nNodes);
float ww = sqrt(lambda * alpha_ij);
// energy=============================================
Tbx::Vec3 val_j = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 psi_val_j = reg_term_penalty(val_j);
const int iRow = iBlockRow * RowPerNode_RegTerm;
fptr[iRow + 0] = psi_val_j.x * ww;
fptr[iRow + 1] = psi_val_j.y * ww;
fptr[iRow + 2] = psi_val_j.z * ww;
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
Tbx::Vec3 val_i = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
Tbx::Vec3 psi_val_i = reg_term_penalty(val_i);
fptr[iRow + 3] = psi_val_i.x * ww;
fptr[iRow + 4] = psi_val_i.y * ww;
fptr[iRow + 5] = psi_val_i.z * ww;
#endif
// jacobi=============================================
int cooPos0 = rptr[iBlockRow] * RowPerNode_RegTerm * VarPerNode;
int cooPos1 = cooPos0 + RowPerNode_RegTerm * VarPerNode;
for (int ialpha = 0; ialpha < 3; ialpha++)
{
float inci = get_numeric_inc(ri[ialpha]);
ri[ialpha] += inci;
dqi.from_twist(ri, ti);
Tbx::Vec3 val_j_inci = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 val_i_inci = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
ri[ialpha] -= inci;
dqi.from_twist(ri, ti);
float incj = get_numeric_inc(rj[ialpha]);
rj[ialpha] += incj;
dqj.from_twist(rj, tj);
Tbx::Vec3 val_j_incj = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 val_i_incj = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
rj[ialpha] -= incj;
dqj.from_twist(rj, tj);
for (int ixyz = 0; ixyz < 3; ixyz++)
{
vptr[cooPos0 + ixyz*VarPerNode + ialpha] = ww * (val_j_inci[ixyz] - val_j[ixyz]) / inci;
vptr[cooPos1 + ixyz*VarPerNode + ialpha] = ww * (val_j_incj[ixyz] - val_j[ixyz]) / incj;
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
vptr[cooPos0 + (3 + ixyz)*VarPerNode + ialpha] = ww * (val_i_inci[ixyz] - val_i[ixyz]) / inci;
vptr[cooPos1 + (3 + ixyz)*VarPerNode + ialpha] = ww * (val_i_incj[ixyz] - val_i[ixyz]) / incj;
#endif
}
}// end for ialpha
cooPos0 += 3;
cooPos1 += 3;
for (int ialpha = 0; ialpha < 3; ialpha++)
{
float inci = get_numeric_inc(ti[ialpha]);
ti[ialpha] += inci;
dqi.from_twist(ri, ti);
Tbx::Vec3 val_j_inci = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 val_i_inci = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
ti[ialpha] -= inci;
dqi.from_twist(ri, ti);
float incj = get_numeric_inc(tj[ialpha]);
tj[ialpha] += incj;
dqj.from_twist(rj, tj);
Tbx::Vec3 val_j_incj = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 val_i_incj = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
tj[ialpha] -= incj;
dqj.from_twist(rj, tj);
for (int ixyz = 0; ixyz < 3; ixyz++)
{
vptr[cooPos0 + ixyz*VarPerNode + ialpha] = ww * (val_j_inci[ixyz] - val_j[ixyz]) / inci;
vptr[cooPos1 + ixyz*VarPerNode + ialpha] = ww * (val_j_incj[ixyz] - val_j[ixyz]) / incj;
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
vptr[cooPos0 + (3 + ixyz)*VarPerNode + ialpha] = ww * (val_i_inci[ixyz] - val_i[ixyz]) / inci;
vptr[cooPos1 + (3 + ixyz)*VarPerNode + ialpha] = ww * (val_i_incj[ixyz] - val_i[ixyz]) / incj;
#endif
}
}// end for ialpha
}// end function ()
__device__ __forceinline__ void calcTotalEnergy () const
{
const int iNode = threadIdx.x + blockIdx.x * blockDim.x;
if (iNode >= nBlockRows)
return;
Mapper mapper = rows2nodeIds[iNode];
int knnNodeId = knn_k(get_nodesKnn(mapper.nodeId), mapper.k);
if (knnNodeId >= nNodes)
return;
Tbx::Dual_quat_cu dqi, dqj;
Tbx::Vec3 ri, ti, rj, tj;
get_twist(mapper.nodeId, ri, ti);
get_twist(knnNodeId, rj, tj);
dqi.from_twist(ri, ti);
dqj.from_twist(rj, tj);
float4 nodeVwi = get_nodesVw(mapper.nodeId);
float4 nodeVwj = get_nodesVw(knnNodeId);
Tbx::Point3 vi(convert(read_float3_4(nodeVwi)));
Tbx::Point3 vj(convert(read_float3_4(nodeVwj)));
float alpha_ij = max(1.f / nodeVwi.w, 1.f / nodeVwj.w);
float ww2 = lambda * calc_alpha_reg(mapper.nodeId, mapper.k, nNodes);
// energy=============================================
Tbx::Vec3 val = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
float eg = ww2 * reg_term_energy(val);
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
Tbx::Vec3 val1 = dqi.transform(Tbx::Point3(vi)) - dqj.transform(Tbx::Point3(vi));
eg += ww2 * reg_term_energy(val1);
#endif
//atomicAdd(totalEnergy, eg);
totalEnergy[iNode] = eg;
}
};
__global__ void calcRegTerm_kernel(RegTermJacobi rj)
{
#ifdef CALC_REG_TERM_NUMERIC
rj.calc_reg_numeric();
#else
rj();
#endif
}
__global__ void calcRegTermTotalEnergy_kernel(RegTermJacobi rj)
{
rj.calcTotalEnergy();
}
void GpuGaussNewtonSolver::calcRegTerm()
{
if (m_Jr->rows() > 0)
{
CHECK_LE(m_Jr->rows(), m_f_r.size());
RegTermJacobi rj;
rj.lambda = m_param->fusion_lambda;
rj.nNodes = m_numNodes;
rj.nBlockRows = m_Jr->blocksInRow();
rj.psi_reg = m_param->fusion_psi_reg;
rj.rows2nodeIds = m_Jr_RowMap2NodeId;
rj.rptr = m_Jr->bsrRowPtr();
rj.vptr = m_Jr->value();
rj.fptr = m_f_r.ptr();
for (int k = 0; k < WarpField::GraphLevelNum; k++)
rj.nNodesEachLevel[k] = m_pWarpField->getNumNodesInLevel(k);
for (int k = 1; k < WarpField::GraphLevelNum; k++)
rj.nNodesEachLevel[k] += rj.nNodesEachLevel[k-1];
rj.dw_scale_each_level = m_param->warp_param_dw_lvup_scale;
rj.dw_softness = m_param->warp_param_softness;
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_Jr->rows() / RowPerNode_RegTerm, block.x));
calcRegTerm_kernel << <grid, block >> >(rj);
cudaSafeCall(hipGetLastError(), "calcRegTerm_kernel");
// 2. compute Jrt ==============================================
// 2.1. fill (row, col) as (col, row) from Jr and sort.
m_Jr->transposeValueTo(*m_Jrt);
}
}
#pragma endregion
#pragma region --calcTotalEnergy
float GpuGaussNewtonSolver::calcTotalEnergy(float& data_energy, float& reg_energy)
{
float total_energy = 0.f;
hipMemset(m_energy_vec.ptr(), 0, m_energy_vec.sizeBytes());
{
DataTermCombined cs;
cs.angleThres = m_param->fusion_nonRigid_angleThreSin;
cs.distThres = m_param->fusion_nonRigid_distThre;
cs.Hd_ = m_Hd.value();
cs.g_ = m_g;
cs.imgHeight = m_vmap_cano->rows();
cs.imgWidth = m_vmap_cano->cols();
cs.intr = m_intr;
cs.nmap_cano = *m_nmap_cano;
cs.nmap_live = *m_nmap_live;
cs.nmap_warp = *m_nmap_warp;
cs.vmap_cano = *m_vmap_cano;
cs.vmap_live = *m_vmap_live;
cs.vmap_warp = *m_vmap_warp;
cs.vmapKnn = m_vmapKnn;
cs.nNodes = m_numNodes;
cs.Tlw = m_pWarpField->get_rigidTransform();
cs.Tlw_inv = m_pWarpField->get_rigidTransform().fast_invert();
cs.psi_data = m_param->fusion_psi_data;
cs.totalEnergy = m_energy_vec.ptr();
//int zero_mem_symbol = 0;
//hipMemcpyToSymbol(g_totalEnergy, &zero_mem_symbol, sizeof(int));
//hipMemset(&m_tmpvec[0], 0, sizeof(float));
// 1. data term
//////////////////////////////
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(1, 1, 1);
grid.x = divUp(cs.imgWidth, block.x);
grid.y = divUp(cs.imgHeight, block.y);
calcDataTermTotalEnergyKernel << <grid, block >> >(cs);
cudaSafeCall(hipGetLastError(), "calcDataTermTotalEnergyKernel");
}
if (m_Jr->rows() > 0)
{
RegTermJacobi rj;
rj.lambda = m_param->fusion_lambda;
rj.nNodes = m_numNodes;
rj.nBlockRows = m_Jr->blocksInRow();
rj.psi_reg = m_param->fusion_psi_reg;
rj.rows2nodeIds = m_Jr_RowMap2NodeId;
rj.rptr = m_Jr->bsrRowPtr();
rj.vptr = m_Jr->value();
rj.fptr = m_f_r.ptr();
rj.totalEnergy = m_energy_vec.ptr() + m_vmapKnn.rows()*m_vmapKnn.cols();
for (int k = 0; k < WarpField::GraphLevelNum; k++)
rj.nNodesEachLevel[k] = m_pWarpField->getNumNodesInLevel(k);
for (int k = 1; k < WarpField::GraphLevelNum; k++)
rj.nNodesEachLevel[k] += rj.nNodesEachLevel[k - 1];
rj.dw_scale_each_level = m_param->warp_param_dw_lvup_scale;
rj.dw_softness = m_param->warp_param_softness;
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_Jr->rows() / RowPerNode_RegTerm, block.x));
calcRegTermTotalEnergy_kernel << <grid, block >> >(rj);
cudaSafeCall(hipGetLastError(), "calcRegTermTotalEnergy_kernel");
}
//cudaSafeCall(hipMemcpy(&total_energy,
// &m_tmpvec[0], sizeof(float), hipMemcpyDeviceToHost), "copy reg totalEnergy to host");
hipblasStatus_t st = hipblasSasum(m_cublasHandle, m_Jr->rows() / RowPerNode_RegTerm +
m_vmapKnn.rows()*m_vmapKnn.cols(),
m_energy_vec.ptr(), 1, &total_energy);
if (st != HIPBLAS_STATUS_SUCCESS)
			throw std::exception("cublas error, in hipblasSasum");
// debug get both data and reg term energy
#if 1
reg_energy = 0.f;
if (m_Jr->rows() > 0)
{
hipblasSasum(m_cublasHandle, m_Jr->rows() / RowPerNode_RegTerm,
m_energy_vec.ptr() + m_vmapKnn.rows()*m_vmapKnn.cols(),
1, ®_energy);
}
data_energy = total_energy - reg_energy;
#endif
return total_energy;
}
#pragma endregion
#pragma region --update twist
__global__ void updateTwist_inch_kernel(float* twist, const float* h, float step, int nNodes)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < nNodes)
{
int i6 = i * 6;
Tbx::Vec3 r(twist[i6] + step*h[i6], twist[i6 + 1] + step*h[i6 + 1], twist[i6 + 2] + step*h[i6 + 2]);
Tbx::Vec3 t(twist[i6+3] + step*h[i6+3], twist[i6 + 4] + step*h[i6 + 4], twist[i6 + 5] + step*h[i6 + 5]);
Tbx::Dual_quat_cu dq;
dq.from_twist(r, t);
dq.to_twist(r, t);
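			// the from_twist/to_twist round trip re-extracts (r, t) from the dual
			// quaternion, so the stored twist stays in the library's canonical form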
twist[i6] = r[0];
twist[i6 + 1] = r[1];
twist[i6 + 2] = r[2];
twist[i6 + 3] = t[0];
twist[i6 + 4] = t[1];
twist[i6 + 5] = t[2];
}
}
void GpuGaussNewtonSolver::updateTwist_inch(const float* h, float step)
{
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_numNodes, block.x));
updateTwist_inch_kernel << <grid, block >> >(m_twist.ptr(), h, step, m_numNodes);
cudaSafeCall(hipGetLastError(), "updateTwist_inch_kernel");
}
#pragma endregion
#pragma region --factor out rigid
__device__ float _g_common_q[8];
template<int CTA_SIZE_, typename T>
static __device__ __forceinline__ void reduce(volatile T* buffer)
{
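		// block-wide tree reduction in shared memory; the sum ends up in buffer[0].
		// The last 32 steps omit __syncthreads() and rely on warp-synchronous execution.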
int tid = Block::flattenedThreadId();
T val = buffer[tid];
if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); }
if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); }
if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); }
if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); }
if (tid < 32){
if (CTA_SIZE_ >= 64) { buffer[tid] = val = val + buffer[tid + 32]; }
if (CTA_SIZE_ >= 32) { buffer[tid] = val = val + buffer[tid + 16]; }
if (CTA_SIZE_ >= 16) { buffer[tid] = val = val + buffer[tid + 8]; }
if (CTA_SIZE_ >= 8) { buffer[tid] = val = val + buffer[tid + 4]; }
if (CTA_SIZE_ >= 4) { buffer[tid] = val = val + buffer[tid + 2]; }
if (CTA_SIZE_ >= 2) { buffer[tid] = val = val + buffer[tid + 1]; }
}
}
__global__ void reduce_all_nodes_kernel(const float4* nodesDqVw, int n)
{
const float* beg = (const float*)nodesDqVw + blockIdx.x;
float sum = 0.f;
for (int i = threadIdx.x; i < n; i += blockDim.x)
sum += beg[i * 12]; // dq+vw, 12 float per node
__shared__ float smem[GpuGaussNewtonSolver::CTA_SIZE];
smem[threadIdx.x] = sum;
__syncthreads();
reduce<GpuGaussNewtonSolver::CTA_SIZE>(smem);
if (threadIdx.x == 0)
_g_common_q[blockIdx.x] = smem[0];
}
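	// launched with 8 blocks: block b sums the b-th of the 8 dual-quaternion floats
	// over the level-0 nodes (stride 12 = 8 dq + 4 vw floats per node), accumulating
	// an unnormalized average rigid motion into _g_common_q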
__global__ void factor_all_nodes_kernel(float4* nodesDqVw, int n, Tbx::Dual_quat_cu rigid_inv)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
Tbx::Dual_quat_cu dq = rigid_inv * pack_dual_quat(nodesDqVw[3 * i], nodesDqVw[3 * i + 1]);
unpack_dual_quat(dq, nodesDqVw[3 * i], nodesDqVw[3 * i + 1]);
}
// optional, factor out common rigid transformations among all nodes
void GpuGaussNewtonSolver::factor_out_rigid()
{
if (m_pWarpField == nullptr)
throw std::exception("GpuGaussNewtonSolver::solve: null pointer");
if (m_pWarpField->getNumLevels() < 2)
throw std::exception("non-supported levels of warp field!");
if (m_pWarpField->getNumNodesInLevel(0) == 0)
{
printf("no warp nodes, return\n");
return;
}
const int num0 = m_pWarpField->getNumNodesInLevel(0);
const int numAll = m_pWarpField->getNumAllNodes();
Tbx::Dual_quat_cu dq(Tbx::Quat_cu(0,0,0,0), Tbx::Quat_cu(0,0,0,0));
hipMemcpyToSymbol(_g_common_q, &dq, sizeof(Tbx::Dual_quat_cu));
reduce_all_nodes_kernel << <8, GpuGaussNewtonSolver::CTA_SIZE >> >(
m_pWarpField->getNodesDqVwPtr(0), num0);
cudaSafeCall(hipGetLastError(), "reduce_all_nodes_kernel");
hipMemcpyFromSymbol(&dq, _g_common_q, sizeof(Tbx::Dual_quat_cu));
if (dq.get_non_dual_part().norm() > Tbx::Dual_quat_cu::epsilon())
{
dq.normalize();
m_pWarpField->set_rigidTransform(
m_pWarpField->get_rigidTransform() * dq.to_transformation());
for (int lv = 0; lv < m_pWarpField->getNumLevels(); lv++)
{
int numLv = m_pWarpField->getNumNodesInLevel(lv);
if (numLv == 0)
break;
factor_all_nodes_kernel << <divUp(numLv, GpuGaussNewtonSolver::CTA_SIZE),
GpuGaussNewtonSolver::CTA_SIZE >> >(m_pWarpField->getNodesDqVwPtr(lv), numLv, dq.conjugate());
}
cudaSafeCall(hipGetLastError(), "factor_all_nodes_kernel");
// re-extract info
m_pWarpField->extract_nodes_info_no_allocation(m_nodesKnn, m_twist, m_nodesVw);
checkNan(m_twist, numAll * 6, "twist after factoring rigid");
}
}
#pragma endregion
} | d35c30a150cadf0b8afb640aafd086cceb9ebd89.cu | #include "GpuGaussNewtonSolver.h"
#include "device_utils.h"
#include "cudpp\thrust_wrapper.h"
#include "cudpp\ModerGpuWrapper.h"
#include <iostream>
#include "GpuCholeSky.h"
namespace dfusion
{
//#define DEFINE_USE_HALF_GRAPH_EDGE
//#define CALC_DATA_TERM_NUMERIC
//#define CALC_REG_TERM_NUMERIC
//#define DEBUG_ASSIGN_10M_TO_NO_CORR
//#define DEBUG_ASSIGN_BIG_ENERGY_TO_NO_CORR
//#define ENABLE_ANTI_PODALITY
#ifdef DEFINE_USE_HALF_GRAPH_EDGE
enum{RowPerNode_RegTerm = 3};
#else
enum{ RowPerNode_RegTerm = 6 };
#endif
//#define USE_L2_NORM_DATA_TERM
//#define USE_L2_NORM_REG_TERM
#define CHECK(a, msg){if(!(a)) throw std::exception(msg);}
#define CHECK_LE(a, b){if((a) > (b)) {std::cout << "" << #a << "(" << a << ")<=" << #b << "(" << b << ")";throw std::exception(" ###error!");}}
texture<KnnIdx, cudaTextureType1D, cudaReadModeElementType> g_nodesKnnTex;
texture<float4, cudaTextureType1D, cudaReadModeElementType> g_nodesVwTex;
texture<float, cudaTextureType1D, cudaReadModeElementType> g_twistTex;
__device__ __forceinline__ float4 get_nodesVw(int i)
{
return tex1Dfetch(g_nodesVwTex, i);
}
__device__ __forceinline__ KnnIdx get_nodesKnn(int i)
{
return tex1Dfetch(g_nodesKnnTex, i);
}
__device__ __forceinline__ void get_twist(int i, Tbx::Vec3& r, Tbx::Vec3& t)
{
int i6 = i * 6;
r.x = tex1Dfetch(g_twistTex, i6++);
r.y = tex1Dfetch(g_twistTex, i6++);
r.z = tex1Dfetch(g_twistTex, i6++);
t.x = tex1Dfetch(g_twistTex, i6++);
t.y = tex1Dfetch(g_twistTex, i6++);
t.z = tex1Dfetch(g_twistTex, i6++);
}
__device__ __forceinline__ float3 read_float3_4(float4 a)
{
return make_float3(a.x, a.y, a.z);
}
__device__ __forceinline__ float sqr(float a)
{
return a*a;
}
__device__ __forceinline__ float pow3(float a)
{
return a*a*a;
}
__device__ __forceinline__ float sign(float a)
{
return (a>0.f) - (a<0.f);
}
__device__ __forceinline__ void sort_knn(KnnIdx& knn)
{
for (int i = 1; i < KnnK; i++)
{
KnnIdxType x = knn_k(knn,i);
int j = i;
while (j > 0 && knn_k(knn, j - 1) > x)
{
knn_k(knn, j) = knn_k(knn, j - 1);
j = j - 1;
}
knn_k(knn, j) = x;
}
}
#pragma region --bind textures
void GpuGaussNewtonSolver::bindTextures()
{
if (1)
{
size_t offset;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<KnnIdx>();
cudaBindTexture(&offset, &g_nodesKnnTex, m_nodesKnn.ptr(), &desc,
m_nodesKnn.size() * sizeof(KnnIdx));
if (offset != 0)
throw std::exception("GpuGaussNewtonSolver::bindTextures(): non-zero-offset error1!");
}
if (1)
{
size_t offset;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();
cudaBindTexture(&offset, &g_nodesVwTex, m_nodesVw.ptr(), &desc,
m_nodesVw.size() * sizeof(float4));
if (offset != 0)
throw std::exception("GpuGaussNewtonSolver::bindTextures(): non-zero-offset error2!");
}
if (1)
{
size_t offset;
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
cudaBindTexture(&offset, &g_twistTex, m_twist.ptr(), &desc,
m_twist.size() * sizeof(float));
if (offset != 0)
throw std::exception("GpuGaussNewtonSolver::bindTextures(): non-zero-offset error3!");
}
}
void GpuGaussNewtonSolver::unBindTextures()
{
cudaUnbindTexture(g_twistTex);
cudaUnbindTexture(g_nodesVwTex);
cudaUnbindTexture(g_nodesKnnTex);
}
#pragma endregion
#pragma region --calc data term
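	// Data term: for each pixel with a valid correspondence, the residual is the
	// point-to-plane error n_warp . (v_warp - v_live); it is robustified by the Tukey
	// penalty below, and its Jacobian w.r.t. the 6 twist parameters of each of the
	// pixel's knn nodes is accumulated into Hd (block diagonal) and g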
struct DataTermCombined
{
enum
{
CTA_SIZE_X = GpuGaussNewtonSolver::CTA_SIZE_X,
CTA_SIZE_Y = GpuGaussNewtonSolver::CTA_SIZE_Y,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
VarPerNode = GpuGaussNewtonSolver::VarPerNode,
VarPerNode2 = VarPerNode*VarPerNode,
LowerPartNum = GpuGaussNewtonSolver::LowerPartNum,
};
PtrStep<float4> vmap_live;
PtrStep<float4> nmap_live;
PtrStep<float4> vmap_warp;
PtrStep<float4> nmap_warp;
PtrStep<float4> vmap_cano;
PtrStep<float4> nmap_cano;
PtrStep<KnnIdx> vmapKnn;
float* Hd_;
float* g_;
Intr intr;
Tbx::Transfo Tlw_inv;
Tbx::Transfo Tlw;
int imgWidth;
int imgHeight;
int nNodes;
float distThres;
float angleThres;
float psi_data;
float* totalEnergy;
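		// robust Tukey (biweight) penalty used by the data term:
		//   energy   rho (f) = psi^2/6 * (1 - (1 - (f/psi)^2)^3)  for |f| <= psi,  psi^2/6 otherwise
		//   gradient rho'(f) = f * (1 - (f/psi)^2)^2              for |f| <= psi,  0       otherwise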
__device__ __forceinline__ float data_term_energy(float f)const
{
#ifdef USE_L2_NORM_DATA_TERM
return 0.5f*f*f;
#else
			// the robust Tukey penalty (energy)
if (abs(f) <= psi_data)
return psi_data*psi_data / 6.f *(1 - pow(1 - sqr(f / psi_data), 3));
else
return psi_data*psi_data / 6.f;
#endif
}
__device__ __forceinline__ float data_term_penalty(float f)const
{
#ifdef USE_L2_NORM_DATA_TERM
return f;
#else
return f * sqr(max(0.f, 1.f - sqr(f / psi_data)));
			//// the robust Tukey penalty gradient
//if (abs(f) <= psi_data)
// return f * sqr(1 - sqr(f / psi_data));
//else
// return 0;
#endif
}
__device__ __forceinline__ float trace_AtB(Tbx::Transfo A, Tbx::Transfo B)const
{
float sum = 0;
for (int i = 0; i < 16; i++)
sum += A[i] * B[i];
return sum;
}
__device__ __forceinline__ Tbx::Transfo compute_p_f_p_T(const Tbx::Vec3& n,
const Tbx::Point3& v, const Tbx::Point3& vl, const Tbx::Dual_quat_cu& dq)const
{
//Tbx::Transfo T = Tlw*dq.to_transformation_after_normalize();
//Tbx::Transfo nvt = outer_product(n, v);
//Tbx::Transfo vlnt = outer_product(n, vl).transpose();
//Tbx::Transfo p_f_p_T = T*(nvt + nvt.transpose()) - vlnt;
Tbx::Vec3 Tn = dq.rotate(n);
Tbx::Point3 Tv(dq.transform(v) - vl);
return Tbx::Transfo(
Tn.x*v.x + n.x*Tv.x, Tn.x*v.y + n.y*Tv.x, Tn.x*v.z + n.z*Tv.x, Tn.x,
Tn.y*v.x + n.x*Tv.y, Tn.y*v.y + n.y*Tv.y, Tn.y*v.z + n.z*Tv.y, Tn.y,
Tn.z*v.x + n.x*Tv.z, Tn.z*v.y + n.y*Tv.z, Tn.z*v.z + n.z*Tv.z, Tn.z,
n.x, n.y, n.z, 0
);
}
__device__ __forceinline__ Tbx::Transfo p_T_p_alphak_func(const Tbx::Dual_quat_cu& p_qk_p_alpha,
const Tbx::Dual_quat_cu& dq_bar, const Tbx::Dual_quat_cu& dq, float inv_norm_dq_bar, float wk_k)const
{
Tbx::Transfo p_T_p_alphak = Tbx::Transfo::empty();
float pdot = dq_bar.get_non_dual_part().dot(p_qk_p_alpha.get_non_dual_part())
* sqr(inv_norm_dq_bar);
			//// evaluate p_dqi_p_alphak; heavily hard-coded here
			//// this hard-coding is crucial for performance
// 0:
// (0, -z0, y0, x1,
// z0, 0, -x0, y1,
//-y0, x0, 0, z1,
// 0, 0, 0, 0) * 2;
float p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[0] - dq_bar[0] * pdot
);
p_T_p_alphak[1] += -dq[3] * p_dqi_p_alphak;
p_T_p_alphak[2] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[3] += dq[5] * p_dqi_p_alphak;
p_T_p_alphak[4] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[6] += -dq[1] * p_dqi_p_alphak;
p_T_p_alphak[7] += dq[6] * p_dqi_p_alphak;
p_T_p_alphak[8] += -dq[2] * p_dqi_p_alphak;
p_T_p_alphak[9] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[11] += dq[7] * p_dqi_p_alphak;
// 1
//( 0, y0, z0, -w1,
// y0, -2 * x0, -w0, -z1,
// z0, w0, -2 * x0, y1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[1] - dq_bar[1] * pdot
);
p_T_p_alphak[1] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[2] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[3] += -dq[4] * p_dqi_p_alphak;
p_T_p_alphak[4] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[5] += -dq[1] * p_dqi_p_alphak * 2;
p_T_p_alphak[6] += -dq[0] * p_dqi_p_alphak;
p_T_p_alphak[7] += -dq[7] * p_dqi_p_alphak;
p_T_p_alphak[8] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[9] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[10] += -dq[1] * p_dqi_p_alphak * 2;
p_T_p_alphak[11] += dq[6] * p_dqi_p_alphak;
// 2.
// (-2 * y0, x0, w0, z1,
// x0, 0, z0, -w1,
// -w0, z0, -2 * y0, -x1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[2] - dq_bar[2] * pdot
);
p_T_p_alphak[0] += -dq[2] * p_dqi_p_alphak * 2;
p_T_p_alphak[1] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[2] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[3] += dq[7] * p_dqi_p_alphak;
p_T_p_alphak[4] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[6] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[7] += -dq[4] * p_dqi_p_alphak;
p_T_p_alphak[8] += -dq[0] * p_dqi_p_alphak;
p_T_p_alphak[9] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[10] += -dq[2] * p_dqi_p_alphak * 2;
p_T_p_alphak[11] += -dq[5] * p_dqi_p_alphak;
// 3.
// (-2 * z0, -w0, x0, -y1,
// w0, -2 * z0, y0, x1,
// x0, y0, 0, -w1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[3] - dq_bar[3] * pdot
);
p_T_p_alphak[0] += -dq[3] * p_dqi_p_alphak * 2;
p_T_p_alphak[1] += -dq[0] * p_dqi_p_alphak;
p_T_p_alphak[2] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[3] += -dq[6] * p_dqi_p_alphak;
p_T_p_alphak[4] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[5] += -dq[3] * p_dqi_p_alphak * 2;
p_T_p_alphak[6] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[7] += dq[5] * p_dqi_p_alphak;
p_T_p_alphak[8] += dq[1] * p_dqi_p_alphak;
p_T_p_alphak[9] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[11] += -dq[4] * p_dqi_p_alphak;
// 4.
//( 0, 0, 0, -x0,
// 0, 0, 0, -y0,
// 0, 0, 0, -z0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[4] - dq_bar[4] * pdot
);
p_T_p_alphak[3] += -dq[1] * p_dqi_p_alphak;
p_T_p_alphak[7] += -dq[2] * p_dqi_p_alphak;
p_T_p_alphak[11] += -dq[3] * p_dqi_p_alphak;
// 5.
// (0, 0, 0, w0,
// 0, 0, 0, z0,
// 0, 0, 0, -y0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[5] - dq_bar[5] * pdot
);
p_T_p_alphak[3] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[7] += dq[3] * p_dqi_p_alphak;
p_T_p_alphak[11] += -dq[2] * p_dqi_p_alphak;
// 6.
// (0, 0, 0, -z0,
// 0, 0, 0, w0,
// 0, 0, 0, x0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[6] - dq_bar[6] * pdot
);
p_T_p_alphak[3] += -dq[3] * p_dqi_p_alphak;
p_T_p_alphak[7] += dq[0] * p_dqi_p_alphak;
p_T_p_alphak[11] += dq[1] * p_dqi_p_alphak;
// 7.
// (0, 0, 0, y0,
// 0, 0, 0, -x0,
// 0, 0, 0, w0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = wk_k * (
p_qk_p_alpha[7] - dq_bar[7] * pdot
);
p_T_p_alphak[3] += dq[2] * p_dqi_p_alphak;
p_T_p_alphak[7] += -dq[1] * p_dqi_p_alphak;
p_T_p_alphak[11] += dq[0] * p_dqi_p_alphak;
return p_T_p_alphak;
}
__device__ __forceinline__ bool search(int x, int y, Tbx::Point3& vl) const
{
float3 vwarp = read_float3_4(vmap_warp(y, x));
float3 nwarp = read_float3_4(nmap_warp(y, x));
return search(vwarp, nwarp, vl);
}
__device__ __forceinline__ bool search(float3 vwarp, float3 nwarp, Tbx::Point3& vl) const
{
if (isnan(nwarp.x) || isnan(vwarp.x))
return false;
float3 uvd = intr.xyz2uvd(vwarp);
int2 ukr = make_int2(__float2int_rn(uvd.x), __float2int_rn(uvd.y));
			// we use the OpenGL coordinate convention, thus world.z should be < 0
if (ukr.x < 0 || ukr.y < 0 || ukr.x >= imgWidth || ukr.y >= imgHeight || vwarp.z >= 0)
return false;
float3 vlive = read_float3_4(vmap_live[ukr.y*imgWidth + ukr.x]);
float3 nlive = read_float3_4(nmap_live[ukr.y*imgWidth + ukr.x]);
if (isnan(nlive.x) || isnan(vlive.x))
return false;
#ifndef DEBUG_ASSIGN_10M_TO_NO_CORR
float dist = norm(vwarp - vlive);
if (!(dist <= distThres))
return false;
float sine = norm(cross(nwarp, nlive));
if (!(sine < angleThres))
return false;
#endif
vl = Tbx::Point3(vlive.x, vlive.y, vlive.z);
return true;
}
__device__ __forceinline__ void calc_dataterm () const
{
const int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
const int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
Tbx::Point3 vl;
bool found_coresp = false;
if (x < imgWidth && y < imgHeight)
found_coresp = search(x, y, vl);
vl = Tlw_inv * vl;
if (found_coresp)
{
Tbx::Point3 v(convert(read_float3_4(vmap_cano(y, x))));
Tbx::Vec3 n(convert(read_float3_4(nmap_cano(y, x))));
const KnnIdx knn = vmapKnn(y, x);
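				// dual-quaternion blending (DQB): accumulate the wk-weighted dual quaternions of
				// the pixel's knn nodes (sign-aligned to dqk_0 when ENABLE_ANTI_PODALITY is defined),
				// then normalize the sum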
Tbx::Dual_quat_cu dq(Tbx::Quat_cu(0, 0, 0, 0), Tbx::Quat_cu(0, 0, 0, 0));
Tbx::Dual_quat_cu dqk_0;
float wk[KnnK];
// dqk_0
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, 0), r, t);
float4 nodeVw = get_nodesVw(knn_k(knn, 0));
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
dqk_0.from_twist(r, t);
float expIn = nodesV.dot(nodesV) * nodeVw.w * nodeVw.w;
wk[0] = __expf(-0.5f * expIn);
dq = dq + dqk_0 * wk[0];
}
// other dqk_k
#pragma unroll
for (int k = 1; k < KnnK; k++)
{
int knnNodeId = knn_k(knn, k);
if (knnNodeId >= nNodes)
break;
Tbx::Vec3 r, t;
get_twist(knnNodeId, r, t);
float4 nodeVw = get_nodesVw(knnNodeId);
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw))-v);
Tbx::Dual_quat_cu dqk_k;
dqk_k.from_twist(r, t);
#ifdef ENABLE_ANTI_PODALITY
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w)
*sign(dqk_0.get_non_dual_part().dot(dqk_k.get_non_dual_part()));
#else
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w);
#endif
dq = dq + dqk_k * wk[k];
}
Tbx::Dual_quat_cu dq_bar = dq;
float norm_dq_bar = dq_bar.norm();
if (norm_dq_bar < Tbx::Dual_quat_cu::epsilon())
return;
float inv_norm_dq_bar = 1.f / norm_dq_bar;
dq = dq * inv_norm_dq_bar; // normalize
				// psi'(f): derivative of the robust data energy w.r.t. the point-to-plane residual
const float f = data_term_penalty(dq.rotate(n).dot(dq.transform(v) - vl));
				// partial_f_partial_T
const Tbx::Transfo p_f_p_T = compute_p_f_p_T(n, v, vl, dq);
for (int knnK = 0; knnK < KnnK; knnK++)
{
int knnNodeId = knn_k(knn, knnK);
if (knnNodeId >= nNodes)
break;
float p_f_p_alpha[VarPerNode];
float wk_k = wk[knnK] * inv_norm_dq_bar * 2;
					//// compute partial_T_partial_alphak, hard-coded here.
Tbx::Dual_quat_cu p_qk_p_alpha;
Tbx::Transfo p_T_p_alphak;
Tbx::Vec3 t, r;
float b, c;
Tbx::Quat_cu q1;
get_twist(knnNodeId, r, t);
{
float n = r.norm();
float sin_n, cos_n;
sincos(n, &sin_n, &cos_n);
b = n > Tbx::Dual_quat_cu::epsilon() ? sin_n / n : 1;
c = n > Tbx::Dual_quat_cu::epsilon() ? (cos_n - b) / (n*n) : 0;
q1 = Tbx::Quat_cu(cos_n*0.5f, r.x*b*0.5f, r.y*b*0.5f, r.z*b*0.5f);
}
// alpha0
p_qk_p_alpha[0] = -r[0] * b;
p_qk_p_alpha[1] = b + r[0] * r[0] * c;
p_qk_p_alpha[2] = r[0] * r[1] * c;
p_qk_p_alpha[3] = r[0] * r[2] * c;
p_qk_p_alpha = Tbx::Dual_quat_cu::dual_quat_from(p_qk_p_alpha.get_non_dual_part(), t);
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[0] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha1
p_qk_p_alpha[0] = -r[1] * b;
p_qk_p_alpha[1] = r[1] * r[0] * c;
p_qk_p_alpha[2] = b + r[1] * r[1] * c;
p_qk_p_alpha[3] = r[1] * r[2] * c;
p_qk_p_alpha = Tbx::Dual_quat_cu::dual_quat_from(p_qk_p_alpha.get_non_dual_part(), t);
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[1] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha2
p_qk_p_alpha[0] = -r[2] * b;
p_qk_p_alpha[1] = r[2] * r[0] * c;
p_qk_p_alpha[2] = r[2] * r[1] * c;
p_qk_p_alpha[3] = b + r[2] * r[2] * c;
p_qk_p_alpha = Tbx::Dual_quat_cu::dual_quat_from(p_qk_p_alpha.get_non_dual_part(), t);
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[2] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha3
p_qk_p_alpha = Tbx::Dual_quat_cu(Tbx::Quat_cu(0, 0, 0, 0),
Tbx::Quat_cu(-q1[1], q1[0], -q1[3], q1[2]));
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[3] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha4
p_qk_p_alpha = Tbx::Dual_quat_cu(Tbx::Quat_cu(0, 0, 0, 0),
Tbx::Quat_cu(-q1[2], q1[3], q1[0], -q1[1]));
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[4] = trace_AtB(p_f_p_T, p_T_p_alphak);
// alpha5
p_qk_p_alpha = Tbx::Dual_quat_cu(Tbx::Quat_cu(0, 0, 0, 0),
Tbx::Quat_cu(-q1[3], -q1[2], q1[1], q1[0]));
p_T_p_alphak = p_T_p_alphak_func(p_qk_p_alpha, dq_bar, dq,
inv_norm_dq_bar, wk_k);
p_f_p_alpha[5] = trace_AtB(p_f_p_T, p_T_p_alphak);
//// reduce--------------------------------------------------
int shift = knnNodeId * VarPerNode2;
int shift_g = knnNodeId * VarPerNode;
for (int i = 0; i < VarPerNode; ++i)
{
#pragma unroll
for (int j = 0; j <= i; ++j)
atomicAdd(&Hd_[shift + j], p_f_p_alpha[i] * p_f_p_alpha[j]);
atomicAdd(&g_[shift_g + i], p_f_p_alpha[i] * f);
shift += VarPerNode;
}// end for i
}// end for knnK
}// end if found corr
}// end function ()
__device__ __forceinline__ Tbx::Dual_quat_cu calc_pixel_dq(KnnIdx knn,
Tbx::Point3 v, float* wk)const
{
Tbx::Dual_quat_cu dqk_0;
Tbx::Dual_quat_cu dq(Tbx::Quat_cu(0,0,0,0), Tbx::Quat_cu(0,0,0,0));
// dqk_0
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, 0), r, t);
float4 nodeVw = get_nodesVw(knn_k(knn, 0));
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
dqk_0.from_twist(r, t);
wk[0] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w);
dq += dqk_0 * wk[0];
}
// other dqk_k
#pragma unroll
for (int k = 1; k < KnnK; k++)
{
if (knn_k(knn, k) >= nNodes)
break;
float4 nodeVw = get_nodesVw(knn_k(knn, k));
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
Tbx::Dual_quat_cu dqk_k;
Tbx::Vec3 r, t;
get_twist(knn_k(knn, k), r, t);
dqk_k.from_twist(r, t);
#ifdef ENABLE_ANTI_PODALITY
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w)
*sign(dqk_0.get_non_dual_part().dot(dqk_k.get_non_dual_part()));
#else
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w);
#endif
dq += dqk_k * wk[k];
}
return dq;
}
__device__ __forceinline__ void exchange_ri_k(KnnIdx knn,
const float* wk, int k, int i, Tbx::Dual_quat_cu& dq, float& inc)const
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, k), r, t);
Tbx::Dual_quat_cu old_dqk, new_dqk;
old_dqk.from_twist(r, t);
inc = get_numeric_inc(r[i]);
r[i] += inc;
new_dqk.from_twist(r, t);
dq -= old_dqk * wk[k];
dq += new_dqk * wk[k] * sign(old_dqk.get_non_dual_part().dot(new_dqk.get_non_dual_part()));
}
__device__ __forceinline__ void exchange_ti_k(KnnIdx knn,
const float* wk, int k, int i, Tbx::Dual_quat_cu& dq, float& inc)const
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, k), r, t);
Tbx::Dual_quat_cu old_dqk, new_dqk;
old_dqk.from_twist(r, t);
inc = get_numeric_inc(t[i]);
t[i] += inc;
new_dqk.from_twist(r, t);
dq -= old_dqk * wk[k];
dq += new_dqk * wk[k] * sign(old_dqk.get_non_dual_part().dot(new_dqk.get_non_dual_part()));
}
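		// forward-difference step: relative (1e-3 of the variable) with an absolute floor of 1e-5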
__device__ __forceinline__ float get_numeric_inc(float v) const
{
return max( 1e-5f, v* 1e-3f);
}
__device__ __forceinline__ void calc_dataterm_numeric() const
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= imgWidth || y >= imgHeight)
return;
const KnnIdx knn = vmapKnn(y, x);
Tbx::Point3 v(convert(read_float3_4(vmap_cano(y, x))));
Tbx::Vec3 n(convert(read_float3_4(nmap_cano(y, x))));
if (isnan(n.x) || isnan(v.x))
return;
			// 1. get all node params
// 2. compute function=================================================
float wk[KnnK];
Tbx::Dual_quat_cu dq = calc_pixel_dq(knn, v, wk);
float norm_dq = dq.norm();
if (norm_dq < Tbx::Dual_quat_cu::epsilon())
return;
Tbx::Dual_quat_cu dq_not_normalized = dq;
dq = dq * (1.f / norm_dq); // normalize
// find corr
Tbx::Vec3 nwarp = Tlw*dq.rotate(n);
Tbx::Point3 vwarp = Tlw*dq.transform(v);
Tbx::Point3 vl;
//bool corr_found = search(convert(vwarp), convert(nwarp), vl);
bool corr_found = search(x, y, vl);
if (!corr_found)
return;
			// the residual and its robustified (Tukey) derivative
const float f = nwarp.dot(vwarp - vl);
const float psi_f = data_term_penalty(f);
// 3. compute jacobi
for (int knnK = 0; knnK < KnnK; knnK++)
{
if (knn_k(knn, knnK) >= nNodes)
break;
float df[6];
// 3.0 p_r[0:2]
for (int i = 0; i < 3; i++)
{
float inc;
Tbx::Dual_quat_cu dq1 = dq_not_normalized;
exchange_ri_k(knn, wk, knnK, i, dq1, inc);
dq1 *= (1.f / dq1.norm());
nwarp = Tlw*dq1.rotate(n);
vwarp = Tlw*dq1.transform(v);
Tbx::Point3 vl1 = vl;
//corr_found = search(convert(vwarp), convert(nwarp), vl1);
//if (!corr_found)
// return;
float f1 = nwarp.dot(vwarp - vl1);
df[i] = (f1 - f) / inc;
}// i=0:3
// 3.1 p_t[0:2]
for (int i = 0; i < 3; i++)
{
float inc;
Tbx::Dual_quat_cu dq1 = dq_not_normalized;
exchange_ti_k(knn, wk, knnK, i, dq1, inc);
dq1 *= (1.f / dq1.norm());
nwarp = Tlw*dq1.rotate(n);
vwarp = Tlw*dq1.transform(v);
Tbx::Point3 vl1 = vl;
//corr_found = search(convert(vwarp), convert(nwarp), vl1);
//if (!corr_found)
// return;
float f1 = nwarp.dot(vwarp - vl1);
df[i+3] = (f1 - f) / inc;
}// i=0:3
//// reduce--------------------------------------------------
int shift = knn_k(knn, knnK) * VarPerNode2;
int shift_g = knn_k(knn, knnK) * VarPerNode;
for (int i = 0; i < VarPerNode; ++i)
{
#pragma unroll
for (int j = 0; j <= i; ++j)
atomicAdd(&Hd_[shift + j], df[i] * df[j]);
atomicAdd(&g_[shift_g + i], df[i] * psi_f);
shift += VarPerNode;
}// end for i
}// end for knnK
}// end function ()
__device__ __forceinline__ void calcTotalEnergy()const
{
const int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
const int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
Tbx::Point3 vl;
bool found_coresp = false;
if (x < imgWidth && y < imgHeight)
found_coresp = search(x, y, vl);
if (found_coresp)
{
Tbx::Point3 v(convert(read_float3_4(vmap_cano(y, x))));
Tbx::Vec3 n(convert(read_float3_4(nmap_cano(y, x))));
const KnnIdx knn = vmapKnn(y, x);
Tbx::Dual_quat_cu dq(Tbx::Quat_cu(0, 0, 0, 0), Tbx::Quat_cu(0, 0, 0, 0));
Tbx::Dual_quat_cu dqk_0;
float wk[KnnK];
// dqk_0
{
Tbx::Vec3 r, t;
get_twist(knn_k(knn, 0), r, t);
float4 nodeVw = get_nodesVw(knn_k(knn, 0));
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
dqk_0.from_twist(r, t);
float expIn = nodesV.dot(nodesV) * nodeVw.w * nodeVw.w;
wk[0] = __expf(-0.5f * expIn);
dq = dq + dqk_0 * wk[0];
}
// other dqk_k
#pragma unroll
for (int k = 1; k < KnnK; k++)
{
int knnNodeId = knn_k(knn, k);
if (knnNodeId >= nNodes)
break;
Tbx::Vec3 r, t;
get_twist(knnNodeId, r, t);
float4 nodeVw = get_nodesVw(knnNodeId);
Tbx::Vec3 nodesV(convert(read_float3_4(nodeVw)) - v);
Tbx::Dual_quat_cu dqk_k;
dqk_k.from_twist(r, t);
#ifdef ENABLE_ANTI_PODALITY
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w)
*sign(dqk_0.get_non_dual_part().dot(dqk_k.get_non_dual_part()));
#else
wk[k] = __expf(-0.5f * nodesV.dot(nodesV) * nodeVw.w * nodeVw.w);
#endif
dq = dq + dqk_k * wk[k];
}
float norm_dq = dq.norm();
if (norm_dq < Tbx::Dual_quat_cu::epsilon())
return;
dq = dq * (1.f / norm_dq); // normalize
				// the robust data-term energy
const float f = data_term_energy((Tlw*dq.rotate(n)).dot(Tlw*dq.transform(v) - vl));
//atomicAdd(totalEnergy, f);
totalEnergy[y*imgWidth + x] = f;
}//end if find corr
#ifdef DEBUG_ASSIGN_BIG_ENERGY_TO_NO_CORR
else // debug: add constant penalty
{
totalEnergy[y*imgWidth + x] = data_term_energy(psi_data);
}
#endif
}
};
__global__ void dataTermCombinedKernel(const DataTermCombined cs)
{
#ifdef CALC_DATA_TERM_NUMERIC
cs.calc_dataterm_numeric();
#else
cs.calc_dataterm();
#endif
}
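	// accumulates, per Lv0 node, the data-term contributions to the block-diagonal
	// Hessian Hd (lower triangle, via atomics) and to the gradient g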
void GpuGaussNewtonSolver::calcDataTerm()
{
DataTermCombined cs;
cs.angleThres = m_param->fusion_nonRigid_angleThreSin;
cs.distThres = m_param->fusion_nonRigid_distThre;
cs.Hd_ = m_Hd.value();
cs.g_ = m_g;
cs.imgHeight = m_vmap_cano->rows();
cs.imgWidth = m_vmap_cano->cols();
cs.intr = m_intr;
cs.nmap_cano = *m_nmap_cano;
cs.nmap_live = *m_nmap_live;
cs.nmap_warp = *m_nmap_warp;
cs.vmap_cano = *m_vmap_cano;
cs.vmap_live = *m_vmap_live;
cs.vmap_warp = *m_vmap_warp;
cs.vmapKnn = m_vmapKnn;
cs.nNodes = m_numNodes;
cs.Tlw = m_pWarpField->get_rigidTransform();
cs.Tlw_inv = m_pWarpField->get_rigidTransform().fast_invert();
cs.psi_data = m_param->fusion_psi_data;
//////////////////////////////
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(1, 1, 1);
grid.x = divUp(cs.imgWidth, block.x);
grid.y = divUp(cs.imgHeight, block.y);
dataTermCombinedKernel<< <grid, block >> >(cs);
cudaSafeCall(cudaGetLastError(), "dataTermCombinedKernel");
}
__global__ void calcDataTermTotalEnergyKernel(const DataTermCombined cs)
{
cs.calcTotalEnergy();
}
#pragma endregion
#pragma region --define sparse structure
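	// Sparse structure of the regularization Jacobian Jr: one block row per valid
	// (node, knn) graph edge, with two VarPerNode-wide column blocks per block row
	// (the node itself and its neighbor). The kernels below count block rows per node,
	// map block rows back to (node, k) pairs, and fill the BSR row/column indices.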
__global__ void count_Jr_rows_kernel(int* rctptr, int nMaxNodes)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i >= nMaxNodes)
return;
KnnIdx knn = get_nodesKnn(i);
int numK = -1;
for (int k = 0; k < KnnK; ++k)
{
if (knn_k(knn, k) < nMaxNodes)
numK = k;
}
		// each node generates (numK+1) block rows, i.e. RowPerNode_RegTerm*(numK+1) scalar rows
rctptr[i] = (numK + 1);
if (i == 0)
rctptr[nMaxNodes] = 0;
}
__global__ void compute_row_map_kernel(GpuGaussNewtonSolver::JrRow2NodeMapper* row2nodeId,
const int* rctptr, int nMaxNodes)
{
int iNode = threadIdx.x + blockIdx.x*blockDim.x;
if (iNode < nMaxNodes)
{
int row_b = rctptr[iNode];
int row_e = rctptr[iNode+1];
for (int r = row_b; r < row_e; r++)
{
GpuGaussNewtonSolver::JrRow2NodeMapper mp;
mp.nodeId = iNode;
mp.k = r - row_b;
mp.ixyz = 0;
row2nodeId[r] = mp;
}
}
}
__global__ void compute_Jr_rowPtr_kernel(
int* rptr, const GpuGaussNewtonSolver::JrRow2NodeMapper* row2nodeId,
int nMaxNodes, int nBlockRows)
{
enum{
BlocksPerRow = 2
};
const int iBlockRow = threadIdx.x + blockIdx.x*blockDim.x;
if (iBlockRow >= nBlockRows)
return;
const int iNode = row2nodeId[iBlockRow].nodeId;
if (iNode < nMaxNodes)
{
KnnIdx knn = get_nodesKnn(iNode);
if (knn_k(knn, row2nodeId[iBlockRow].k) < nMaxNodes)
rptr[iBlockRow] = iBlockRow * BlocksPerRow;
}
		// the 1st thread also writes the last value
if (iBlockRow == 0)
rptr[nBlockRows] = nBlockRows * BlocksPerRow;
}
__global__ void compute_Jr_colIdx_kernel(
int* colIdx, const GpuGaussNewtonSolver::JrRow2NodeMapper* row2nodeId,
int nMaxNodes, int nBlockRows)
{
enum{
ColPerRow = 2
};
const int iBlockRow = threadIdx.x + blockIdx.x*blockDim.x;
if (iBlockRow >= nBlockRows)
return;
const int iNode = row2nodeId[iBlockRow].nodeId;
if (iNode < nMaxNodes)
{
KnnIdx knn = get_nodesKnn(iNode);
int knnNodeId = knn_k(knn, row2nodeId[iBlockRow].k);
if (knnNodeId < nMaxNodes)
{
int col_b = iBlockRow*ColPerRow;
				// each row has 2 blocks
// 1. self
colIdx[col_b] = iNode;
// 2. neighbor
colIdx[col_b + 1] = knnNodeId;
}// end if knnNodeId
}
}
__global__ void calc_B_cidx_kernel(int* B_cidx,
const int* B_rptr, int nBlockInRows, int nMaxNodes, int nLv0Nodes)
{
int iBlockRow = threadIdx.x + blockIdx.x*blockDim.x;
if (iBlockRow < nBlockInRows)
{
KnnIdx knn = get_nodesKnn(iBlockRow);
int col_b = B_rptr[iBlockRow];
for (int k = 0; k < KnnK; ++k)
{
int knnNodeId = knn_k(knn, k);
if (knnNodeId < nMaxNodes)
B_cidx[col_b++] = knnNodeId-nLv0Nodes;
}
}
}
void GpuGaussNewtonSolver::initSparseStructure()
{
// 1. compute Jr structure ==============================================
		// 1.0. decide the total number of rows we have for each node
{
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_numNodes, block.x));
count_Jr_rows_kernel << <grid, block >> >(m_Jr_RowCounter.ptr(), m_numNodes);
cudaSafeCall(cudaGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::count_Jr_rows_kernel");
thrust_wrapper::exclusive_scan(m_Jr_RowCounter.ptr(), m_Jr_RowCounter.ptr(), m_numNodes + 1);
int jrRows = 0;
cudaSafeCall(cudaMemcpy(&jrRows, m_Jr_RowCounter.ptr() + m_numNodes,
sizeof(int), cudaMemcpyDeviceToHost), "copy Jr rows to host");
m_Jr->resize(jrRows, m_numNodes, RowPerNode_RegTerm, VarPerNode);
}
		// 1.1. collect node-edge info:
		//	each low-level node is connected to k higher-level nodes,
		//	but the connections are not stored for the higher-level nodes;
		//	thus when processing each node, we add 2*k edges, i.e. 2*k*3 rows: one row per (x,y,z) component,
		//	and each row holds exactly 2*VarPerNode values
		// after this step, we can get the CSR/COO structure
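		// worked example (illustrative numbers): with KnnK = 4 and VarPerNode = 6, a node
		// whose 4 knn entries are all valid contributes 4 block rows = 4*RowPerNode_RegTerm
		// scalar rows, and each scalar row stores 2*VarPerNode = 12 nonzero values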
if (m_Jr->rows() > 0)
{
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_numNodes, block.x));
compute_row_map_kernel << <grid, block >> >(m_Jr_RowMap2NodeId.ptr(), m_Jr_RowCounter.ptr(), m_numNodes);
cudaSafeCall(cudaGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::compute_row_map_kernel");
}
if (m_Jr->rows() > 0)
{
m_Jr->beginConstructRowPtr();
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_Jr->blocksInRow(), block.x));
compute_Jr_rowPtr_kernel << <grid, block >> >(m_Jr->bsrRowPtr(),
m_Jr_RowMap2NodeId.ptr(), m_numNodes, m_Jr->blocksInRow());
cudaSafeCall(cudaGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::compute_Jr_rowPtr_kernel");
m_Jr->endConstructRowPtr();
compute_Jr_colIdx_kernel << <grid, block >> >(m_Jr->bsrColIdx(),
m_Jr_RowMap2NodeId.ptr(), m_numNodes, m_Jr->blocksInRow());
cudaSafeCall(cudaGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::compute_Jr_colIdx_kernel");
}
// 2. compute Jrt structure ==============================================
// 2.1. fill (row, col) as (col, row) from Jr and sort.
m_Jr->transposeStructureTo(*m_Jrt);
m_Jrt->subRows_structure(*m_Jrt13_structure, m_numLv0Nodes, m_numNodes);
m_Jrt13_structure->transposeStructureTo(*m_Jr13_structure);
m_Jrt13_structure->multBsr_structure(*m_Jr13_structure, *m_Hr);
// 3. compute B structure ==============================================
// 3.1 the row ptr of B is the same CSR info with the first L0 rows of Jrt.
m_B->resize(m_numLv0Nodes, m_Jr->blocksInCol() - m_numLv0Nodes, VarPerNode, VarPerNode);
m_B->setRowFromBsrRowPtr(m_Jrt->bsrRowPtr());
// 3.2 the col-idx of B
if (m_B->rows() > 0)
{
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_B->blocksInRow(), block.x));
calc_B_cidx_kernel << <grid, block >> >(
m_B->bsrColIdx(), m_B->bsrRowPtr(), m_B->blocksInRow(), m_numNodes, m_numLv0Nodes);
cudaSafeCall(cudaGetLastError(), "GpuGaussNewtonSolver::initSparseStructure::calc_B_cidx_kernel");
}
// 3.3 sort to compute Bt
m_B->transposeStructureTo(*m_Bt);
m_Hd.resize(m_numLv0Nodes, VarPerNode);
m_Hd_Linv.resize(m_numLv0Nodes, VarPerNode);
m_Hd_LLtinv.resize(m_numLv0Nodes, VarPerNode);
m_Bt->rightMultDiag_structure(m_Hd_Linv, *m_Bt_Ltinv);
// 4. single level Hessian
if (m_param->graph_single_level)
{
m_Jrt->multBsr_structure(*m_Jr, *m_H_singleLevel);
m_singleLevel_solver->analysis(m_H_singleLevel, true);
}
else
{
			// solve Q on CPU; prepare for it
m_Bt->multBsr_structure(*m_B, *m_Q, m_Hr);
m_singleLevel_solver->analysis(m_Q, true);
}
}
#pragma endregion
#pragma region --calc reg term
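	// Regularization term: for each graph edge (i, j), the residual couples the two node
	// transforms on the shared position, r_ij = sqrt(lambda*alpha_ij) * psi_huber(T_i(v_j) - T_j(v_j))
	// (and symmetrically on v_i unless DEFINE_USE_HALF_GRAPH_EDGE is defined); the Jacobian
	// is taken w.r.t. the 6 twist parameters of both endpoint nodes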
struct RegTermJacobi
{
typedef GpuGaussNewtonSolver::JrRow2NodeMapper Mapper;
enum
{
VarPerNode = GpuGaussNewtonSolver::VarPerNode,
VarPerNode2 = VarPerNode*VarPerNode,
ColPerRow = VarPerNode * 2
};
int nNodes;
int nBlockRows;
const Mapper* rows2nodeIds;
const int* rptr;
mutable float* vptr;
mutable float* fptr;
int nNodesEachLevel[WarpField::GraphLevelNum];
float dw_scale_each_level;
float dw_softness;
float psi_reg;
float lambda;
float* totalEnergy;
__device__ __forceinline__ int getNodeLevel(int nodeId)const
{
for (int k = 0; k < WarpField::GraphLevelNum; k++)
if (nodeId < nNodesEachLevel[k])
return k;
return WarpField::GraphLevelNum;
}
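		// edge weight alpha_ij: Gaussian falloff of the node-to-neighbor distance
		// (softness- and support-weighted), normalized over the knn set, then scaled
		// by dw_scale_each_level raised to the node's graph level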
__device__ __forceinline__ float calc_alpha_reg(int nodeId, int k, int nMaxNodes)const
{
KnnIdx knn = get_nodesKnn(nodeId);
float4 nodeVwi = get_nodesVw(nodeId);
Tbx::Point3 vi(convert(read_float3_4(nodeVwi)));
float4 nodeVwj = get_nodesVw(knn_k(knn, k));
float invW = min(nodeVwi.w, nodeVwj.w);
float wk = 0.f, sum_w = 0.f;
for (int knn_idx = 0; knn_idx < KnnK; knn_idx++)
{
if (knn_idx < nMaxNodes)
{
float4 nodeVwj = get_nodesVw(knn_k(knn, knn_idx));
Tbx::Point3 vj(convert(read_float3_4(nodeVwj)));
float w = __expf(-dw_softness * (vi - vj).dot(vi - vj) * invW * invW);
sum_w += w;
if (knn_idx == k)
wk = w;
}
}
			// if all neighbors are too far away to give valid weights,
			// fall back to a uniform weight (0.25).
if (sum_w < 1e-6f)
wk = 0.25f;
else
wk /= sum_w;
return wk * __powf(dw_scale_each_level, getNodeLevel(nodeId));
}
__device__ __forceinline__ Tbx::Dual_quat_cu p_qk_p_alpha_func(Tbx::Dual_quat_cu dq, int i)const
{
Tbx::Vec3 t, r;
float b, c, n;
Tbx::Quat_cu q0(0, 0, 0, 0), q1 = dq.get_non_dual_part();
switch (i)
{
case 0:
dq.to_twist(r, t);
n = r.norm();
if (n > Tbx::Dual_quat_cu::epsilon())
{
b = sin(n) / n;
c = (cos(n) - b) / (n*n);
q0.coeff0 = -r.x * b;
q0.coeff1 = b + r.x*r.x*c;
q0.coeff2 = r.x*r.y*c;
q0.coeff3 = r.x*r.z*c;
}
else
{
q0.coeff0 = 0;
q0.coeff1 = 1;
q0.coeff2 = 0;
q0.coeff3 = 0;
}
q1.coeff0 = (t.x * q0.coeff1 + t.y * q0.coeff2 + t.z * q0.coeff3) * (-0.5);
q1.coeff1 = (t.x * q0.coeff0 + t.y * q0.coeff3 - t.z * q0.coeff2) * 0.5;
q1.coeff2 = (-t.x * q0.coeff3 + t.y * q0.coeff0 + t.z * q0.coeff1) * 0.5;
q1.coeff3 = (t.x * q0.coeff2 - t.y * q0.coeff1 + t.z * q0.coeff0) * 0.5;
return Tbx::Dual_quat_cu(q0, q1);
case 1:
dq.to_twist(r, t);
n = r.norm();
if (n > Tbx::Dual_quat_cu::epsilon())
{
b = sin(n) / n;
c = (cos(n) - b) / (n*n);
q0.coeff0 = -r.y * b;
q0.coeff1 = r.y*r.x*c;
q0.coeff2 = b + r.y*r.y*c;
q0.coeff3 = r.y*r.z*c;
}
else
{
q0.coeff0 = 0;
q0.coeff1 = 0;
q0.coeff2 = 1;
q0.coeff3 = 0;
}
q1.coeff0 = (t.x * q0.coeff1 + t.y * q0.coeff2 + t.z * q0.coeff3) * (-0.5);
q1.coeff1 = (t.x * q0.coeff0 + t.y * q0.coeff3 - t.z * q0.coeff2) * 0.5;
q1.coeff2 = (-t.x * q0.coeff3 + t.y * q0.coeff0 + t.z * q0.coeff1) * 0.5;
q1.coeff3 = (t.x * q0.coeff2 - t.y * q0.coeff1 + t.z * q0.coeff0) * 0.5;
return Tbx::Dual_quat_cu(q0, q1);
case 2:
dq.to_twist(r, t);
n = r.norm();
if (n > Tbx::Dual_quat_cu::epsilon())
{
b = sin(n) / n;
c = (cos(n) - b) / (n*n);
q0.coeff0 = -r.z * b;
q0.coeff1 = r.z*r.x*c;
q0.coeff2 = r.z*r.y*c;
q0.coeff3 = b + r.z*r.z*c;
}
else
{
q0.coeff0 = 0;
q0.coeff1 = 0;
q0.coeff2 = 0;
q0.coeff3 = 1;
}
q1.coeff0 = (t.x * q0.coeff1 + t.y * q0.coeff2 + t.z * q0.coeff3) * (-0.5);
q1.coeff1 = (t.x * q0.coeff0 + t.y * q0.coeff3 - t.z * q0.coeff2) * 0.5;
q1.coeff2 = (-t.x * q0.coeff3 + t.y * q0.coeff0 + t.z * q0.coeff1) * 0.5;
q1.coeff3 = (t.x * q0.coeff2 - t.y * q0.coeff1 + t.z * q0.coeff0) * 0.5;
return Tbx::Dual_quat_cu(q0, q1);
case 3:
return Tbx::Dual_quat_cu(q0, Tbx::Quat_cu(-q1.coeff1, q1.coeff0, -q1.coeff3, q1.coeff2))*0.5;
case 4:
return Tbx::Dual_quat_cu(q0, Tbx::Quat_cu(-q1.coeff2, q1.coeff3, q1.coeff0, -q1.coeff1))*0.5;
case 5:
return Tbx::Dual_quat_cu(q0, Tbx::Quat_cu(-q1.coeff3, -q1.coeff2, q1.coeff1, q1.coeff0))*0.5;
default:
return Tbx::Dual_quat_cu();
}
}
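		// robust Huber penalty used by the regularization term:
		//   energy   rho (x) = 0.5*|x|^2  for |x| < psi,  psi*(|x| - 0.5*psi)  otherwise
		//   gradient rho'(x) = x          for |x| < psi,  psi * x/|x|          otherwise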
__device__ __forceinline__ float reg_term_energy(Tbx::Vec3 f)const
{
#ifdef USE_L2_NORM_REG_TERM
return 0.5f*f.dot(f);
#else
			// the robust Huber penalty (energy)
float s = 0;
float norm = f.norm();
if (norm < psi_reg)
s = norm * norm * 0.5f;
else
s = psi_reg*(norm - psi_reg*0.5f);
return s;
#endif
}
__device__ __forceinline__ Tbx::Vec3 reg_term_penalty(Tbx::Vec3 f)const
{
#ifdef USE_L2_NORM_REG_TERM
return f;
#else
			// the robust Huber penalty gradient
Tbx::Vec3 df;
float norm = f.norm();
if (norm < psi_reg)
df = f;
else
for (int k = 0; k < 3; k++)
df[k] = f[k]*psi_reg / norm;
return df;
#endif
}
__device__ __forceinline__ Tbx::Transfo p_SE3_p_alpha_func(Tbx::Dual_quat_cu dq, int i)const
{
Tbx::Transfo T = Tbx::Transfo::empty();
Tbx::Dual_quat_cu p_dq_p_alphai = p_qk_p_alpha_func(dq, i) * 2.f;
			//// evaluate p_dqi_p_alphak; heavily hard-coded here
			//// this hard-coding is crucial for performance
// 0:
// (0, -z0, y0, x1,
// z0, 0, -x0, y1,
//-y0, x0, 0, z1,
// 0, 0, 0, 0) * 2;
float p_dqi_p_alphak = p_dq_p_alphai[0];
T[1] += -dq[3] * p_dqi_p_alphak;
T[2] += dq[2] * p_dqi_p_alphak;
T[3] += dq[5] * p_dqi_p_alphak;
T[4] += dq[3] * p_dqi_p_alphak;
T[6] += -dq[1] * p_dqi_p_alphak;
T[7] += dq[6] * p_dqi_p_alphak;
T[8] += -dq[2] * p_dqi_p_alphak;
T[9] += dq[1] * p_dqi_p_alphak;
T[11] += dq[7] * p_dqi_p_alphak;
// 1
//( 0, y0, z0, -w1,
// y0, -2 * x0, -w0, -z1,
// z0, w0, -2 * x0, y1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[1];
T[1] += dq[2] * p_dqi_p_alphak;
T[2] += dq[3] * p_dqi_p_alphak;
T[3] += -dq[4] * p_dqi_p_alphak;
T[4] += dq[2] * p_dqi_p_alphak;
T[5] += -dq[1] * p_dqi_p_alphak * 2;
T[6] += -dq[0] * p_dqi_p_alphak;
T[7] += -dq[7] * p_dqi_p_alphak;
T[8] += dq[3] * p_dqi_p_alphak;
T[9] += dq[0] * p_dqi_p_alphak;
T[10] += -dq[1] * p_dqi_p_alphak * 2;
T[11] += dq[6] * p_dqi_p_alphak;
// 2.
// (-2 * y0, x0, w0, z1,
// x0, 0, z0, -w1,
// -w0, z0, -2 * y0, -x1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[2];
T[0] += -dq[2] * p_dqi_p_alphak * 2;
T[1] += dq[1] * p_dqi_p_alphak;
T[2] += dq[0] * p_dqi_p_alphak;
T[3] += dq[7] * p_dqi_p_alphak;
T[4] += dq[1] * p_dqi_p_alphak;
T[6] += dq[3] * p_dqi_p_alphak;
T[7] += -dq[4] * p_dqi_p_alphak;
T[8] += -dq[0] * p_dqi_p_alphak;
T[9] += dq[3] * p_dqi_p_alphak;
T[10] += -dq[2] * p_dqi_p_alphak * 2;
T[11] += -dq[5] * p_dqi_p_alphak;
// 3.
// (-2 * z0, -w0, x0, -y1,
// w0, -2 * z0, y0, x1,
// x0, y0, 0, -w1,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[3];
T[0] += -dq[3] * p_dqi_p_alphak * 2;
T[1] += -dq[0] * p_dqi_p_alphak;
T[2] += dq[1] * p_dqi_p_alphak;
T[3] += -dq[6] * p_dqi_p_alphak;
T[4] += dq[0] * p_dqi_p_alphak;
T[5] += -dq[3] * p_dqi_p_alphak * 2;
T[6] += dq[2] * p_dqi_p_alphak;
T[7] += dq[5] * p_dqi_p_alphak;
T[8] += dq[1] * p_dqi_p_alphak;
T[9] += dq[2] * p_dqi_p_alphak;
T[11] += -dq[4] * p_dqi_p_alphak;
// 4.
//( 0, 0, 0, -x0,
// 0, 0, 0, -y0,
// 0, 0, 0, -z0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[4];
T[3] += -dq[1] * p_dqi_p_alphak;
T[7] += -dq[2] * p_dqi_p_alphak;
T[11] += -dq[3] * p_dqi_p_alphak;
// 5.
// (0, 0, 0, w0,
// 0, 0, 0, z0,
// 0, 0, 0, -y0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[5];
T[3] += dq[0] * p_dqi_p_alphak;
T[7] += dq[3] * p_dqi_p_alphak;
T[11] += -dq[2] * p_dqi_p_alphak;
// 6.
// (0, 0, 0, -z0,
// 0, 0, 0, w0,
// 0, 0, 0, x0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[6];
T[3] += -dq[3] * p_dqi_p_alphak;
T[7] += dq[0] * p_dqi_p_alphak;
T[11] += dq[1] * p_dqi_p_alphak;
// 7.
// (0, 0, 0, y0,
// 0, 0, 0, -x0,
// 0, 0, 0, w0,
// 0, 0, 0, 0) * 2;
p_dqi_p_alphak = p_dq_p_alphai[7];
T[3] += dq[2] * p_dqi_p_alphak;
T[7] += -dq[1] * p_dqi_p_alphak;
T[11] += dq[0] * p_dqi_p_alphak;
return T;
}
__device__ __forceinline__ void operator () () const
{
const int iBlockRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iBlockRow >= nBlockRows)
return;
Mapper mapper = rows2nodeIds[iBlockRow];
int knnNodeId = knn_k(get_nodesKnn(mapper.nodeId), mapper.k);
if (knnNodeId >= nNodes)
return;
Tbx::Dual_quat_cu dqi, dqj;
Tbx::Vec3 ri, ti, rj, tj;
get_twist(mapper.nodeId, ri, ti);
get_twist(knnNodeId, rj, tj);
dqi.from_twist(ri, ti);
dqj.from_twist(rj, tj);
float4 nodeVwi = get_nodesVw(mapper.nodeId);
float4 nodeVwj = get_nodesVw(knnNodeId);
Tbx::Point3 vi(convert(read_float3_4(nodeVwi)));
Tbx::Point3 vj(convert(read_float3_4(nodeVwj)));
float alpha_ij = calc_alpha_reg(mapper.nodeId, mapper.k, nNodes);
float ww = sqrt(lambda * alpha_ij);
//if (isinf(nodeVwj.w))
// printf("inf found: %d %d %f %f %f %f\n", mapper.nodeId, knnNodeId,
// nodeVwj.w, 1.f / nodeVwj.w, alpha_ij, ww);
// energy=============================================
Tbx::Vec3 val = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
val = reg_term_penalty(val);
const int iRow = iBlockRow * RowPerNode_RegTerm;
fptr[iRow + 0] = val.x * ww;
fptr[iRow + 1] = val.y * ww;
fptr[iRow + 2] = val.z * ww;
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
Tbx::Vec3 val1 = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
val1 = reg_term_penalty(val1);
fptr[iRow + 3] = val1.x * ww;
fptr[iRow + 4] = val1.y * ww;
fptr[iRow + 5] = val1.z * ww;
#endif
// jacobi=============================================
int cooPos0 = rptr[iBlockRow] * RowPerNode_RegTerm * VarPerNode;
int cooPos1 = cooPos0 + RowPerNode_RegTerm * VarPerNode;
for (int ialpha = 0; ialpha < VarPerNode; ialpha++)
{
Tbx::Transfo p_Ti_p_alpha = p_SE3_p_alpha_func(dqi, ialpha);
Tbx::Transfo p_Tj_p_alpha = p_SE3_p_alpha_func(dqj, ialpha);
// partial_psi_partial_alpha
Tbx::Vec3 p_psi_p_alphai_j = (p_Ti_p_alpha * vj) * ww;
Tbx::Vec3 p_psi_p_alphaj_j = (p_Tj_p_alpha * vj) * (-ww);
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
Tbx::Vec3 p_psi_p_alphai_i = (p_Ti_p_alpha * vi) * (-ww);
Tbx::Vec3 p_psi_p_alphaj_i = (p_Tj_p_alpha * vi) * ww;
#endif
for (int ixyz = 0; ixyz < 3; ixyz++)
{
vptr[cooPos0 + ixyz*VarPerNode + ialpha] = p_psi_p_alphai_j[ixyz];
vptr[cooPos1 + ixyz*VarPerNode + ialpha] = p_psi_p_alphaj_j[ixyz];
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
vptr[cooPos0 + (3 + ixyz)*VarPerNode + ialpha] = p_psi_p_alphai_i[ixyz];
vptr[cooPos1 + (3 + ixyz)*VarPerNode + ialpha] = p_psi_p_alphaj_i[ixyz];
#endif
}
}// end for ialpha
}// end function ()
__device__ __forceinline__ float get_numeric_inc(float v) const
{
return max(1e-5f, v* 1e-3f);
}
__device__ __forceinline__ void calc_reg_numeric () const
{
const int iBlockRow = threadIdx.x + blockIdx.x * blockDim.x;
if (iBlockRow >= nBlockRows)
return;
Mapper mapper = rows2nodeIds[iBlockRow];
int knnNodeId = knn_k(get_nodesKnn(mapper.nodeId), mapper.k);
if (knnNodeId >= nNodes)
return;
Tbx::Dual_quat_cu dqi, dqj;
Tbx::Vec3 ri, ti, rj, tj;
get_twist(mapper.nodeId, ri, ti);
get_twist(knnNodeId, rj, tj);
dqi.from_twist(ri, ti);
dqj.from_twist(rj, tj);
float4 nodeVwi = get_nodesVw(mapper.nodeId);
float4 nodeVwj = get_nodesVw(knnNodeId);
Tbx::Point3 vi(convert(read_float3_4(nodeVwi)));
Tbx::Point3 vj(convert(read_float3_4(nodeVwj)));
float alpha_ij = calc_alpha_reg(mapper.nodeId, mapper.k, nNodes);
float ww = sqrt(lambda * alpha_ij);
// energy=============================================
Tbx::Vec3 val_j = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 psi_val_j = reg_term_penalty(val_j);
const int iRow = iBlockRow * RowPerNode_RegTerm;
fptr[iRow + 0] = psi_val_j.x * ww;
fptr[iRow + 1] = psi_val_j.y * ww;
fptr[iRow + 2] = psi_val_j.z * ww;
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
Tbx::Vec3 val_i = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
Tbx::Vec3 psi_val_i = reg_term_penalty(val_i);
fptr[iRow + 3] = psi_val_i.x * ww;
fptr[iRow + 4] = psi_val_i.y * ww;
fptr[iRow + 5] = psi_val_i.z * ww;
#endif
// jacobi=============================================
int cooPos0 = rptr[iBlockRow] * RowPerNode_RegTerm * VarPerNode;
int cooPos1 = cooPos0 + RowPerNode_RegTerm * VarPerNode;
for (int ialpha = 0; ialpha < 3; ialpha++)
{
float inci = get_numeric_inc(ri[ialpha]);
ri[ialpha] += inci;
dqi.from_twist(ri, ti);
Tbx::Vec3 val_j_inci = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 val_i_inci = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
ri[ialpha] -= inci;
dqi.from_twist(ri, ti);
float incj = get_numeric_inc(rj[ialpha]);
rj[ialpha] += incj;
dqj.from_twist(rj, tj);
Tbx::Vec3 val_j_incj = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 val_i_incj = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
rj[ialpha] -= incj;
dqj.from_twist(rj, tj);
for (int ixyz = 0; ixyz < 3; ixyz++)
{
vptr[cooPos0 + ixyz*VarPerNode + ialpha] = ww * (val_j_inci[ixyz] - val_j[ixyz]) / inci;
vptr[cooPos1 + ixyz*VarPerNode + ialpha] = ww * (val_j_incj[ixyz] - val_j[ixyz]) / incj;
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
vptr[cooPos0 + (3 + ixyz)*VarPerNode + ialpha] = ww * (val_i_inci[ixyz] - val_i[ixyz]) / inci;
vptr[cooPos1 + (3 + ixyz)*VarPerNode + ialpha] = ww * (val_i_incj[ixyz] - val_i[ixyz]) / incj;
#endif
}
}// end for ialpha
cooPos0 += 3;
cooPos1 += 3;
for (int ialpha = 0; ialpha < 3; ialpha++)
{
float inci = get_numeric_inc(ti[ialpha]);
ti[ialpha] += inci;
dqi.from_twist(ri, ti);
Tbx::Vec3 val_j_inci = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 val_i_inci = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
ti[ialpha] -= inci;
dqi.from_twist(ri, ti);
float incj = get_numeric_inc(tj[ialpha]);
tj[ialpha] += incj;
dqj.from_twist(rj, tj);
Tbx::Vec3 val_j_incj = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
Tbx::Vec3 val_i_incj = dqj.transform(Tbx::Point3(vi)) - dqi.transform(Tbx::Point3(vi));
tj[ialpha] -= incj;
dqj.from_twist(rj, tj);
for (int ixyz = 0; ixyz < 3; ixyz++)
{
vptr[cooPos0 + ixyz*VarPerNode + ialpha] = ww * (val_j_inci[ixyz] - val_j[ixyz]) / inci;
vptr[cooPos1 + ixyz*VarPerNode + ialpha] = ww * (val_j_incj[ixyz] - val_j[ixyz]) / incj;
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
vptr[cooPos0 + (3 + ixyz)*VarPerNode + ialpha] = ww * (val_i_inci[ixyz] - val_i[ixyz]) / inci;
vptr[cooPos1 + (3 + ixyz)*VarPerNode + ialpha] = ww * (val_i_incj[ixyz] - val_i[ixyz]) / incj;
#endif
}
}// end for ialpha
}// end function ()
__device__ __forceinline__ void calcTotalEnergy () const
{
const int iNode = threadIdx.x + blockIdx.x * blockDim.x;
if (iNode >= nBlockRows)
return;
Mapper mapper = rows2nodeIds[iNode];
int knnNodeId = knn_k(get_nodesKnn(mapper.nodeId), mapper.k);
if (knnNodeId >= nNodes)
return;
Tbx::Dual_quat_cu dqi, dqj;
Tbx::Vec3 ri, ti, rj, tj;
get_twist(mapper.nodeId, ri, ti);
get_twist(knnNodeId, rj, tj);
dqi.from_twist(ri, ti);
dqj.from_twist(rj, tj);
float4 nodeVwi = get_nodesVw(mapper.nodeId);
float4 nodeVwj = get_nodesVw(knnNodeId);
Tbx::Point3 vi(convert(read_float3_4(nodeVwi)));
Tbx::Point3 vj(convert(read_float3_4(nodeVwj)));
float alpha_ij = max(1.f / nodeVwi.w, 1.f / nodeVwj.w);
float ww2 = lambda * calc_alpha_reg(mapper.nodeId, mapper.k, nNodes);
// energy=============================================
Tbx::Vec3 val = dqi.transform(Tbx::Point3(vj)) - dqj.transform(Tbx::Point3(vj));
float eg = ww2 * reg_term_energy(val);
#ifndef DEFINE_USE_HALF_GRAPH_EDGE
Tbx::Vec3 val1 = dqi.transform(Tbx::Point3(vi)) - dqj.transform(Tbx::Point3(vi));
eg += ww2 * reg_term_energy(val1);
#endif
//atomicAdd(totalEnergy, eg);
totalEnergy[iNode] = eg;
}
};
__global__ void calcRegTerm_kernel(RegTermJacobi rj)
{
#ifdef CALC_REG_TERM_NUMERIC
rj.calc_reg_numeric();
#else
rj();
#endif
}
__global__ void calcRegTermTotalEnergy_kernel(RegTermJacobi rj)
{
rj.calcTotalEnergy();
}
void GpuGaussNewtonSolver::calcRegTerm()
{
if (m_Jr->rows() > 0)
{
CHECK_LE(m_Jr->rows(), m_f_r.size());
RegTermJacobi rj;
rj.lambda = m_param->fusion_lambda;
rj.nNodes = m_numNodes;
rj.nBlockRows = m_Jr->blocksInRow();
rj.psi_reg = m_param->fusion_psi_reg;
rj.rows2nodeIds = m_Jr_RowMap2NodeId;
rj.rptr = m_Jr->bsrRowPtr();
rj.vptr = m_Jr->value();
rj.fptr = m_f_r.ptr();
for (int k = 0; k < WarpField::GraphLevelNum; k++)
rj.nNodesEachLevel[k] = m_pWarpField->getNumNodesInLevel(k);
for (int k = 1; k < WarpField::GraphLevelNum; k++)
rj.nNodesEachLevel[k] += rj.nNodesEachLevel[k-1];
rj.dw_scale_each_level = m_param->warp_param_dw_lvup_scale;
rj.dw_softness = m_param->warp_param_softness;
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_Jr->rows() / RowPerNode_RegTerm, block.x));
calcRegTerm_kernel << <grid, block >> >(rj);
cudaSafeCall(cudaGetLastError(), "calcRegTerm_kernel");
// 2. compute Jrt ==============================================
// 2.1. fill (row, col) as (col, row) from Jr and sort.
m_Jr->transposeValueTo(*m_Jrt);
}
}
#pragma endregion
#pragma region --calcTotalEnergy
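	// per-pixel data-term energies are written to m_energy_vec[0 .. W*H) and per-edge
	// reg-term energies to m_energy_vec[W*H ..); both parts are summed with cublasSasum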
float GpuGaussNewtonSolver::calcTotalEnergy(float& data_energy, float& reg_energy)
{
float total_energy = 0.f;
cudaMemset(m_energy_vec.ptr(), 0, m_energy_vec.sizeBytes());
{
DataTermCombined cs;
cs.angleThres = m_param->fusion_nonRigid_angleThreSin;
cs.distThres = m_param->fusion_nonRigid_distThre;
cs.Hd_ = m_Hd.value();
cs.g_ = m_g;
cs.imgHeight = m_vmap_cano->rows();
cs.imgWidth = m_vmap_cano->cols();
cs.intr = m_intr;
cs.nmap_cano = *m_nmap_cano;
cs.nmap_live = *m_nmap_live;
cs.nmap_warp = *m_nmap_warp;
cs.vmap_cano = *m_vmap_cano;
cs.vmap_live = *m_vmap_live;
cs.vmap_warp = *m_vmap_warp;
cs.vmapKnn = m_vmapKnn;
cs.nNodes = m_numNodes;
cs.Tlw = m_pWarpField->get_rigidTransform();
cs.Tlw_inv = m_pWarpField->get_rigidTransform().fast_invert();
cs.psi_data = m_param->fusion_psi_data;
cs.totalEnergy = m_energy_vec.ptr();
//int zero_mem_symbol = 0;
//cudaMemcpyToSymbol(g_totalEnergy, &zero_mem_symbol, sizeof(int));
//cudaMemset(&m_tmpvec[0], 0, sizeof(float));
// 1. data term
//////////////////////////////
dim3 block(CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid(1, 1, 1);
grid.x = divUp(cs.imgWidth, block.x);
grid.y = divUp(cs.imgHeight, block.y);
calcDataTermTotalEnergyKernel << <grid, block >> >(cs);
cudaSafeCall(cudaGetLastError(), "calcDataTermTotalEnergyKernel");
}
if (m_Jr->rows() > 0)
{
RegTermJacobi rj;
rj.lambda = m_param->fusion_lambda;
rj.nNodes = m_numNodes;
rj.nBlockRows = m_Jr->blocksInRow();
rj.psi_reg = m_param->fusion_psi_reg;
rj.rows2nodeIds = m_Jr_RowMap2NodeId;
rj.rptr = m_Jr->bsrRowPtr();
rj.vptr = m_Jr->value();
rj.fptr = m_f_r.ptr();
rj.totalEnergy = m_energy_vec.ptr() + m_vmapKnn.rows()*m_vmapKnn.cols();
for (int k = 0; k < WarpField::GraphLevelNum; k++)
rj.nNodesEachLevel[k] = m_pWarpField->getNumNodesInLevel(k);
for (int k = 1; k < WarpField::GraphLevelNum; k++)
rj.nNodesEachLevel[k] += rj.nNodesEachLevel[k - 1];
rj.dw_scale_each_level = m_param->warp_param_dw_lvup_scale;
rj.dw_softness = m_param->warp_param_softness;
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_Jr->rows() / RowPerNode_RegTerm, block.x));
calcRegTermTotalEnergy_kernel << <grid, block >> >(rj);
cudaSafeCall(cudaGetLastError(), "calcRegTermTotalEnergy_kernel");
}
//cudaSafeCall(cudaMemcpy(&total_energy,
// &m_tmpvec[0], sizeof(float), cudaMemcpyDeviceToHost), "copy reg totalEnergy to host");
cublasStatus_t st = cublasSasum(m_cublasHandle, m_Jr->rows() / RowPerNode_RegTerm +
m_vmapKnn.rows()*m_vmapKnn.cols(),
m_energy_vec.ptr(), 1, &total_energy);
if (st != CUBLAS_STATUS_SUCCESS)
throw std::exception("cublass error, in cublasSnrm2");
// debug get both data and reg term energy
#if 1
reg_energy = 0.f;
if (m_Jr->rows() > 0)
{
cublasSasum(m_cublasHandle, m_Jr->rows() / RowPerNode_RegTerm,
m_energy_vec.ptr() + m_vmapKnn.rows()*m_vmapKnn.cols(),
1, ®_energy);
}
data_energy = total_energy - reg_energy;
#endif
return total_energy;
}
#pragma endregion
#pragma region --update twist
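	// applies twist <- twist + step*h per node; the from_twist/to_twist round trip
	// through a dual quaternion re-canonicalizes the (r, t) parameterization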
__global__ void updateTwist_inch_kernel(float* twist, const float* h, float step, int nNodes)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < nNodes)
{
int i6 = i * 6;
Tbx::Vec3 r(twist[i6] + step*h[i6], twist[i6 + 1] + step*h[i6 + 1], twist[i6 + 2] + step*h[i6 + 2]);
Tbx::Vec3 t(twist[i6+3] + step*h[i6+3], twist[i6 + 4] + step*h[i6 + 4], twist[i6 + 5] + step*h[i6 + 5]);
Tbx::Dual_quat_cu dq;
dq.from_twist(r, t);
dq.to_twist(r, t);
twist[i6] = r[0];
twist[i6 + 1] = r[1];
twist[i6 + 2] = r[2];
twist[i6 + 3] = t[0];
twist[i6 + 4] = t[1];
twist[i6 + 5] = t[2];
}
}
void GpuGaussNewtonSolver::updateTwist_inch(const float* h, float step)
{
dim3 block(CTA_SIZE);
dim3 grid(divUp(m_numNodes, block.x));
updateTwist_inch_kernel << <grid, block >> >(m_twist.ptr(), h, step, m_numNodes);
cudaSafeCall(cudaGetLastError(), "updateTwist_inch_kernel");
}
#pragma endregion
#pragma region --factor out rigid
__device__ float _g_common_q[8];
template<int CTA_SIZE_, typename T>
static __device__ __forceinline__ void reduce(volatile T* buffer)
{
int tid = Block::flattenedThreadId();
T val = buffer[tid];
if (CTA_SIZE_ >= 1024) { if (tid < 512) buffer[tid] = val = val + buffer[tid + 512]; __syncthreads(); }
if (CTA_SIZE_ >= 512) { if (tid < 256) buffer[tid] = val = val + buffer[tid + 256]; __syncthreads(); }
if (CTA_SIZE_ >= 256) { if (tid < 128) buffer[tid] = val = val + buffer[tid + 128]; __syncthreads(); }
if (CTA_SIZE_ >= 128) { if (tid < 64) buffer[tid] = val = val + buffer[tid + 64]; __syncthreads(); }
if (tid < 32){
if (CTA_SIZE_ >= 64) { buffer[tid] = val = val + buffer[tid + 32]; }
if (CTA_SIZE_ >= 32) { buffer[tid] = val = val + buffer[tid + 16]; }
if (CTA_SIZE_ >= 16) { buffer[tid] = val = val + buffer[tid + 8]; }
if (CTA_SIZE_ >= 8) { buffer[tid] = val = val + buffer[tid + 4]; }
if (CTA_SIZE_ >= 4) { buffer[tid] = val = val + buffer[tid + 2]; }
if (CTA_SIZE_ >= 2) { buffer[tid] = val = val + buffer[tid + 1]; }
}
}
__global__ void reduce_all_nodes_kernel(const float4* nodesDqVw, int n)
{
const float* beg = (const float*)nodesDqVw + blockIdx.x;
float sum = 0.f;
for (int i = threadIdx.x; i < n; i += blockDim.x)
sum += beg[i * 12]; // dq+vw, 12 float per node
__shared__ float smem[GpuGaussNewtonSolver::CTA_SIZE];
smem[threadIdx.x] = sum;
__syncthreads();
reduce<GpuGaussNewtonSolver::CTA_SIZE>(smem);
if (threadIdx.x == 0)
_g_common_q[blockIdx.x] = smem[0];
}
__global__ void factor_all_nodes_kernel(float4* nodesDqVw, int n, Tbx::Dual_quat_cu rigid_inv)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n)
return;
Tbx::Dual_quat_cu dq = rigid_inv * pack_dual_quat(nodesDqVw[3 * i], nodesDqVw[3 * i + 1]);
unpack_dual_quat(dq, nodesDqVw[3 * i], nodesDqVw[3 * i + 1]);
}
// optional, factor out common rigid transformations among all nodes
void GpuGaussNewtonSolver::factor_out_rigid()
{
if (m_pWarpField == nullptr)
throw std::exception("GpuGaussNewtonSolver::solve: null pointer");
if (m_pWarpField->getNumLevels() < 2)
throw std::exception("non-supported levels of warp field!");
if (m_pWarpField->getNumNodesInLevel(0) == 0)
{
printf("no warp nodes, return\n");
return;
}
const int num0 = m_pWarpField->getNumNodesInLevel(0);
const int numAll = m_pWarpField->getNumAllNodes();
Tbx::Dual_quat_cu dq(Tbx::Quat_cu(0,0,0,0), Tbx::Quat_cu(0,0,0,0));
cudaMemcpyToSymbol(_g_common_q, &dq, sizeof(Tbx::Dual_quat_cu));
reduce_all_nodes_kernel << <8, GpuGaussNewtonSolver::CTA_SIZE >> >(
m_pWarpField->getNodesDqVwPtr(0), num0);
cudaSafeCall(cudaGetLastError(), "reduce_all_nodes_kernel");
cudaMemcpyFromSymbol(&dq, _g_common_q, sizeof(Tbx::Dual_quat_cu));
if (dq.get_non_dual_part().norm() > Tbx::Dual_quat_cu::epsilon())
{
dq.normalize();
m_pWarpField->set_rigidTransform(
m_pWarpField->get_rigidTransform() * dq.to_transformation());
for (int lv = 0; lv < m_pWarpField->getNumLevels(); lv++)
{
int numLv = m_pWarpField->getNumNodesInLevel(lv);
if (numLv == 0)
break;
factor_all_nodes_kernel << <divUp(numLv, GpuGaussNewtonSolver::CTA_SIZE),
GpuGaussNewtonSolver::CTA_SIZE >> >(m_pWarpField->getNodesDqVwPtr(lv), numLv, dq.conjugate());
}
cudaSafeCall(cudaGetLastError(), "factor_all_nodes_kernel");
// re-extract info
m_pWarpField->extract_nodes_info_no_allocation(m_nodesKnn, m_twist, m_nodesVw);
checkNan(m_twist, numAll * 6, "twist after factoring rigid");
}
}
#pragma endregion
} |
b1d7192c5eecf83deb86e1d13502f1406a7d5418.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from ztrtri_diag.cu normal z -> s, Fri Jul 18 17:34:13 2014
@author Peng Du
@author Tingxing Dong
@author Mark Gates
File named strtri_diag.cu to avoid name conflict with src/strtri.o
in the library. The actual kernels are in strtri_lower.cu and strtri_upper.cu
*/
#include "common_magma.h"
#include "strtri.h"
/**
Inverts the NB x NB diagonal blocks.
This routine is used in strsm.
Same as strtri_diag, but adds stream argument.
@ingroup magma_sblas3
********************************************************************/
/**
Purpose
-------
strtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA REAL array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA REAL array of dimension (NB, ((n+NB-1)/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
stream magma_queue_t
Stream to execute in.
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_strtri_diag_stream(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
const float *dA, magma_int_t ldda,
float *d_dinvA,
magma_queue_t stream)
{
int nblocks = (n + IB - 1)/IB;
hipMemset( d_dinvA, 0, ((n+NB-1)/NB)*NB*NB * sizeof(float) );
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
hipLaunchKernelGGL(( strtri_diag_kernel_lower), dim3(nblocks), dim3(IB), 0, stream , diag, n, dA, ldda, d_dinvA );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_sgemm16_part1_lower), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm16_part2_lower), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_sgemm32_part1_lower), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm32_part2_lower), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_sgemm64_part1_lower), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm64_part2_lower), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_sgemm_above64_part1_lower), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part2_lower), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part3_lower), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
hipLaunchKernelGGL(( strtri_diag_kernel_upper), dim3(nblocks), dim3(IB), 0, stream , diag, n, dA, ldda, d_dinvA );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb*=2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_sgemm16_upper), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_sgemm32_part1_upper), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm32_part2_upper), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_sgemm64_part1_upper), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm64_part2_upper), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_sgemm_above64_part1_upper), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part2_upper), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
hipLaunchKernelGGL(( triple_sgemm_above64_part3_upper), dim3(grid), dim3(threads), 0, stream , n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
/**
@see magmablas_strtri_diag_stream
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_strtri_diag(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
const float *dA, magma_int_t ldda,
float *d_dinvA)
{
magmablas_strtri_diag_stream( uplo, diag, n, dA, ldda, d_dinvA, magma_stream );
}
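/*
    Illustrative usage sketch (not part of the original source); dA, ldda and n are
    assumed to be supplied by the caller, with NB = 128 as documented above:

        float *d_dinvA;
        magma_int_t nblocks = (n + 127) / 128;             // number of NB-sized diagonal blocks
        magma_smalloc( &d_dinvA, nblocks * 128 * 128 );    // (NB, ((n+NB-1)/NB)*NB) workspace
        magmablas_strtri_diag( MagmaLower, MagmaNonUnit, n, dA, ldda, d_dinvA );
        magma_free( d_dinvA );
*/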
| b1d7192c5eecf83deb86e1d13502f1406a7d5418.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from ztrtri_diag.cu normal z -> s, Fri Jul 18 17:34:13 2014
@author Peng Du
@author Tingxing Dong
@author Mark Gates
File named strtri_diag.cu to avoid name conflict with src/strtri.o
in the library. The actual kernels are in strtri_lower.cu and strtri_upper.cu
*/
#include "common_magma.h"
#include "strtri.h"
/**
Inverts the NB x NB diagonal blocks.
This routine is used in strsm.
Same as strtri_diag, but adds stream argument.
@ingroup magma_sblas3
********************************************************************/
/**
Purpose
-------
strtri_diag inverts the NB x NB diagonal blocks of A.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA REAL array of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = 'U', the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = 'L', the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = 'U', the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of the array A. LDDA >= max(1,N).
@param[out]
d_dinvA REAL array of dimension (NB, ((n+NB-1)/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
stream magma_queue_t
Stream to execute in.
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_strtri_diag_stream(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
const float *dA, magma_int_t ldda,
float *d_dinvA,
magma_queue_t stream)
{
int nblocks = (n + IB - 1)/IB;
cudaMemset( d_dinvA, 0, ((n+NB-1)/NB)*NB*NB * sizeof(float) );
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
strtri_diag_kernel_lower<<< nblocks, IB, 0, stream >>>( diag, n, dA, ldda, d_dinvA );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_sgemm16_part1_lower<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm16_part2_lower<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
triple_sgemm32_part1_lower<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm32_part2_lower<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
triple_sgemm64_part1_lower<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm64_part2_lower<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
default:
triple_sgemm_above64_part1_lower<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm_above64_part2_lower<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm_above64_part3_lower<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
strtri_diag_kernel_upper<<< nblocks, IB, 0, stream >>>( diag, n, dA, ldda, d_dinvA );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb*=2 ) {
int kb = jb*2;
int npages = (n + kb - 1)/kb;
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16) ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_sgemm16_upper<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 32:
triple_sgemm32_part1_upper<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm32_part2_upper<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
case 64:
triple_sgemm64_part1_upper<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm64_part2_upper<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
default:
triple_sgemm_above64_part1_upper<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm_above64_part2_upper<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
triple_sgemm_above64_part3_upper<<< grid, threads, 0, stream >>>( n, dA, ldda, d_dinvA, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
/**
@see magmablas_strtri_diag_stream
@ingroup magma_sblas3
********************************************************************/
extern "C" void
magmablas_strtri_diag(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
const float *dA, magma_int_t ldda,
float *d_dinvA)
{
magmablas_strtri_diag_stream( uplo, diag, n, dA, ldda, d_dinvA, magma_stream );
}
|
ec243400dd7d32bfd34a56e59c5f31d80d389fd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Write a first sequential implementation (matmult_gpu1()) of matrix multiplication on the
// GPU that uses only a single thread. It should work for all matrix sizes. Hints:
// You need CUDA code to allocate memory on the GPU, transfer A and B to the
// GPU, transfer C back to the CPU, and free the allocated memory.
//
// Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU.
// matrix times matrix
// m represents the number of rows (the vertical length) of A and C,
// k represents the number of columns of A and the n. of rows of B,
// n represents the number of columns (the horizontal length) of B and C.
// ____k____ ____n____ ____n____
// | | | | | |
// m | A | X k | B | = m | C |
// | | | | | |
// --------- --------- ---------
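// In the row-major layout used below, element (i,j) of A is stored at A[i*k + j],
// element (i,j) of B at B[i*n + j], and element (i,j) of C at C[i*n + j].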
__global__ void m6(int m, int n, int k, double *A, double *B, double *C) {
double sum;
int i = blockIdx.y*blockDim.y+threadIdx.y;
int j = blockIdx.x*blockDim.x+threadIdx.x;
extern __shared__ double two_blocks[];
__shared__ double* A_s;
A_s = &two_blocks[0];
__shared__ double* B_s;
B_s = &two_blocks[blockDim.x*blockDim.y];
int ii = threadIdx.y;
int jj = threadIdx.x;
const int blockdim = blockDim.x;
for (int w = 0; w < k; w += blockdim){
sum = 0.0;
A_s[ii*blockdim + jj] = A[i*k+jj+w];
B_s[ii*blockdim + jj] = B[j+ii*n+w*n];
__syncthreads();
for (int h = 0; h < blockdim; h++) {
sum += A_s[ii*blockdim + h] * B_s[h*blockdim + jj];
}
__syncthreads();
C[i*n + j] += sum;
}
}
extern "C" {
void matmult_gpu6(int m, int n, int k, double *A, double *B, double *C) {
double* d_A, * d_B, * d_C;
hipSetDevice(2);
hipMalloc((void**)&d_A, m*k * sizeof(double));
hipMalloc((void**)&d_B, k*n * sizeof(double));
hipMalloc((void**)&d_C, m*n * sizeof(double));
hipMemcpy(d_A, A, m*k * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, k*n * sizeof(double), hipMemcpyHostToDevice);
// Initialize the output matrix with zeroes.
hipMemset(d_C, 0, m*n * sizeof(double));
int bs = 16;
dim3 blockDim(bs, bs);
dim3 gridDim( (m-1)/blockDim.x+1, (n-1)/blockDim.y+1 );
// https://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
// dynamically "pass" the shared memory to the kernel function.
// Otherwise we should place some constants in the kernel function.
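    // For the default bs = 16 above this is 16*16 * 2 * sizeof(double) = 4096 bytes
    // of dynamic shared memory per block (one tile of A plus one tile of B).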
hipLaunchKernelGGL(( m6), dim3(gridDim), dim3(blockDim), (blockDim.x*blockDim.y * 2 * sizeof(double)), 0, m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
hipMemcpy(C, d_C, m*n * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_A); hipFree(d_B); hipFree(d_C);
}
}
| ec243400dd7d32bfd34a56e59c5f31d80d389fd8.cu | // Write a first sequential implementation (matmult gpu1()) of matrix multiplication on the
// GPU that uses only a single thread. It should work for all matrix sizes. Hints:
// – You need CUDA code to allocate memory on the GPU, transfer A and B to the
// GPU, transfer C back to the CPU, and free the allocated memory.
//
// Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU.
// matrix times matrix
// m represents the number of rows (the vertical length) of A and C,
// k represents the number of columns of A and the n. of rows of B,
// n represents the number of columns (the horizontal length) of B and C.
// ____k____ ____n____ ____n____
// | | | | | |
// m | A | X k | B | = m | C |
// | | | | | |
// --------- --------- ---------
__global__ void m6(int m, int n, int k, double *A, double *B, double *C) {
double sum;
int i = blockIdx.y*blockDim.y+threadIdx.y;
int j = blockIdx.x*blockDim.x+threadIdx.x;
extern __shared__ double two_blocks[];
__shared__ double* A_s;
A_s = &two_blocks[0];
__shared__ double* B_s;
B_s = &two_blocks[blockDim.x*blockDim.y];
int ii = threadIdx.y;
int jj = threadIdx.x;
const int blockdim = blockDim.x;
for (int w = 0; w < k; w += blockdim){
sum = 0.0;
A_s[ii*blockdim + jj] = A[i*k+jj+w];
B_s[ii*blockdim + jj] = B[j+ii*n+w*n];
__syncthreads();
for (int h = 0; h < blockdim; h++) {
sum += A_s[ii*blockdim + h] * B_s[h*blockdim + jj];
}
__syncthreads();
C[i*n + j] += sum;
}
}
extern "C" {
void matmult_gpu6(int m, int n, int k, double *A, double *B, double *C) {
double* d_A, * d_B, * d_C;
cudaSetDevice(2);
cudaMalloc((void**)&d_A, m*k * sizeof(double));
cudaMalloc((void**)&d_B, k*n * sizeof(double));
cudaMalloc((void**)&d_C, m*n * sizeof(double));
cudaMemcpy(d_A, A, m*k * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, k*n * sizeof(double), cudaMemcpyHostToDevice);
// Initialize the output matrix with zeroes.
cudaMemset(d_C, 0, m*n * sizeof(double));
int bs = 16;
dim3 blockDim(bs, bs);
dim3 gridDim( (m-1)/blockDim.x+1, (n-1)/blockDim.y+1 );
// https://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
// dynamically "pass" the shared memory to the kernel function.
// Otherwise we should place some constants in the kernel function.
m6<<<gridDim, blockDim, (blockDim.x*blockDim.y * 2 * sizeof(double))>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
cudaMemcpy(C, d_C, m*n * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
}
|
f312c5365b5cf81368a80a963e56888a532041d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ParticleSystem.h"
using namespace std;
__global__ void getLocationKernel(Particle *particleList, int particleSize, float *locationState) {
int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
if (particleIdx >= particleSize) {
return;
}
particleList[particleIdx].getLocation(&locationState[3 * particleIdx]);
}
void ParticleSystem::getLocation(float *locationState) {
dim3 blocksPerGrid(16, 16);
dim3 threadsPerBlock(16, 16);
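    // Note: the fixed 16x16 grid of 16x16 blocks launches at most 256*256 = 65536
    // threads, so particles with an index beyond that are not processed.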
hipLaunchKernelGGL(( getLocationKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, m_particleList, m_particleSize, locationState);
}
__global__ void getWholeStateKernel(Particle *particleList, int particleSize, float *wholeState) {
int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
if (particleIdx >= particleSize) {
return;
}
particleList[particleIdx].getWholeState(&wholeState[7 * particleIdx]);
}
void ParticleSystem::getWholeState(float *wholeState) {
dim3 blocksPerGrid(16, 16);
dim3 threadsPerBlock(16, 16);
hipLaunchKernelGGL(( getWholeStateKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, m_particleList, m_particleSize, wholeState);
}
__global__ void clearForceKernel(Particle *particleList, int particleSize) {
int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
if (particleIdx >= particleSize) {
return;
}
particleList[particleIdx].clearForce();
}
void ParticleSystem::clearForce() {
dim3 blocksPerGrid(16, 16);
dim3 threadsPerBlock(16, 16);
hipLaunchKernelGGL(( clearForceKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, m_particleList, m_particleSize);
}
| f312c5365b5cf81368a80a963e56888a532041d2.cu | #include "ParticleSystem.h"
using namespace std;
__global__ void getLocationKernel(Particle *particleList, int particleSize, float *locationState) {
int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
if (particleIdx >= particleSize) {
return;
}
particleList[particleIdx].getLocation(&locationState[3 * particleIdx]);
}
void ParticleSystem::getLocation(float *locationState) {
dim3 blocksPerGrid(16, 16);
dim3 threadsPerBlock(16, 16);
getLocationKernel<<<blocksPerGrid, threadsPerBlock>>>(m_particleList, m_particleSize, locationState);
}
__global__ void getWholeStateKernel(Particle *particleList, int particleSize, float *wholeState) {
int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
if (particleIdx >= particleSize) {
return;
}
particleList[particleIdx].getWholeState(&wholeState[7 * particleIdx]);
}
void ParticleSystem::getWholeState(float *wholeState) {
dim3 blocksPerGrid(16, 16);
dim3 threadsPerBlock(16, 16);
getWholeStateKernel<<<blocksPerGrid, threadsPerBlock>>>(m_particleList, m_particleSize, wholeState);
}
__global__ void clearForceKernel(Particle *particleList, int particleSize) {
int particleIdx = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
if (particleIdx >= particleSize) {
return;
}
particleList[particleIdx].clearForce();
}
void ParticleSystem::clearForce() {
dim3 blocksPerGrid(16, 16);
dim3 threadsPerBlock(16, 16);
clearForceKernel<<<blocksPerGrid, threadsPerBlock>>>(m_particleList, m_particleSize);
}
|
3539bb55dfdd680d7ad450a33e7b270620619e82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include "RayTracer.cuh.cu"
#include "PolygonsManager.cuh.cu"
#include "FigureConstructor.cuh.cu"
#include "Camera.cuh.cu"
#include "Vector3.cuh.cu"
#include "Lambertian.cuh.cu"
#include "Metallic.cuh.cu"
#include "Image.cuh.cu"
#include "ImageTexture.cuh.cu"
#include "Texture.cuh.cu"
#include "DiffuseLight.cuh.cu"
#include "DummyAllocs.cuh.cu"
#include "Config.cuh.cu"
#include "aabb.cuh.cu"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
namespace
{
__global__
void InitStates(hiprandState_t *state)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(42, id, 0, state + id);
}
int PrintDefaultConfig()
{
int err = system("cat ./default_config");
if (err)
{
std::cout << "You probably lost config >:(" << std::endl;
}
return err;
}
CudaMemoryLogic<hiprandState_t> states;
template<
template<typename...> class ObjectAllocator,
bool isGPU,
typename TextureResource
>
void Logic(
const RayTracing::Config &config
)
{
using namespace RayTracing;
PolygonsManager<isGPU> polygonsManager;
if (isGPU)
{
states.alloc(GRID_SIZE * BLOCK_SIZE);
hipLaunchKernelGGL(( InitStates), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, states.get());
}
// Figure A
ObjectAllocator<SolidTexture, Texture, Color> pinkTexture(Color(1, 0.07, 0.57));
ObjectAllocator<Lambertian, Material, float, float, Texture**, hiprandState_t*> pinkMaterial(
0,
1,
pinkTexture.ptr,
states.get()
);
ObjectAllocator<SolidTexture, Texture, Color> mirrorTexture(config.A.color);
ObjectAllocator<Metallic, Material, float, float, Texture**> mirrorMaterial(
config.A.transparency,
config.A.reflectance,
mirrorTexture.ptr
);
ObjectAllocator<SolidTexture, Texture, Color> edgeLightTexture(Color(2, 2, 2));
ObjectAllocator<DiffuseLight, Material, Texture**> edgeLightMaterial(edgeLightTexture.ptr);
polygonsManager.AddFigure(aabb{
config.A.origin - Vector3(1, 1, 1) * (config.A.radius+1),
config.A.origin + Vector3(1, 1, 1) * (config.A.radius+1)
});
FigureConstructor<FigureId::FancyCube, isGPU>::ConstructFigure(
polygonsManager,
{
mirrorMaterial.ptr,
pinkMaterial.ptr,
edgeLightMaterial.ptr
},
config.A.origin,
config.A.radius,
config.A.edgeLightsNum
);
// Figure B
ObjectAllocator<SolidTexture, Texture, Color> secondMirrorTexture(config.B.color);
ObjectAllocator<Metallic, Material, float, float, Texture**> secondMirrorMaterial(
config.B.transparency,
config.B.reflectance,
mirrorTexture.ptr
);
polygonsManager.AddFigure(aabb{
config.B.origin - Vector3(1, 1, 1) * config.B.radius,
config.B.origin + Vector3(1, 1, 1) * config.B.radius
});
FigureConstructor<FigureId::FancyDodecahedron, isGPU>::ConstructFigure(
polygonsManager,
{
secondMirrorMaterial.ptr,
pinkMaterial.ptr,
edgeLightMaterial.ptr
},
config.B.origin,
config.B.radius,
config.B.edgeLightsNum
);
// LightSources
std::vector<ObjectAllocator<
RayTracing::SolidTexture,
RayTracing::Texture,
RayTracing::Color
>> lightSourcesTextures;
lightSourcesTextures.reserve(config.lightSourcesNum * 10);
std::vector<ObjectAllocator<
RayTracing::DiffuseLight,
RayTracing::Material,
RayTracing::Texture**
>> lightSourcesMaterials;
lightSourcesMaterials.reserve(config.lightSourcesNum * 10);
for (int i = 0; i < config.lightSourcesNum; ++i)
{
lightSourcesTextures.emplace_back(
config.lightSources[i].color
);
lightSourcesMaterials.emplace_back(
lightSourcesTextures.back().ptr
);
polygonsManager.AddFigure(aabb{
config.lightSources[i].origin - Vector3(1, 1, 1) * config.lightSources[i].radius,
config.lightSources[i].origin + Vector3(1, 1, 1) * config.lightSources[i].radius
});
FigureConstructor<FigureId::LightSource, isGPU>::ConstructFigure(
polygonsManager,
{ lightSourcesMaterials[i].ptr },
config.lightSources[i].origin,
config.lightSources[i].radius,
0
);
}
// Floor
Image floorImage(config.floorData.texturePath);
floorImage.Init<isGPU>();
ObjectAllocator<ImageTexture<isGPU>, Texture, TextureResource, Color> floorTexture(
floorImage.GetResource<isGPU, TextureResource>(),
config.floorData.color
);
ObjectAllocator<Lambertian, Material, float, float, Texture**, hiprandState_t*> floorMaterial(
0,
config.floorData.reflectance,
floorTexture.ptr,
states.get()
);
polygonsManager.AddFigure(aabb{
Vector3(1, 1, 1) * -100,
Vector3(1, 1, 1) * 100
});
FigureConstructor<FigureId::Floor, isGPU>::ConstructFigureByPoints(
polygonsManager,
{ floorMaterial.ptr },
config.floorData.A,
config.floorData.B,
config.floorData.C,
config.floorData.D
);
polygonsManager.CompleteAdding();
RayTracer rayTracer(config, 0, config.framesNum);
rayTracer.RenderFrames(polygonsManager);
floorImage.Deinit();
polygonsManager.Deinit();
if (isGPU)
{
states.dealloc();
}
}
} // namespace
int main(int argc, char **argv)
{
try
{
if (argc == 2 && std::string(argv[1]) == "--default")
{
return PrintDefaultConfig();
}
bool useGPU = true;
if (argc == 2 && std::string(argv[1]) == "--cpu")
useGPU = false;
RayTracing::Config config;
std::cin >> config;
if (useGPU)
Logic<RayTracing::CudaHeapObject, true, hipTextureObject_t>(config);
else
Logic<RayTracing::HeapObject, false, RayTracing::Image>(config);
}
catch (std::runtime_error &err)
{
std::cout << err.what() << std::endl;
}
return 0;
}
| 3539bb55dfdd680d7ad450a33e7b270620619e82.cu | #include <string>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include "RayTracer.cuh.cu"
#include "PolygonsManager.cuh.cu"
#include "FigureConstructor.cuh.cu"
#include "Camera.cuh.cu"
#include "Vector3.cuh.cu"
#include "Lambertian.cuh.cu"
#include "Metallic.cuh.cu"
#include "Image.cuh.cu"
#include "ImageTexture.cuh.cu"
#include "Texture.cuh.cu"
#include "DiffuseLight.cuh.cu"
#include "DummyAllocs.cuh.cu"
#include "Config.cuh.cu"
#include "aabb.cuh.cu"
#include <curand.h>
#include <curand_kernel.h>
namespace
{
__global__
void InitStates(curandState *state)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(42, id, 0, state + id);
}
int PrintDefaultConfig()
{
int err = system("cat ./default_config");
if (err)
{
std::cout << "You probably lost config >:(" << std::endl;
}
return err;
}
CudaMemoryLogic<curandState> states;
template<
template<typename...> class ObjectAllocator,
bool isGPU,
typename TextureResource
>
void Logic(
const RayTracing::Config &config
)
{
using namespace RayTracing;
PolygonsManager<isGPU> polygonsManager;
if (isGPU)
{
states.alloc(GRID_SIZE * BLOCK_SIZE);
InitStates<<<GRID_SIZE, BLOCK_SIZE>>>(states.get());
}
// Figure A
ObjectAllocator<SolidTexture, Texture, Color> pinkTexture(Color(1, 0.07, 0.57));
ObjectAllocator<Lambertian, Material, float, float, Texture**, curandState*> pinkMaterial(
0,
1,
pinkTexture.ptr,
states.get()
);
ObjectAllocator<SolidTexture, Texture, Color> mirrorTexture(config.A.color);
ObjectAllocator<Metallic, Material, float, float, Texture**> mirrorMaterial(
config.A.transparency,
config.A.reflectance,
mirrorTexture.ptr
);
ObjectAllocator<SolidTexture, Texture, Color> edgeLightTexture(Color(2, 2, 2));
ObjectAllocator<DiffuseLight, Material, Texture**> edgeLightMaterial(edgeLightTexture.ptr);
polygonsManager.AddFigure(aabb{
config.A.origin - Vector3(1, 1, 1) * (config.A.radius+1),
config.A.origin + Vector3(1, 1, 1) * (config.A.radius+1)
});
FigureConstructor<FigureId::FancyCube, isGPU>::ConstructFigure(
polygonsManager,
{
mirrorMaterial.ptr,
pinkMaterial.ptr,
edgeLightMaterial.ptr
},
config.A.origin,
config.A.radius,
config.A.edgeLightsNum
);
// Figure B
ObjectAllocator<SolidTexture, Texture, Color> secondMirrorTexture(config.B.color);
ObjectAllocator<Metallic, Material, float, float, Texture**> secondMirrorMaterial(
config.B.transparency,
config.B.reflectance,
mirrorTexture.ptr
);
polygonsManager.AddFigure(aabb{
config.B.origin - Vector3(1, 1, 1) * config.B.radius,
config.B.origin + Vector3(1, 1, 1) * config.B.radius
});
FigureConstructor<FigureId::FancyDodecahedron, isGPU>::ConstructFigure(
polygonsManager,
{
secondMirrorMaterial.ptr,
pinkMaterial.ptr,
edgeLightMaterial.ptr
},
config.B.origin,
config.B.radius,
config.B.edgeLightsNum
);
// LightSources
std::vector<ObjectAllocator<
RayTracing::SolidTexture,
RayTracing::Texture,
RayTracing::Color
>> lightSourcesTextures;
lightSourcesTextures.reserve(config.lightSourcesNum * 10);
std::vector<ObjectAllocator<
RayTracing::DiffuseLight,
RayTracing::Material,
RayTracing::Texture**
>> lightSourcesMaterials;
lightSourcesMaterials.reserve(config.lightSourcesNum * 10);
for (int i = 0; i < config.lightSourcesNum; ++i)
{
lightSourcesTextures.emplace_back(
config.lightSources[i].color
);
lightSourcesMaterials.emplace_back(
lightSourcesTextures.back().ptr
);
polygonsManager.AddFigure(aabb{
config.lightSources[i].origin - Vector3(1, 1, 1) * config.lightSources[i].radius,
config.lightSources[i].origin + Vector3(1, 1, 1) * config.lightSources[i].radius
});
FigureConstructor<FigureId::LightSource, isGPU>::ConstructFigure(
polygonsManager,
{ lightSourcesMaterials[i].ptr },
config.lightSources[i].origin,
config.lightSources[i].radius,
0
);
}
// Floor
Image floorImage(config.floorData.texturePath);
floorImage.Init<isGPU>();
ObjectAllocator<ImageTexture<isGPU>, Texture, TextureResource, Color> floorTexture(
floorImage.GetResource<isGPU, TextureResource>(),
config.floorData.color
);
ObjectAllocator<Lambertian, Material, float, float, Texture**, curandState*> floorMaterial(
0,
config.floorData.reflectance,
floorTexture.ptr,
states.get()
);
polygonsManager.AddFigure(aabb{
Vector3(1, 1, 1) * -100,
Vector3(1, 1, 1) * 100
});
FigureConstructor<FigureId::Floor, isGPU>::ConstructFigureByPoints(
polygonsManager,
{ floorMaterial.ptr },
config.floorData.A,
config.floorData.B,
config.floorData.C,
config.floorData.D
);
polygonsManager.CompleteAdding();
RayTracer rayTracer(config, 0, config.framesNum);
rayTracer.RenderFrames(polygonsManager);
floorImage.Deinit();
polygonsManager.Deinit();
if (isGPU)
{
states.dealloc();
}
}
} // namespace
int main(int argc, char **argv)
{
try
{
if (argc == 2 && std::string(argv[1]) == "--default")
{
return PrintDefaultConfig();
}
bool useGPU = true;
if (argc == 2 && std::string(argv[1]) == "--cpu")
useGPU = false;
RayTracing::Config config;
std::cin >> config;
if (useGPU)
Logic<RayTracing::CudaHeapObject, true, cudaTextureObject_t>(config);
else
Logic<RayTracing::HeapObject, false, RayTracing::Image>(config);
}
catch (std::runtime_error &err)
{
std::cout << err.what() << std::endl;
}
return 0;
}
|
a8c6efc07e892fe259c6db6dc8fcd681c83c9795.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions mixed zc -> ds
*/
#include "common_magma.h"
#include "../include/magmasparse_z.h"
#include "../include/magmasparse_zc.h"
#include "../../include/magma.h"
#include "../include/mmio.h"
#include "common_magma.h"
#define PRECISION_z
#define BLOCKSIZE 512
#define min(a, b) ((a) < (b) ? (a) : (b))
// TODO get rid of global variable!
__device__ int flag = 0;
__global__ void
magmaint_zlag2c_sparse( int M, int N,
const magmaDoubleComplex *A,
magmaFloatComplex *SA ){
int thread_id = blockDim.x * blockIdx.x + threadIdx.x ;
// global thread index
if( thread_id < M ){
for( int i=0; i<N; i++ ){
SA[i*M+thread_id] = cuComplexDoubleToFloat( A[i*M+thread_id] );
}
}
}
/**
Purpose
-------
ZLAG2C converts a COMPLEX_16 matrix A to a COMPLEX
matrix SA.
RMAX is the overflow for the COMPLEX arithmetic.
ZLAG2C checks that all the entries of A are between -RMAX and
    RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
M INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
N INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
A COMPLEX_16 array, dimension (LDA,N)
On entry, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
SA COMPLEX array, dimension (LDSA,N)
On exit, if INFO=0, the M-by-N coefficient matrix SA; if
INFO>0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the COMPLEX
overflow threshold, in this case, the content
of SA in exit is unspecified.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" void
magmablas_zlag2c_sparse( magma_int_t M, magma_int_t N,
const magmaDoubleComplex *A, magma_int_t lda,
magmaFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info )
{
/*
(TODO note from original dense source)
Note
----
- We have to provide INFO at the end that zlag2c isn't doable now.
- Transfer a single value TO/FROM CPU/GPU
- SLAMCH that's needed is called from underlying BLAS
- Only used in iterative refinement
- Do we want to provide this in the release?
*/
*info = 0;
if ( M < 0 )
*info = -1;
else if ( N < 0 )
*info = -2;
else if ( lda < max(1,M) )
*info = -4;
else if ( ldsa < max(1,M) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
//return *info;
}
dim3 grid( (M+BLOCKSIZE-1)/BLOCKSIZE, 1, 1);
    // HIP needs the HIP_SYMBOL() wrapper when a __device__ symbol is passed by name.
    hipMemcpyToSymbol( HIP_SYMBOL(flag), info, sizeof(flag) );    // flag = 0
    hipLaunchKernelGGL(( magmaint_zlag2c_sparse), dim3(grid), dim3(BLOCKSIZE), 0, magma_stream ,
        M, N, A, SA ) ;
    hipMemcpyFromSymbol( info, HIP_SYMBOL(flag), sizeof(flag) );  // info = flag
}
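/*
    Illustrative usage sketch (not part of the original source); dA, ldda, m and n
    are assumed to be supplied by the caller:

        magmaFloatComplex *dSA;
        magma_int_t info;
        magma_cmalloc( &dSA, ldda * n );
        magmablas_zlag2c_sparse( m, n, dA, ldda, dSA, ldda, &info );
        magma_free( dSA );
*/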
__global__ void
magma_zlag2c_CSR_DENSE_kernel( int num_rows, int num_cols,
magmaDoubleComplex *Aval, magma_index_t *Arow,
magma_index_t *Acol, magmaFloatComplex *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
for( j=0; j<num_cols; j++)
Bval[ j ] = MAGMA_C_MAKE(0.0, 0.0);
int start = Arow[ row ];
int end = Arow[ row+1 ];
for( j=start; j<end; j++ )
Bval[ row*num_rows+Acol[j] ] = cuComplexDoubleToFloat( Aval[ j] );
}
}
__global__ void
magma_zlag2c_CSR_DENSE_kernel_1( int num_rows, int num_cols,
magmaFloatComplex *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
for( j=0; j<num_cols; j++)
Bval[ j ] = MAGMA_C_MAKE(0.0, 0.0);
}
}
__global__ void
magma_zlag2c_CSR_DENSE_kernel_2( int num_rows, int num_cols,
magmaDoubleComplex *Aval, magma_index_t *Arow,
magma_index_t *Acol, magmaFloatComplex *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
int start = Arow[ row ];
int end = Arow[ row+1 ];
for( j=start; j<end; j++ )
Bval[ row*num_rows+Acol[j] ] = cuComplexDoubleToFloat( Aval[ j] );
}
}
extern "C" void
magma_zlag2c_CSR_DENSE( magma_z_sparse_matrix A,
magma_c_sparse_matrix *B ){
magma_int_t stat;
if( A.memory_location == Magma_DEV && A.storage_type == Magma_CSR){
B->storage_type = Magma_DENSE;
B->memory_location = A.memory_location;
B->num_rows = A.num_rows;
B->num_cols = A.num_cols;
B->nnz = A.nnz;
stat = magma_cmalloc( &B->val, A.num_rows* A.num_cols );
if( stat != 0 )
{printf("Memory Allocation Error converting matrix\n"); exit(0); }
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
hipLaunchKernelGGL(( magma_zlag2c_CSR_DENSE_kernel), dim3(Bs), dim3(Gs), 0, magma_stream ,
A.num_rows, A.num_cols, A.val, A.row, A.col, B->val );
}
}
extern "C" void
magma_zlag2c_CSR_DENSE_alloc( magma_z_sparse_matrix A,
magma_c_sparse_matrix *B ){
magma_int_t stat;
if( A.memory_location == Magma_DEV && A.storage_type == Magma_CSR){
B->storage_type = Magma_DENSE;
B->memory_location = A.memory_location;
B->num_rows = A.num_rows;
B->num_cols = A.num_cols;
B->nnz = A.nnz;
stat = magma_cmalloc( &B->val, A.num_rows* A.num_cols );
if( stat != 0 )
{printf("Memory Allocation Error converting matrix\n"); exit(0); }
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
hipLaunchKernelGGL(( magma_zlag2c_CSR_DENSE_kernel_1), dim3(Bs), dim3(Gs), 0, magma_stream ,
A.num_rows, A.num_cols, B->val );
}
}
extern "C" void
magma_zlag2c_CSR_DENSE_convert( magma_z_sparse_matrix A,
magma_c_sparse_matrix *B ){
if( B->memory_location == Magma_DEV && B->storage_type == Magma_DENSE){
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
hipLaunchKernelGGL(( magma_zlag2c_CSR_DENSE_kernel_2), dim3(Bs), dim3(Gs), 0, magma_stream ,
A.num_rows, A.num_cols, A.val, A.row, A.col, B->val );
}
}
| a8c6efc07e892fe259c6db6dc8fcd681c83c9795.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@precisions mixed zc -> ds
*/
#include "common_magma.h"
#include "../include/magmasparse_z.h"
#include "../include/magmasparse_zc.h"
#include "../../include/magma.h"
#include "../include/mmio.h"
#include "common_magma.h"
#define PRECISION_z
#define BLOCKSIZE 512
#define min(a, b) ((a) < (b) ? (a) : (b))
// TODO get rid of global variable!
__device__ int flag = 0;
__global__ void
magmaint_zlag2c_sparse( int M, int N,
const magmaDoubleComplex *A,
magmaFloatComplex *SA ){
int thread_id = blockDim.x * blockIdx.x + threadIdx.x ;
// global thread index
if( thread_id < M ){
for( int i=0; i<N; i++ ){
SA[i*M+thread_id] = cuComplexDoubleToFloat( A[i*M+thread_id] );
}
}
}
/**
Purpose
-------
ZLAG2C converts a COMPLEX_16 matrix A to a COMPLEX
matrix SA.
RMAX is the overflow for the COMPLEX arithmetic.
ZLAG2C checks that all the entries of A are between -RMAX and
    RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
M INTEGER
The number of lines of the matrix A. M >= 0.
@param[in]
N INTEGER
The number of columns of the matrix A. N >= 0.
@param[in]
A COMPLEX_16 array, dimension (LDA,N)
On entry, the M-by-N coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,M).
@param[out]
SA COMPLEX array, dimension (LDSA,N)
On exit, if INFO=0, the M-by-N coefficient matrix SA; if
INFO>0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,M).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the COMPLEX
overflow threshold, in this case, the content
of SA in exit is unspecified.
@ingroup magmasparse_zaux
********************************************************************/
extern "C" void
magmablas_zlag2c_sparse( magma_int_t M, magma_int_t N,
const magmaDoubleComplex *A, magma_int_t lda,
magmaFloatComplex *SA, magma_int_t ldsa,
magma_int_t *info )
{
/*
(TODO note from original dense source)
Note
----
- We have to provide INFO at the end that zlag2c isn't doable now.
- Transfer a single value TO/FROM CPU/GPU
- SLAMCH that's needed is called from underlying BLAS
- Only used in iterative refinement
- Do we want to provide this in the release?
*/
*info = 0;
if ( M < 0 )
*info = -1;
else if ( N < 0 )
*info = -2;
else if ( lda < max(1,M) )
*info = -4;
else if ( ldsa < max(1,M) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
//return *info;
}
dim3 grid( (M+BLOCKSIZE-1)/BLOCKSIZE, 1, 1);
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
magmaint_zlag2c_sparse<<< grid, BLOCKSIZE, 0, magma_stream >>>
( M, N, A, SA ) ;
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
__global__ void
magma_zlag2c_CSR_DENSE_kernel( int num_rows, int num_cols,
magmaDoubleComplex *Aval, magma_index_t *Arow,
magma_index_t *Acol, magmaFloatComplex *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
for( j=0; j<num_cols; j++)
Bval[ j ] = MAGMA_C_MAKE(0.0, 0.0);
int start = Arow[ row ];
int end = Arow[ row+1 ];
for( j=start; j<end; j++ )
Bval[ row*num_rows+Acol[j] ] = cuComplexDoubleToFloat( Aval[ j] );
}
}
__global__ void
magma_zlag2c_CSR_DENSE_kernel_1( int num_rows, int num_cols,
magmaFloatComplex *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
for( j=0; j<num_cols; j++)
Bval[ j ] = MAGMA_C_MAKE(0.0, 0.0);
}
}
__global__ void
magma_zlag2c_CSR_DENSE_kernel_2( int num_rows, int num_cols,
magmaDoubleComplex *Aval, magma_index_t *Arow,
magma_index_t *Acol, magmaFloatComplex *Bval ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
int start = Arow[ row ];
int end = Arow[ row+1 ];
for( j=start; j<end; j++ )
Bval[ row*num_rows+Acol[j] ] = cuComplexDoubleToFloat( Aval[ j] );
}
}
extern "C" void
magma_zlag2c_CSR_DENSE( magma_z_sparse_matrix A,
magma_c_sparse_matrix *B ){
magma_int_t stat;
if( A.memory_location == Magma_DEV && A.storage_type == Magma_CSR){
B->storage_type = Magma_DENSE;
B->memory_location = A.memory_location;
B->num_rows = A.num_rows;
B->num_cols = A.num_cols;
B->nnz = A.nnz;
stat = magma_cmalloc( &B->val, A.num_rows* A.num_cols );
if( stat != 0 )
{printf("Memory Allocation Error converting matrix\n"); exit(0); }
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
magma_zlag2c_CSR_DENSE_kernel<<< Bs, Gs, 0, magma_stream >>>
( A.num_rows, A.num_cols, A.val, A.row, A.col, B->val );
}
}
extern "C" void
magma_zlag2c_CSR_DENSE_alloc( magma_z_sparse_matrix A,
magma_c_sparse_matrix *B ){
magma_int_t stat;
if( A.memory_location == Magma_DEV && A.storage_type == Magma_CSR){
B->storage_type = Magma_DENSE;
B->memory_location = A.memory_location;
B->num_rows = A.num_rows;
B->num_cols = A.num_cols;
B->nnz = A.nnz;
stat = magma_cmalloc( &B->val, A.num_rows* A.num_cols );
if( stat != 0 )
{printf("Memory Allocation Error converting matrix\n"); exit(0); }
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
magma_zlag2c_CSR_DENSE_kernel_1<<< Bs, Gs, 0, magma_stream >>>
( A.num_rows, A.num_cols, B->val );
}
}
extern "C" void
magma_zlag2c_CSR_DENSE_convert( magma_z_sparse_matrix A,
magma_c_sparse_matrix *B ){
if( B->memory_location == Magma_DEV && B->storage_type == Magma_DENSE){
dim3 Bs( BLOCKSIZE );
dim3 Gs( (A.num_rows+BLOCKSIZE-1)/BLOCKSIZE );
magma_zlag2c_CSR_DENSE_kernel_2<<< Bs, Gs, 0, magma_stream >>>
( A.num_rows, A.num_cols, A.val, A.row, A.col, B->val );
}
}
|
630136378eb64417033eaad9e8d1e875f08e2a8e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "libvideo.h"
#define SEUIL 50
//#define REPEAT_BLUR 200
#define RED 0
#define GREEN 1
#define BLUE 2
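// kernel_blur: each output channel value is the average of the pixel and its four
// direct neighbours (a 5-point box blur), applied separately to R, G and B.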
__global__ void kernel_blur(char * frame_in, char * frame_out, int height, int width, int n)
{
//for(int k=0 ; k<n ; k++)
{
for(int y=blockIdx.x ; y<height ; y+=gridDim.x)
for(int x=threadIdx.x ; x<width ; x+=3*blockDim.x)
{
if(x==0 || x==height-1 || y==0 || y==(width-1)*3)
frame_out[x*width*3+y]=frame_in[x*width*3+y];
else
{
frame_out[3*x*width+y+RED]=(frame_in[x*width*3+y+RED]
+ frame_in[x*width*3+(y+1)+RED]
+ frame_in[x*width*3+(y-1)+RED]
+ frame_in[(x+1)*width*3+y+RED]
+ frame_in[(x-1)*width*3+y+RED]
)/5;
frame_out[3*x*width+y+GREEN]=(frame_in[x*width*3+y+GREEN]
+ frame_in[x*width*3+(y+1)+GREEN]
+ frame_in[x*width*3+(y-1)+GREEN]
+ frame_in[(x+1)*width*3+y+GREEN]
+ frame_in[(x-1)*width*3+y+GREEN]
)/5;
frame_out[3*x*width+y+BLUE]=(frame_in[x*width*3+y+BLUE]
+ frame_in[x*width*3+(y+1)+BLUE]
+ frame_in[x*width*3+(y-1)+BLUE]
+ frame_in[(x+1)*width*3+y+BLUE]
+ frame_in[(x-1)*width*3+y+BLUE]
)/5;
}
}
char* tmp=frame_in;
frame_in=frame_out;
frame_out=tmp;
}
}
int main (int argc, char * argv[])
{
int flous=500;
int cpt_frame;
int frame_count;
int width, height;
printf("Opening videos - read and write\n"); fflush(stdout);
OpenReadAndWriteVideo("./Wildlife.wmv", "./Wildlife_flou.wmv");
printf("----------------------------------------\n");
frame_count = getFrameCount();
width = getWidth();
height = getHeight();
printf("Frame count = %d\n", frame_count); fflush(stdout);
printf("Width of frames: %d\n", width); fflush(stdout);
printf("Height of frames: %d\n", height); fflush(stdout);
// char * frames = (char *) malloc( sizeof(char) * frame_count * width * height * 3);
char * frame1 = (char *) malloc( sizeof(char) * width * height * 3);
/******************************/
/**** TP3 - QUESTION 4 ****/
/******************************/
char * cuda_frame_in, * cuda_frame_out;
hipMalloc((void **)&cuda_frame_in, sizeof(char) * width * height * 3);
hipMalloc((void **)&cuda_frame_out, sizeof(char) * width * height * 3);
for(cpt_frame = 190; cpt_frame < 500 && cpt_frame < frame_count; cpt_frame ++)
{
printf("%d - Read frame with index\n", cpt_frame); fflush(stdout);
readFrame_with_index(frame1, cpt_frame);
if(cpt_frame > 200 && cpt_frame < 300)
{
printf("%d - BLUR\n", cpt_frame); fflush(stdout);
hipMemcpy(cuda_frame_in, frame1, sizeof(char) * width * height * 3, hipMemcpyHostToDevice);
dim3 mygrid;
mygrid.x = 1;
dim3 myblock;
myblock.x = 1;
hipLaunchKernelGGL(( kernel_blur), dim3(mygrid), dim3(myblock), 0, 0, cuda_frame_in, cuda_frame_out, height, width, flous);
hipMemcpy(frame1, /*flous%2?cuda_frame_in:*/cuda_frame_out, sizeof(char) * width * height * 3, hipMemcpyDeviceToHost);
}
writeFrame (frame1);
}
printf("ECRITURE VIDEO FINIE\n");
hipFree(cuda_frame_in);
hipFree(cuda_frame_out);
/******************************/
/**** TP3 - FIN QUESTION 4 ****/
/******************************/
free(frame1);
return 0;
}
| 630136378eb64417033eaad9e8d1e875f08e2a8e.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include "libvideo.h"
#define SEUIL 50
//#define REPEAT_BLUR 200
#define RED 0
#define GREEN 1
#define BLUE 2
__global__ void kernel_blur(char * frame_in, char * frame_out, int height, int width, int n)
{
//for(int k=0 ; k<n ; k++)
{
for(int y=blockIdx.x ; y<height ; y+=gridDim.x)
for(int x=threadIdx.x ; x<width ; x+=3*blockDim.x)
{
if(x==0 || x==height-1 || y==0 || y==(width-1)*3)
frame_out[x*width*3+y]=frame_in[x*width*3+y];
else
{
frame_out[3*x*width+y+RED]=(frame_in[x*width*3+y+RED]
+ frame_in[x*width*3+(y+1)+RED]
+ frame_in[x*width*3+(y-1)+RED]
+ frame_in[(x+1)*width*3+y+RED]
+ frame_in[(x-1)*width*3+y+RED]
)/5;
frame_out[3*x*width+y+GREEN]=(frame_in[x*width*3+y+GREEN]
+ frame_in[x*width*3+(y+1)+GREEN]
+ frame_in[x*width*3+(y-1)+GREEN]
+ frame_in[(x+1)*width*3+y+GREEN]
+ frame_in[(x-1)*width*3+y+GREEN]
)/5;
frame_out[3*x*width+y+BLUE]=(frame_in[x*width*3+y+BLUE]
+ frame_in[x*width*3+(y+1)+BLUE]
+ frame_in[x*width*3+(y-1)+BLUE]
+ frame_in[(x+1)*width*3+y+BLUE]
+ frame_in[(x-1)*width*3+y+BLUE]
)/5;
}
}
char* tmp=frame_in;
frame_in=frame_out;
frame_out=tmp;
}
}
int main (int argc, char * argv[])
{
int flous=500;
int cpt_frame;
int frame_count;
int width, height;
printf("Opening videos - read and write\n"); fflush(stdout);
OpenReadAndWriteVideo("./Wildlife.wmv", "./Wildlife_flou.wmv");
printf("----------------------------------------\n");
frame_count = getFrameCount();
width = getWidth();
height = getHeight();
printf("Frame count = %d\n", frame_count); fflush(stdout);
printf("Width of frames: %d\n", width); fflush(stdout);
printf("Height of frames: %d\n", height); fflush(stdout);
// char * frames = (char *) malloc( sizeof(char) * frame_count * width * height * 3);
char * frame1 = (char *) malloc( sizeof(char) * width * height * 3);
/******************************/
/**** TP3 - QUESTION 4 ****/
/******************************/
char * cuda_frame_in, * cuda_frame_out;
cudaMalloc((void **)&cuda_frame_in, sizeof(char) * width * height * 3);
cudaMalloc((void **)&cuda_frame_out, sizeof(char) * width * height * 3);
for(cpt_frame = 190; cpt_frame < 500 && cpt_frame < frame_count; cpt_frame ++)
{
printf("%d - Read frame with index\n", cpt_frame); fflush(stdout);
readFrame_with_index(frame1, cpt_frame);
if(cpt_frame > 200 && cpt_frame < 300)
{
printf("%d - BLUR\n", cpt_frame); fflush(stdout);
cudaMemcpy(cuda_frame_in, frame1, sizeof(char) * width * height * 3, cudaMemcpyHostToDevice);
dim3 mygrid;
mygrid.x = 1;
dim3 myblock;
myblock.x = 1;
kernel_blur<<<mygrid, myblock>>>(cuda_frame_in, cuda_frame_out, height, width, flous);
cudaMemcpy(frame1, /*flous%2?cuda_frame_in:*/cuda_frame_out, sizeof(char) * width * height * 3, cudaMemcpyDeviceToHost);
}
writeFrame (frame1);
}
printf("ECRITURE VIDEO FINIE\n");
cudaFree(cuda_frame_in);
cudaFree(cuda_frame_out);
/******************************/
/**** TP3 - FIN QUESTION 4 ****/
/******************************/
free(frame1);
return 0;
}
|
bff4d85a73a2ba43e32c1ac0356af66aa8d10494.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "iterator.h"
#include <stdio.h>
#include <omp.h>
__global__ void jacobiIteration(double *u, double *uold, double *f, int N) {
int i, j;
for (i = 1; i < N + 1; ++i) {
for (j = 1; j < N + 1; ++j) {
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1)*(N)+j-1]);
}
}
}
__global__ void jacobiIteration_per_elem(double *u, double *uold, double *f, int N) {
int i = blockDim.y * blockIdx.y + threadIdx.y + 1;
int j = blockDim.x * blockIdx.x + threadIdx.x + 1;
if (i*(N+2)+j < (N + 2) * (N + 2)){
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1)*(N)+j-1]);
}
}
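// Two-device variant: device 0 updates the upper half of the grid and device 1 the
// lower half; uold_2 points at the other device's copy of uold so the row at the
// split (i == N/2 on device 0, i == 0 on device 1) can read its halo neighbour.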
__global__ void jacobiIteration_per_elem_2(double *u, double *uold, double *uold_2, double *f, int N, int device) {
int i,j;
if (device == 0){
j = blockDim.x * blockIdx.x + threadIdx.x + 1;
i = blockDim.y * blockIdx.y + threadIdx.y + 1;
if (i == N / 2){
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold_2[j] + f[(i-1)*(N)+j-1]);
} else {
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1)*(N)+j-1]);
}
} else if (device == 1) {
j = blockDim.x * blockIdx.x + threadIdx.x + 1;
i = blockDim.y * blockIdx.y + threadIdx.y;
if (i == 0){
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold_2[(N/2)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1+N/2)*(N)+j-1]);
} else {
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1+N/2)*(N)+j-1]);
}
}
}
| bff4d85a73a2ba43e32c1ac0356af66aa8d10494.cu | #include <math.h>
#include "iterator.h"
#include <stdio.h>
#include <omp.h>
__global__ void jacobiIteration(double *u, double *uold, double *f, int N) {
int i, j;
for (i = 1; i < N + 1; ++i) {
for (j = 1; j < N + 1; ++j) {
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1)*(N)+j-1]);
}
}
}
__global__ void jacobiIteration_per_elem(double *u, double *uold, double *f, int N) {
int i = blockDim.y * blockIdx.y + threadIdx.y + 1;
int j = blockDim.x * blockIdx.x + threadIdx.x + 1;
if (i*(N+2)+j < (N + 2) * (N + 2)){
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1)*(N)+j-1]);
}
}
__global__ void jacobiIteration_per_elem_2(double *u, double *uold, double *uold_2, double *f, int N, int device) {
int i,j;
if (device == 0){
j = blockDim.x * blockIdx.x + threadIdx.x + 1;
i = blockDim.y * blockIdx.y + threadIdx.y + 1;
if (i == N / 2){
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold_2[j] + f[(i-1)*(N)+j-1]);
} else {
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1)*(N)+j-1]);
}
} else if (device == 1) {
j = blockDim.x * blockIdx.x + threadIdx.x + 1;
i = blockDim.y * blockIdx.y + threadIdx.y;
if (i == 0){
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold_2[(N/2)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1+N/2)*(N)+j-1]);
} else {
u[i*(N+2)+j] = 0.25 * (uold[i*(N+2)+j-1] + uold[i*(N+2)+j+1] + uold[(i-1)*(N+2)+j] + uold[(i+1)*(N+2)+j] + f[(i-1+N/2)*(N)+j-1]);
}
}
}
|
8e5e337d0cdb0360c98b47cd2bb1cfafbf164a54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// to compile: nvcc vecAdd.cu -o vecAdd
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(float *a, float *b, float *c, int n){
    // global index of this thread within the 1D grid
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // make sure we stay within the bounds of arrays a, b and c
    if (tid < n){
        // each thread performs one element-wise addition
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100000;
// Host input vectors
float *h_a;
float *h_b;
//Host output vector
float *h_c;
// Device input vectors
float *d_a;
float *d_b;
//Device output vector
float *d_c;
// Size, in bytes, of each vector
size_t size = n*sizeof(float);
//////////////////////////////////////////
// Allocate memory for each vector on host
h_a = (float*) malloc (size);
h_b = (float*) malloc (size);
h_c = (float*) malloc (size);
/////////////////////////////////////////
// Allocate memory for each vector on GPU
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, size);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
/////////////////////////////////////////
// Copy host vectors to device
hipMemcpy(d_a, h_a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, size, hipMemcpyHostToDevice);
int blockSize, gridSize;
/////////////////////////////////////////
// Number of threads in each thread block
blockSize = 512;
////////////////////////////////////////
// Number of thread blocks in grid
gridSize = (n + blockSize - 1) / blockSize;
///////////////////////////////////////
// Launch the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
///////////////////////////////////////
// Copy array back to host
hipMemcpy(h_c, d_c, size, hipMemcpyDeviceToHost);
// Sum up vector c and print result divided by n, this should equal 1 within error
float sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
/////////////////////////////////////////
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
////////////////////////////////////////
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| 8e5e337d0cdb0360c98b47cd2bb1cfafbf164a54.cu | // to compile: nvcc vecAdd.cu -o vecAdd
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(float *a, float *b, float *c, int n){
    // global index of this thread within the 1D grid
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // make sure we stay within the bounds of arrays a, b and c
    if (tid < n){
        // each thread performs one element-wise addition
c[tid] = a[tid] + b[tid];
}
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100000;
// Host input vectors
float *h_a;
float *h_b;
//Host output vector
float *h_c;
// Device input vectors
float *d_a;
float *d_b;
//Device output vector
float *d_c;
// Size, in bytes, of each vector
size_t size = n*sizeof(float);
//////////////////////////////////////////
// Allocate memory for each vector on host
h_a = (float*) malloc (size);
h_b = (float*) malloc (size);
h_c = (float*) malloc (size);
/////////////////////////////////////////
// Allocate memory for each vector on GPU
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ ) {
h_a[i] = sin(i)*sin(i);
h_b[i] = cos(i)*cos(i);
}
/////////////////////////////////////////
// Copy host vectors to device
cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
int blockSize, gridSize;
/////////////////////////////////////////
// Number of threads in each thread block
blockSize = 512;
////////////////////////////////////////
// Number of thread blocks in grid
gridSize = (n + blockSize - 1) / blockSize;
///////////////////////////////////////
// Launch the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
///////////////////////////////////////
// Copy array back to host
cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
// Sum up vector c and print result divided by n, this should equal 1 within error
float sum = 0;
for(i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f\n", sum/n);
/////////////////////////////////////////
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
////////////////////////////////////////
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
da35dad53372fb1a8ca4305d8aaea213f8be70e0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cmath>
#include <stdlib.h>
#include <thrust/version.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/gather.h>
#include <thrust/logical.h>
#include <thrust/host_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/system/hip/execution_policy.h>
const int ARRAY_SIZE = 1000;
enum Method {
RAW,
WRAPPED
};
bool reduce_test(Method method)
{
double* hA;
hA = (double *) malloc(ARRAY_SIZE * sizeof(double));
for (int i = 0; i < ARRAY_SIZE; i++)
hA[i] = 1.0 * (i + 1);
double* dA;
hipMalloc((void **) &dA, ARRAY_SIZE * sizeof(double));
hipMemcpy(dA, hA, sizeof(double) * ARRAY_SIZE, hipMemcpyHostToDevice);
double maximum;
switch (method) {
case RAW:
{
maximum = thrust::reduce(thrust::hip::par, dA, dA + ARRAY_SIZE, 0.0, thrust::maximum<double>());
break;
}
case WRAPPED:
{
thrust::device_ptr<double> wdA = thrust::device_pointer_cast(dA);
maximum = thrust::reduce(wdA, wdA + ARRAY_SIZE, 0.0, thrust::maximum<double>());
break;
}
}
bool result = (fabs(maximum - ARRAY_SIZE) < 1e-10);
hipFree(dA);
free(hA);
return result;
}
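// Illustrative alternative (not part of the original test; names are hypothetical):
// a thrust::device_vector owns its storage, so the explicit hipMalloc/hipFree and
// the raw-vs-wrapped distinction disappear:
//
//     thrust::device_vector<double> dvA(hA, hA + ARRAY_SIZE);
//     double maximum = thrust::reduce(dvA.begin(), dvA.end(), 0.0,
//                                     thrust::maximum<double>());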
// ------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
int major = THRUST_MAJOR_VERSION;
int minor = THRUST_MINOR_VERSION;
std::cout << "Thrust v" << major << "." << minor << std::endl << std::endl;
std::cout << "Reduce DR ... " << std::flush << reduce_test(RAW) << std::endl;
std::cout << "Reduce DW ... " << std::flush << reduce_test(WRAPPED) << std::endl;
return 0;
}
| da35dad53372fb1a8ca4305d8aaea213f8be70e0.cu | #include <iostream>
#include <cmath>
#include <stdlib.h>
#include <thrust/version.h>
#include <thrust/scan.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/gather.h>
#include <thrust/logical.h>
#include <thrust/host_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/binary_search.h>
#include <thrust/execution_policy.h>
#include <thrust/system/cuda/execution_policy.h>
const int ARRAY_SIZE = 1000;
enum Method {
RAW,
WRAPPED
};
bool reduce_test(Method method)
{
double* hA;
hA = (double *) malloc(ARRAY_SIZE * sizeof(double));
for (int i = 0; i < ARRAY_SIZE; i++)
hA[i] = 1.0 * (i + 1);
double* dA;
cudaMalloc((void **) &dA, ARRAY_SIZE * sizeof(double));
cudaMemcpy(dA, hA, sizeof(double) * ARRAY_SIZE, cudaMemcpyHostToDevice);
double maximum;
switch (method) {
case RAW:
{
maximum = thrust::reduce(thrust::cuda::par, dA, dA + ARRAY_SIZE, 0.0, thrust::maximum<double>());
break;
}
case WRAPPED:
{
thrust::device_ptr<double> wdA = thrust::device_pointer_cast(dA);
maximum = thrust::reduce(wdA, wdA + ARRAY_SIZE, 0.0, thrust::maximum<double>());
break;
}
}
bool result = (fabs(maximum - ARRAY_SIZE) < 1e-10);
cudaFree(dA);
free(hA);
return result;
}
// ------------------------------------------------------------------------------------
int main(int argc, char **argv)
{
int major = THRUST_MAJOR_VERSION;
int minor = THRUST_MINOR_VERSION;
std::cout << "Thrust v" << major << "." << minor << std::endl << std::endl;
std::cout << "Reduce DR ... " << std::flush << reduce_test(RAW) << std::endl;
std::cout << "Reduce DW ... " << std::flush << reduce_test(WRAPPED) << std::endl;
return 0;
}
|
650339d7c04243bafe889bf3194fb0a1fa8b9a34.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "KerSortDataParticles.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned n = XSIZE*YSIZE;
unsigned pini = 1;
const unsigned *sortpart = NULL;
hipMalloc(&sortpart, XSIZE*YSIZE*sizeof(unsigned));
const double2 *a = NULL;
hipMalloc(&a, XSIZE*YSIZE*sizeof(double2));
const double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE*sizeof(double));
const float4 *c = NULL;
hipMalloc(&c, XSIZE*YSIZE*sizeof(float4));
double2 *a2 = NULL;
hipMalloc(&a2, XSIZE*YSIZE*sizeof(double2));
double *b2 = NULL;
hipMalloc(&b2, XSIZE*YSIZE*sizeof(double));
float4 *c2 = NULL;
hipMalloc(&c2, XSIZE*YSIZE*sizeof(float4));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
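// Round the problem size up to a multiple of the block dimensions so the grid covers every element.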
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
KerSortDataParticles), dim3(gridBlock),dim3(threadBlock), 0, 0, n,pini,sortpart,a,b,c,a2,b2,c2);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
KerSortDataParticles), dim3(gridBlock),dim3(threadBlock), 0, 0, n,pini,sortpart,a,b,c,a2,b2,c2);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
KerSortDataParticles), dim3(gridBlock),dim3(threadBlock), 0, 0, n,pini,sortpart,a,b,c,a2,b2,c2);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 650339d7c04243bafe889bf3194fb0a1fa8b9a34.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "KerSortDataParticles.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned n = XSIZE*YSIZE;
unsigned pini = 1;
const unsigned *sortpart = NULL;
cudaMalloc(&sortpart, XSIZE*YSIZE*sizeof(unsigned));
const double2 *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE*sizeof(double2));
const double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE*sizeof(double));
const float4 *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE*sizeof(float4));
double2 *a2 = NULL;
cudaMalloc(&a2, XSIZE*YSIZE*sizeof(double2));
double *b2 = NULL;
cudaMalloc(&b2, XSIZE*YSIZE*sizeof(double));
float4 *c2 = NULL;
cudaMalloc(&c2, XSIZE*YSIZE*sizeof(float4));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,b,c,a2,b2,c2);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,b,c,a2,b2,c2);
}
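// The 10 untimed launches above serve as GPU warm-up; only the 1000 launches below are timed.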
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,b,c,a2,b2,c2);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1f7e6db6689eb08e72206b42075bd269db758bfc.hip | // !!! This is a file automatically generated by hipify!!!
//---------------------------------------------------------------------------//
// Copyright (c) 2014 Benoit
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "perf.hpp"
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec = h_vec;
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
d_vec = h_vec;
t.start();
thrust::exclusive_scan(d_vec.begin(), d_vec.end(), d_vec.begin());
hipDeviceSynchronize();
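// The synchronize ensures all device work from the scan has completed before the timer stops;
// d_vec is refilled from h_vec each trial so every trial scans identical input.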
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
return 0;
}
| 1f7e6db6689eb08e72206b42075bd269db758bfc.cu | //---------------------------------------------------------------------------//
// Copyright (c) 2014 Benoit
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//
#include <algorithm>
#include <cstdlib>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "perf.hpp"
int main(int argc, char *argv[])
{
perf_parse_args(argc, argv);
std::cout << "size: " << PERF_N << std::endl;
thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);
// transfer data to the device
thrust::device_vector<int> d_vec = h_vec;
perf_timer t;
for(size_t trial = 0; trial < PERF_TRIALS; trial++){
d_vec = h_vec;
t.start();
thrust::exclusive_scan(d_vec.begin(), d_vec.end(), d_vec.begin());
cudaDeviceSynchronize();
t.stop();
}
std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
// transfer data back to host
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
return 0;
}
|
91d14d859cc17a05c6697b455b101bef33a1275f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cnn.h"
#include "timer.h"
#include <thrust/device_vector.h>
#include <stdio.h>
/*
* TODO
* Define kernel here
*/
__global__
void pooling(
float * inputs,
float * outputs,
int N,
int D,
int NoImg)
{
// Store each work-item's unique row and column
int i = blockIdx.x * blockDim.x + threadIdx.x; // N*N
int j = blockIdx.y * blockDim.y + threadIdx.y; // D
int n = blockIdx.z * blockDim.z + threadIdx.z; // NoImg
if (i < N*N && j < D && n < NoImg) {
int x = i/N; int y = i - x*N;
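// (x, y) are coordinates in the N x N output plane; take the maximum over the matching
// 2x2 window of the (2N x 2N) input plane for channel j of image n.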
float max = 0;
for (int k = 0; k < 2; k++) {
for (int l = 0; l < 2; l++) {
float pixel = inputs[(x*2 + k)*2*N + y*2+l + (j*N*N*4) + (4*N*N*D*n)];
max = (max > pixel) ? max : pixel;
}
}
outputs[i + (j*N*N) + (N*N*D*n)] = max;
}
}
__global__
void convolution_v1(
float * inputs,
float * outputs,
float * filters,
float * biases,
int N,
int D1,
int D2,
int NoImg)
{
// Store each work-item's unique row and column
int d = blockIdx.x * blockDim.x + threadIdx.x; // N*N
int d2 = blockIdx.y * blockDim.y + threadIdx.y; // D2
int n = blockIdx.z * blockDim.z + threadIdx.z; // NoImg
extern __shared__ float tmpFilters[];
if (d < N*N && d2 < D2 && n < NoImg) {
for (int t = 0; t < D1; t+=1) {
for (int i = 0; i < 9; i++) tmpFilters[i + (3*3* (threadIdx.y*D1 + t))] = filters[i + (3*3 * (d2*D1 + t))];
}
__syncthreads();
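// Note: the 3x3 filters for this output channel are staged into shared memory above,
// but the accumulation loop below still reads the global filters array.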
int i = d/N; int j = d- i*N;
int oIdx = i*N + j + (N*N*d2) + (N*N*D2*n);
outputs[oIdx] = 0;
// Unroll 1 times
for (int t = 0; t < D1; t+=1) {
float sum = 0;
for (int k = 0; k < 3; k++) {
for (int l = 0; l < 3; l++) {
int x = i + k - 1;
int y = j + l - 1;
if (x >= 0 && x < N && y >= 0 && y < N)
sum += inputs[x*N + y + N*N*t + (N*N*D1*n)] * filters[k*3 + l + (3*3 * (d2*D1 + t))];
}
}
outputs[oIdx] += sum;
}
// RELU
float bias = biases[d2];
outputs[oIdx] = (outputs[oIdx] + bias > 0) ? (outputs[oIdx] + bias) : 0;
}
}
__global__
void convolution_v2(
float * inputs,
float * outputs,
float * filters,
float * biases,
int N,
int D1,
int D2,
int NoImg)
{
// Store each work-item's unique row and column
int x1 = blockIdx.x * blockDim.x + threadIdx.x; // N*N*D2*NoImg
if (x1 < N*N*D2*NoImg) {
// Calculate index values
int n = x1/(N*N*D2); int tmp1 = x1 - n*(N*N*D2);
int d2 = tmp1/(N*N); int tmp2 = tmp1 - d2*(N*N);
int i = tmp2/N;
int j = tmp2 - i*N;
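// (n, d2, i, j) = (image, output channel, row, column); the loops below perform a 3x3
// "same" convolution with zero padding, accumulated over all D1 input channels.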
int oIdx = x1; //i*N + j + (N*N*d2) + (N*N*D2*n);
outputs[oIdx] = 0;
// Unroll 1 times
for (int t = 0; t < D1; t+=1) {
float sum = 0;
for (int k = 0; k < 3; k++) {
for (int l = 0; l < 3; l++) {
int x = i + k - 1;
int y = j + l - 1;
if (x >= 0 && x < N && y >= 0 && y < N)
sum += inputs[x*N + y + N*N*t + (N*N*D1*n)] * filters[k*3 + l + (3*3 * (d2*D1 + t))];
}
}
outputs[oIdx] += sum;
}
// RELU
float bias = biases[d2];
outputs[oIdx] = (outputs[oIdx] + bias > 0) ? (outputs[oIdx] + bias) : 0;
}
}
__global__
void fc(
float * input_neuron,
float * output_neuron,
float * weights,
float * biases,
const int N,
const int M,
const int NoImg)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; // M
int y = blockIdx.y * blockDim.y + threadIdx.y; // NoImg
if (x < M && y < NoImg) {
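// One thread per (output neuron x, image y): dot product of weight row x with the
// input vector of image y, plus bias, followed by ReLU.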
float sum = 0;
for (int i = 0; i < N; i++) {
sum += weights[x*N + i] * input_neuron[i + N*y];
}
output_neuron[x + M*y] = sum + biases[x];
// RELU
if (output_neuron[x + M*y] < 0) {
output_neuron[x + M*y] = 0;
}
}
}
__global__
void softmax_kernel(
float * output,
int N)
{
int i = threadIdx.x;
float sum = 0;
for (i = 0; i < N; i++) {
sum += exp(output[i]);
}
for (i = 0; i < N; i++) {
output[i] = exp(output[i]) / sum;
}
}
/************************ CUDA ************************/
#define NormalToOne(x) (((x)<=0)?(1):x)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// show memory usage of GPU
static void show_mem_gpu(const char *info) {
size_t free_byte ;
size_t total_byte ;
gpuErrchk(hipMemGetInfo( &free_byte, &total_byte )) ;
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
//printf("%s - GPU memory usage: used = %.3f MB, free = %.3f MB, total = %.3f MB\n",
// info, used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
}
float data_transfer_time = 0;
float pooling_time = 0;
float conv_time = 0;
float fc_time = 0;
float softmax_time = 0;
/*
* D = channel size
* N = width and height of an output image
* Thus, input is (D, N * 2, N * 2) and output is (D, N, N).
*/
static void pooling_layer(float *inputs, float *outputs, int D, int N, int NoImg) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
show_mem_gpu("Before pooling");
// Call gpu kernel
dim3 threadsPerBlock(8, 8, 1);
if (N < 4) threadsPerBlock.x = N*N;
threadsPerBlock.z = NormalToOne(1024 / (threadsPerBlock.x*threadsPerBlock.y));
dim3 numBlocks((N*N + threadsPerBlock.x - 1)/threadsPerBlock.x,
(D + threadsPerBlock.y - 1)/threadsPerBlock.y,
(NoImg + threadsPerBlock.z - 1)/threadsPerBlock.z);
hipEventRecord(start);
hipLaunchKernelGGL(( pooling), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, inputs, outputs, N, D, NoImg);
hipEventRecord(stop);
hipEventSynchronize(stop);
show_mem_gpu("After pooling");
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
pooling_time += milliseconds/1000;
}
static void convolution_layer_v1(float *inputs, float *outputs, float *filters, float *biases, int D2, int D1, int N, int NoImg) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Call GPU kernel
dim3 threadsPerBlock(8, 8, 16);
if (N < 4) threadsPerBlock.x = N*N;
threadsPerBlock.z = NormalToOne(1024 / (threadsPerBlock.x*threadsPerBlock.y));
dim3 numBlocks((N*N + threadsPerBlock.x - 1)/threadsPerBlock.x,
(D2 + threadsPerBlock.y - 1)/threadsPerBlock.y,
(NoImg + threadsPerBlock.z - 1)/threadsPerBlock.z);
hipEventRecord(start);
hipLaunchKernelGGL(( convolution_v1), dim3(numBlocks), dim3(threadsPerBlock), 3*3*D1*threadsPerBlock.y*sizeof(float), 0, inputs, outputs, filters, biases, N, D1, D2, NoImg);
hipEventRecord(stop);
gpuErrchk(hipEventSynchronize(stop));
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("conv time: %f ms\n", milliseconds);
conv_time += milliseconds/1000;
}
static void convolution_layer_v2(float *inputs, float *outputs, float *filters, float *biases, int D2, int D1, int N, int NoImg) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
show_mem_gpu("Before conv");
// Call GPU kernel
dim3 threadsPerBlock(1024, 1, 1);
dim3 numBlocks((N*N*D2*NoImg + threadsPerBlock.x - 1)/threadsPerBlock.x, 1, 1);
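// One thread per output element, flattened over N*N*D2*NoImg and rounded up to whole blocks.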
hipEventRecord(start);
hipLaunchKernelGGL(( convolution_v2), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, inputs, outputs, filters, biases, N, D1, D2, NoImg);
hipEventRecord(stop);
gpuErrchk(hipEventSynchronize(stop));
show_mem_gpu("After conv");
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("conv time: %f ms\n", milliseconds);
conv_time += milliseconds/1000;
}
/*
* M = output size
* N = input size
*/
static void fc_layer(float *input_neuron, float *output_neuron, float *weights, float *biases, int M, int N, int NoImg) {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Call GPU kernel
dim3 blockSize(16, 1);
if (M < 64) blockSize.x = M;
blockSize.y = NormalToOne(1024 / blockSize.x);
dim3 gridSize((M + blockSize.x - 1) / blockSize.x, (NoImg + blockSize.y - 1)/blockSize.y);
hipEventRecord(start);
hipLaunchKernelGGL(( fc), dim3(gridSize), dim3(blockSize), 0, 0, input_neuron, output_neuron, weights, biases, N, M, NoImg);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
fc_time += milliseconds/1000;
}
static void softmax(float *output, int N) {
timer_start(1);
int i;
float max = output[0];
for (i = 1; i < N; i++) {
max = (output[i] > max)?output[i]:max;
}
float sum = 0;
for (i = 0; i < N; i++) {
sum += exp(output[i] - max);
}
for (i = 0; i < N; i++) {
output[i] = exp(output[i] - max) / sum;
}
softmax_time += timer_end(1);
}
static int find_max(float *fc, int N) {
int i;
int maxid = 0;
float maxval = 0;
for (i = 0; i < N; i++) {
if (maxval < fc[i]) {
maxval = fc[i];
maxid = i;
}
}
return maxid;
}
float* alloc_layer(size_t n) {
return (float*)malloc(n * sizeof(float));
}
void cnn_init() {
/*
* TODO
* Initialize OpenCL objects as global variables. For example,
* clGetPlatformIDs(1, &platform, NULL);
*/
}
const int NETWORK_SIZES[] = {
64 * 3 * 3 * 3, 64,
64 * 64 * 3 * 3, 64,
128 * 64 * 3 * 3, 128,
128 * 128 * 3 * 3, 128,
256 * 128 * 3 * 3, 256,
256 * 256 * 3 * 3, 256,
256 * 256 * 3 * 3, 256,
512 * 256 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512, 512,
512 * 512, 512,
10 * 512, 10
};
const int OUTPUT_SIZES[] = {
64 * 32 * 32,
64 * 32 * 32,
64 * 16 * 16,
128 * 16 * 16,
128 * 16 * 16,
128 * 8 * 8,
256 * 8 * 8,
256 * 8 * 8,
256 * 8 * 8,
256 * 4 * 4,
512 * 4 * 4,
512 * 4 * 4,
512 * 4 * 4,
512 * 2 * 2,
512 * 2 * 2,
512 * 2 * 2,
512 * 2 * 2,
512 * 1 * 1,
512,
512,
10
};
void cnn(float *images, float **network, int *labels, float *confidences, int num_images, int batch_size) {
/*
* TODO
* Implement here.
* Write classification results to labels and confidences.
* See "cnn_seq.c" if you don't know what to do.
*/
// slice the network into weights and biases
float *w1_1, *b1_1, *w1_2, *b1_2;
float *w2_1, *b2_1, *w2_2, *b2_2;
float *w3_1, *b3_1, *w3_2, *b3_2, *w3_3, *b3_3;
float *w4_1, *b4_1, *w4_2, *b4_2, *w4_3, *b4_3;
float *w5_1, *b5_1, *w5_2, *b5_2, *w5_3, *b5_3;
float *w1, *b1, *w2, *b2, *w3, *b3;
// Set data for weights and biases
w1_1 = network[0]; b1_1 = network[1];
w1_2 = network[2]; b1_2 = network[3];
w2_1 = network[4]; b2_1 = network[5];
w2_2 = network[6]; b2_2 = network[7];
w3_1 = network[8]; b3_1 = network[9];
w3_2 = network[10]; b3_2 = network[11];
w3_3 = network[12]; b3_3 = network[13];
w4_1 = network[14]; b4_1 = network[15];
w4_2 = network[16]; b4_2 = network[17];
w4_3 = network[18]; b4_3 = network[19];
w5_1 = network[20]; b5_1 = network[21];
w5_2 = network[22]; b5_2 = network[23];
w5_3 = network[24]; b5_3 = network[25];
w1 = network[26]; b1 = network[27];
w2 = network[28]; b2 = network[29];
w3 = network[30]; b3 = network[31];
// view the network's values
for (int i = 0; i < NETWORK_SIZES[0]; i++) {
//printf("w1_1[%d] = %f\n", i, w1_1[i]);
}
// Allocate vectors in device memory
float *d_w1_1, *d_b1_1, *d_w1_2, *d_b1_2;
float *d_w2_1, *d_b2_1, *d_w2_2, *d_b2_2;
float *d_w3_1, *d_b3_1, *d_w3_2, *d_b3_2, *d_w3_3, *d_b3_3;
float *d_w4_1, *d_b4_1, *d_w4_2, *d_b4_2, *d_w4_3, *d_b4_3;
float *d_w5_1, *d_b5_1, *d_w5_2, *d_b5_2, *d_w5_3, *d_b5_3;
float *d_w1, *d_b1, *d_w2, *d_b2, *d_w3, *d_b3;
hipMalloc(&d_w1_1, NETWORK_SIZES[0] * sizeof(float));
hipMalloc(&d_w1_2, NETWORK_SIZES[2] * sizeof(float));
hipMalloc(&d_w2_1, NETWORK_SIZES[4] * sizeof(float));
hipMalloc(&d_w2_2, NETWORK_SIZES[6] * sizeof(float));
hipMalloc(&d_w3_1, NETWORK_SIZES[8] * sizeof(float));
hipMalloc(&d_w3_2, NETWORK_SIZES[10] * sizeof(float));
hipMalloc(&d_w3_3, NETWORK_SIZES[12] * sizeof(float));
hipMalloc(&d_w4_1, NETWORK_SIZES[14] * sizeof(float));
hipMalloc(&d_w4_2, NETWORK_SIZES[16] * sizeof(float));
hipMalloc(&d_w4_3, NETWORK_SIZES[18] * sizeof(float));
hipMalloc(&d_w5_1, NETWORK_SIZES[20] * sizeof(float));
hipMalloc(&d_w5_2, NETWORK_SIZES[22] * sizeof(float));
hipMalloc(&d_w5_3, NETWORK_SIZES[24] * sizeof(float));
hipMalloc(&d_w1, NETWORK_SIZES[26] * sizeof(float));
hipMalloc(&d_w2, NETWORK_SIZES[28] * sizeof(float));
hipMalloc(&d_w3, NETWORK_SIZES[30] * sizeof(float));
hipMalloc(&d_b1_1, NETWORK_SIZES[1] * sizeof(float));
hipMalloc(&d_b1_2, NETWORK_SIZES[3] * sizeof(float));
hipMalloc(&d_b2_1, NETWORK_SIZES[5] * sizeof(float));
hipMalloc(&d_b2_2, NETWORK_SIZES[7] * sizeof(float));
hipMalloc(&d_b3_1, NETWORK_SIZES[9] * sizeof(float));
hipMalloc(&d_b3_2, NETWORK_SIZES[11] * sizeof(float));
hipMalloc(&d_b3_3, NETWORK_SIZES[13] * sizeof(float));
hipMalloc(&d_b4_1, NETWORK_SIZES[15] * sizeof(float));
hipMalloc(&d_b4_2, NETWORK_SIZES[17] * sizeof(float));
hipMalloc(&d_b4_3, NETWORK_SIZES[19] * sizeof(float));
hipMalloc(&d_b5_1, NETWORK_SIZES[21] * sizeof(float));
hipMalloc(&d_b5_2, NETWORK_SIZES[23] * sizeof(float));
hipMalloc(&d_b5_3, NETWORK_SIZES[25] * sizeof(float));
hipMalloc(&d_b1, NETWORK_SIZES[27] * sizeof(float));
hipMalloc(&d_b2, NETWORK_SIZES[29] * sizeof(float));
hipMalloc(&d_b3, NETWORK_SIZES[31] * sizeof(float));
// Create cudaEvent to measure cuda time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Copy vectors from host memory to device memory
hipEventRecord(start);
hipMemcpy(d_w1_1, w1_1, NETWORK_SIZES[0] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w1_2, w1_2, NETWORK_SIZES[2] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w2_1, w2_1, NETWORK_SIZES[4] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w2_2, w2_2, NETWORK_SIZES[6] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w3_1, w3_1, NETWORK_SIZES[8] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w3_2, w3_2, NETWORK_SIZES[10] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w3_3, w3_3, NETWORK_SIZES[12] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w4_1, w4_1, NETWORK_SIZES[14] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w4_2, w4_2, NETWORK_SIZES[16] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w4_3, w4_3, NETWORK_SIZES[18] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w5_1, w5_1, NETWORK_SIZES[20] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w5_2, w5_2, NETWORK_SIZES[22] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w5_3, w5_3, NETWORK_SIZES[24] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w1, w1, NETWORK_SIZES[26] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w2, w2, NETWORK_SIZES[28] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_w3, w3, NETWORK_SIZES[30] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b1_1, b1_1, NETWORK_SIZES[1] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b1_2, b1_2, NETWORK_SIZES[3] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b2_1, b2_1, NETWORK_SIZES[5] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b2_2, b2_2, NETWORK_SIZES[7] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b3_1, b3_1, NETWORK_SIZES[9] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b3_2, b3_2, NETWORK_SIZES[11] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b3_3, b3_3, NETWORK_SIZES[13] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b4_1, b4_1, NETWORK_SIZES[15] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b4_2, b4_2, NETWORK_SIZES[17] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b4_3, b4_3, NETWORK_SIZES[19] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b5_1, b5_1, NETWORK_SIZES[21] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b5_2, b5_2, NETWORK_SIZES[23] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b5_3, b5_3, NETWORK_SIZES[25] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b1, b1, NETWORK_SIZES[27] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b2, b2, NETWORK_SIZES[29] * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b3, b3, NETWORK_SIZES[31] * sizeof(float), hipMemcpyHostToDevice);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
data_transfer_time = milliseconds/1000;
printf("network data transfer time = %f s\n", data_transfer_time);
data_transfer_time = 0;
show_mem_gpu("After network data transfer");
// Batch images size
int batchImg = batch_size;
int batchImg2 = 2*batchImg;
printf("batch size = %d\n", batchImg);
// Allocate output vectors in device memory to transfer between layers
/*float *d_c1_1, *d_c1_2, *d_p1;
float *d_c2_1, *d_c2_2, *d_p2;
float *d_c3_1, *d_c3_2, *d_c3_3, *d_p3;
float *d_c4_1, *d_c4_2, *d_c4_3, *d_p4;
float *d_c5_1, *d_c5_2, *d_c5_3, *d_p5;
float *d_fc1, *d_fc2, *d_fc3;*/
float *d1, *d2, *p1;
hipMalloc(&d1, batchImg * OUTPUT_SIZES[0] * sizeof(float));
hipMalloc(&d2, batchImg * OUTPUT_SIZES[1] * sizeof(float));
hipMalloc(&p1, batchImg * OUTPUT_SIZES[2] * sizeof(float));
/*hipMalloc(&d_p1, batchImg * OUTPUT_SIZES[2] * sizeof(float));
hipMalloc(&d_c2_1, batchImg * OUTPUT_SIZES[3] * sizeof(float));
hipMalloc(&d_c2_2, batchImg * OUTPUT_SIZES[4] * sizeof(float));
hipMalloc(&d_p2, batchImg * OUTPUT_SIZES[5] * sizeof(float));
hipMalloc(&d_c3_1, batchImg * OUTPUT_SIZES[6] * sizeof(float));
hipMalloc(&d_c3_2, batchImg * OUTPUT_SIZES[7] * sizeof(float));
hipMalloc(&d_c3_3, batchImg * OUTPUT_SIZES[8] * sizeof(float));
hipMalloc(&d_p3, batchImg * OUTPUT_SIZES[9] * sizeof(float));
hipMalloc(&d_c4_1, batchImg * OUTPUT_SIZES[10] * sizeof(float));
hipMalloc(&d_c4_2, batchImg * OUTPUT_SIZES[11] * sizeof(float));
hipMalloc(&d_c4_3, batchImg * OUTPUT_SIZES[12] * sizeof(float));
hipMalloc(&d_p4, batchImg * OUTPUT_SIZES[13] * sizeof(float));
hipMalloc(&d_c5_1, batchImg * OUTPUT_SIZES[14] * sizeof(float));
hipMalloc(&d_c5_2, batchImg * OUTPUT_SIZES[15] * sizeof(float));
hipMalloc(&d_c5_3, batchImg * OUTPUT_SIZES[16] * sizeof(float));
hipMalloc(&d_p5, batchImg * OUTPUT_SIZES[17] * sizeof(float));
hipMalloc(&d_fc1, batchImg * OUTPUT_SIZES[18] * sizeof(float));
hipMalloc(&d_fc2, batchImg * OUTPUT_SIZES[19] * sizeof(float));
hipMalloc(&d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float));*/
show_mem_gpu("After malloc output vectors");
// run network
size_t image_size = batchImg*3*32*32 * sizeof(float);
float *d_image;
hipMalloc(&d_image, image_size);
int start_num_images = num_images%batchImg;
// Images are processed in batches
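// Each iteration runs the first conv block once per half-batch of batchImg images, then the rest
// of the network on the combined 2*batchImg batch; the first num_images % batchImg images are
// skipped here (the commented-out block near the end of this function was intended to handle that remainder).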
for(int i = start_num_images; i < num_images; i += batchImg2)
{
printf("i = %d\n", i);
batchImg = batch_size;
// Copy image from host to device
float *image = images + i * 3 * 32 * 32;
hipEventRecord(start);
hipMemcpy(d_image, image, image_size, hipMemcpyHostToDevice);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
convolution_layer_v2(d_image, d1, d_w1_1, d_b1_1, 64, 3, 32, batchImg);
convolution_layer_v2(d1, d2, d_w1_2, d_b1_2, 64, 64, 32, batchImg);
pooling_layer(d2, p1, 64, 16, batchImg);
/////////////////
// Copy image from host to device
image = images + (i+batchImg) * 3 * 32 * 32;
hipEventRecord(start);
hipMemcpy(d_image, image, image_size, hipMemcpyHostToDevice);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
convolution_layer_v2(d_image, d1, d_w1_1, d_b1_1, 64, 3, 32, batchImg);
convolution_layer_v2(d1, d2, d_w1_2, d_b1_2, 64, 64, 32, batchImg);
pooling_layer(d2, d1, 64, 16, batchImg);
// copy p1 to d1
batchImg = batchImg2;
////////////////
convolution_layer_v2(d2, d1, d_w2_1, d_b2_1, 128, 64, 16, batchImg);
convolution_layer_v2(d1, d2, d_w2_2, d_b2_2, 128, 128, 16, batchImg);
pooling_layer(d2, d1, 128, 8, batchImg);
convolution_layer_v2(d1, d2, d_w3_1, d_b3_1, 256, 128, 8, batchImg);
convolution_layer_v2(d2, d1, d_w3_2, d_b3_2, 256, 256, 8, batchImg);
convolution_layer_v2(d1, d2, d_w3_3, d_b3_3, 256, 256, 8, batchImg);
pooling_layer(d2, d1, 256, 4, batchImg);
convolution_layer_v2(d1, d2, d_w4_1, d_b4_1, 512, 256, 4, batchImg);
convolution_layer_v2(d2, d1, d_w4_2, d_b4_2, 512, 512, 4, batchImg);
convolution_layer_v2(d1, d2, d_w4_3, d_b4_3, 512, 512, 4, batchImg);
pooling_layer(d2, d1, 512, 2, batchImg);
convolution_layer_v2(d1, d2, d_w5_1, d_b5_1, 512, 512, 2, batchImg);
convolution_layer_v2(d2, d1, d_w5_2, d_b5_2, 512, 512, 2, batchImg);
convolution_layer_v2(d1, d2, d_w5_3, d_b5_3, 512, 512, 2, batchImg);
pooling_layer(d2, d1, 512, 1, batchImg);
fc_layer(d1, d2, d_w1, d_b1, 512, 512, batchImg);
fc_layer(d2, d1, d_w2, d_b2, 512, 512, batchImg);
fc_layer(d1, d2, d_w3, d_b3, 10, 512, batchImg);
// Copy result from device memory to host memory
float *fc3_mul = alloc_layer(OUTPUT_SIZES[20] * batchImg);
hipEventRecord(start);
hipMemcpy(fc3_mul, d2, batchImg * OUTPUT_SIZES[20] * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
// Predicted labels
for (int j = 0; j < batchImg; j++) {
float *fc3 = fc3_mul + j*10;
softmax(fc3, 10);
int idx = i + j;
labels[idx] = find_max(fc3, 10);
confidences[idx] = fc3[labels[idx]];
}
free(fc3_mul);
}
/*// The remaining images
size_t image_size2 = start_num_images*3*32*32 * sizeof(float);
float *d_image2;
batchImg = start_num_images;
for(int i = 0; i < start_num_images; i += start_num_images)
{
// Copy image from host to device
float *image = images + i * 3 * 32 * 32;
hipEventRecord(start);
hipMalloc(&d_image2, image_size2);
hipMemcpy(d_image2, image, image_size2, hipMemcpyHostToDevice);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
convolution_layer_v2(d_image2, d1, d_w1_1, d_b1_1, 64, 3, 32, batchImg);
convolution_layer_v2(d1, d2, d_w1_2, d_b1_2, 64, 64, 32, batchImg);
pooling_layer(d2, d1, 64, 16, batchImg);
convolution_layer_v2(d2, d1, d_w2_1, d_b2_1, 128, 64, 16, batchImg);
convolution_layer_v2(d1, d2, d_w2_2, d_b2_2, 128, 128, 16, batchImg);
pooling_layer(d2, d1, 128, 8, batchImg);
convolution_layer_v2(d1, d2, d_w3_1, d_b3_1, 256, 128, 8, batchImg);
convolution_layer_v2(d2, d1, d_w3_2, d_b3_2, 256, 256, 8, batchImg);
convolution_layer_v2(d1, d2, d_w3_3, d_b3_3, 256, 256, 8, batchImg);
pooling_layer(d2, d1, 256, 4, batchImg);
convolution_layer_v2(d1, d2, d_w4_1, d_b4_1, 512, 256, 4, batchImg);
convolution_layer_v2(d2, d1, d_w4_2, d_b4_2, 512, 512, 4, batchImg);
convolution_layer_v2(d1, d2, d_w4_3, d_b4_3, 512, 512, 4, batchImg);
pooling_layer(d2, d1, 512, 2, batchImg);
convolution_layer_v2(d1, d2, d_w5_1, d_b5_1, 512, 512, 2, batchImg);
convolution_layer_v2(d2, d1, d_w5_2, d_b5_2, 512, 512, 2, batchImg);
convolution_layer_v2(d1, d2, d_w5_3, d_b5_3, 512, 512, 2, batchImg);
pooling_layer(d2, d1, 512, 1, batchImg);
fc_layer(d1, d2, d_w1, d_b1, 512, 512, batchImg);
fc_layer(d2, d1, d_w2, d_b2, 512, 512, batchImg);
fc_layer(d1, d2, d_w3, d_b3, 10, 512, batchImg);
// Copy result from device memory to host memory
float *fc3_mul = alloc_layer(OUTPUT_SIZES[20] * batchImg);
hipEventRecord(start);
hipMemcpy(fc3_mul, d2, batchImg * OUTPUT_SIZES[20] * sizeof(float), hipMemcpyDeviceToHost);
hipEventRecord(stop);
hipEventSynchronize(stop);
milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
// Predicted labels
for (int j = 0; j < batchImg; j++) {
float *fc3 = fc3_mul + j*10;
softmax(fc3, 10);
int idx = i + j;
labels[idx] = find_max(fc3, 10);
confidences[idx] = fc3[labels[idx]];
}
free(fc3_mul);
}
hipFree(d_image2);*/
printf("data transfer time = %f s\n", data_transfer_time);
printf("pooing time = %f s\n", pooling_time);
printf("convolution time = %f s\n", conv_time);
printf("fully connected time = %f s\n", fc_time);
printf("softmax time = %f s\n", softmax_time);
hipFree(d_w1_1); hipFree(d_b1_1); hipFree(d_w1_2); hipFree(d_b1_2);
hipFree(d_w2_1); hipFree(d_b2_1); hipFree(d_w2_2); hipFree(d_b2_2);
hipFree(d_w3_1); hipFree(d_b3_1); hipFree(d_w3_2); hipFree(d_b3_2); hipFree(d_w3_3); hipFree(d_b3_3);
hipFree(d_w4_1); hipFree(d_b4_1); hipFree(d_w4_2); hipFree(d_b4_2); hipFree(d_w4_3); hipFree(d_b4_3);
hipFree(d_w5_1); hipFree(d_b5_1); hipFree(d_w5_2); hipFree(d_b5_2); hipFree(d_w5_3); hipFree(d_b5_3);
hipFree(d_w1); hipFree(d_b1); hipFree(d_w2); hipFree(d_b2); hipFree(d_w3); hipFree(d_b3);
/*hipFree(d_c1_1); hipFree(d_c1_2); hipFree(d_p1);
hipFree(d_c2_1); hipFree(d_c2_2); hipFree(d_p2);
hipFree(d_c3_1); hipFree(d_c3_2); hipFree(d_c3_3); hipFree(d_p3);
hipFree(d_c4_1); hipFree(d_c4_2); hipFree(d_c4_3); hipFree(d_p4);
hipFree(d_c5_1); hipFree(d_c5_2); hipFree(d_c5_3); hipFree(d_p5);
hipFree(d_fc1); hipFree(d_fc2); hipFree(d_fc3);*/
}
| 91d14d859cc17a05c6697b455b101bef33a1275f.cu | #include "cnn.h"
#include "timer.h"
#include <thrust/device_vector.h>
#include <stdio.h>
/*
* TODO
* Define kernel here
*/
__global__
void pooling(
float * inputs,
float * outputs,
int N,
int D,
int NoImg)
{
// Store each work-item’s unique row and column
int i = blockIdx.x * blockDim.x + threadIdx.x; // N*N
int j = blockIdx.y * blockDim.y + threadIdx.y; // D
int n = blockIdx.z * blockDim.z + threadIdx.z; // NoImg
if (i < N*N && j < D && n < NoImg) {
int x = i/N; int y = i - x*N;
float max = 0;
for (int k = 0; k < 2; k++) {
for (int l = 0; l < 2; l++) {
float pixel = inputs[(x*2 + k)*2*N + y*2+l + (j*N*N*4) + (4*N*N*D*n)];
max = (max > pixel) ? max : pixel;
}
}
outputs[i + (j*N*N) + (N*N*D*n)] = max;
}
}
__global__
void convolution_v1(
float * inputs,
float * outputs,
float * filters,
float * biases,
int N,
int D1,
int D2,
int NoImg)
{
// Store each work-item’s unique row and column
int d = blockIdx.x * blockDim.x + threadIdx.x; // N*N
int d2 = blockIdx.y * blockDim.y + threadIdx.y; // D2
int n = blockIdx.z * blockDim.z + threadIdx.z; // NoImg
extern __shared__ float tmpFilters[];
if (d < N*N && d2 < D2 && n < NoImg) {
for (int t = 0; t < D1; t+=1) {
for (int i = 0; i < 9; i++) tmpFilters[i + (3*3* (threadIdx.y*D1 + t))] = filters[i + (3*3 * (d2*D1 + t))];
}
__syncthreads();
int i = d/N; int j = d- i*N;
int oIdx = i*N + j + (N*N*d2) + (N*N*D2*n);
outputs[oIdx] = 0;
// Unroll 1 times
for (int t = 0; t < D1; t+=1) {
float sum = 0;
for (int k = 0; k < 3; k++) {
for (int l = 0; l < 3; l++) {
int x = i + k - 1;
int y = j + l - 1;
if (x >= 0 && x < N && y >= 0 && y < N)
sum += inputs[x*N + y + N*N*t + (N*N*D1*n)] * filters[k*3 + l + (3*3 * (d2*D1 + t))];
}
}
outputs[oIdx] += sum;
}
// RELU
float bias = biases[d2];
outputs[oIdx] = (outputs[oIdx] + bias > 0) ? (outputs[oIdx] + bias) : 0;
}
}
__global__
void convolution_v2(
float * inputs,
float * outputs,
float * filters,
float * biases,
int N,
int D1,
int D2,
int NoImg)
{
// Store each work-item’s unique row and column
int x1 = blockIdx.x * blockDim.x + threadIdx.x; // N*N*D2*NoImg
if (x1 < N*N*D2*NoImg) {
// Calculate index values
int n = x1/(N*N*D2); int tmp1 = x1 - n*(N*N*D2);
int d2 = tmp1/(N*N); int tmp2 = tmp1 - d2*(N*N);
int i = tmp2/N;
int j = tmp2 - i*N;
int oIdx = x1; //i*N + j + (N*N*d2) + (N*N*D2*n);
outputs[oIdx] = 0;
// Unroll 1 times
for (int t = 0; t < D1; t+=1) {
float sum = 0;
for (int k = 0; k < 3; k++) {
for (int l = 0; l < 3; l++) {
int x = i + k - 1;
int y = j + l - 1;
if (x >= 0 && x < N && y >= 0 && y < N)
sum += inputs[x*N + y + N*N*t + (N*N*D1*n)] * filters[k*3 + l + (3*3 * (d2*D1 + t))];
}
}
outputs[oIdx] += sum;
}
// RELU
float bias = biases[d2];
outputs[oIdx] = (outputs[oIdx] + bias > 0) ? (outputs[oIdx] + bias) : 0;
}
}
__global__
void fc(
float * input_neuron,
float * output_neuron,
float * weights,
float * biases,
const int N,
const int M,
const int NoImg)
{
int x = blockIdx.x * blockDim.x + threadIdx.x; // M
int y = blockIdx.y * blockDim.y + threadIdx.y; // NoImg
if (x < M && y < NoImg) {
float sum = 0;
for (int i = 0; i < N; i++) {
sum += weights[x*N + i] * input_neuron[i + N*y];
}
output_neuron[x + M*y] = sum + biases[x];
// RELU
if (output_neuron[x + M*y] < 0) {
output_neuron[x + M*y] = 0;
}
}
}
__global__
void softmax_kernel(
float * output,
int N)
{
int i = threadIdx.x;
float sum = 0;
for (i = 0; i < N; i++) {
sum += exp(output[i]);
}
for (i = 0; i < N; i++) {
output[i] = exp(output[i]) / sum;
}
}
/************************ CUDA ************************/
#define NormalToOne(x) (((x)<=0)?(1):x)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// show memory usage of GPU
static void show_mem_gpu(const char *info) {
size_t free_byte ;
size_t total_byte ;
gpuErrchk(cudaMemGetInfo( &free_byte, &total_byte )) ;
double free_db = (double)free_byte ;
double total_db = (double)total_byte ;
double used_db = total_db - free_db ;
//printf("%s - GPU memory usage: used = %.3f MB, free = %.3f MB, total = %.3f MB\n",
// info, used_db/1024.0/1024.0, free_db/1024.0/1024.0, total_db/1024.0/1024.0);
}
float data_transfer_time = 0;
float pooling_time = 0;
float conv_time = 0;
float fc_time = 0;
float softmax_time = 0;
/*
* D = channel size
* N = width and height of an output image
* Thus, input is (D, N * 2, N * 2) and output is (D, N, N).
*/
static void pooling_layer(float *inputs, float *outputs, int D, int N, int NoImg) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
show_mem_gpu("Before pooling");
// Call gpu kernel
dim3 threadsPerBlock(8, 8, 1);
if (N < 4) threadsPerBlock.x = N*N;
threadsPerBlock.z = NormalToOne(1024 / (threadsPerBlock.x*threadsPerBlock.y));
dim3 numBlocks((N*N + threadsPerBlock.x - 1)/threadsPerBlock.x,
(D + threadsPerBlock.y - 1)/threadsPerBlock.y,
(NoImg + threadsPerBlock.z - 1)/threadsPerBlock.z);
cudaEventRecord(start);
pooling<<<numBlocks, threadsPerBlock>>>(inputs, outputs, N, D, NoImg);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
show_mem_gpu("After pooling");
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
pooling_time += milliseconds/1000;
}
static void convolution_layer_v1(float *inputs, float *outputs, float *filters, float *biases, int D2, int D1, int N, int NoImg) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Call GPU kernel
dim3 threadsPerBlock(8, 8, 16);
if (N < 4) threadsPerBlock.x = N*N;
threadsPerBlock.z = NormalToOne(1024 / (threadsPerBlock.x*threadsPerBlock.y));
dim3 numBlocks((N*N + threadsPerBlock.x - 1)/threadsPerBlock.x,
(D2 + threadsPerBlock.y - 1)/threadsPerBlock.y,
(NoImg + threadsPerBlock.z - 1)/threadsPerBlock.z);
cudaEventRecord(start);
convolution_v1<<<numBlocks, threadsPerBlock, 3*3*D1*threadsPerBlock.y*sizeof(float)>>>(inputs, outputs, filters, biases, N, D1, D2, NoImg);
cudaEventRecord(stop);
gpuErrchk(cudaEventSynchronize(stop));
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("conv time: %f ms\n", milliseconds);
conv_time += milliseconds/1000;
}
static void convolution_layer_v2(float *inputs, float *outputs, float *filters, float *biases, int D2, int D1, int N, int NoImg) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
show_mem_gpu("Before conv");
// Call GPU kernel
dim3 threadsPerBlock(1024, 1, 1);
dim3 numBlocks((N*N*D2*NoImg + threadsPerBlock.x - 1)/threadsPerBlock.x, 1, 1);
cudaEventRecord(start);
convolution_v2<<<numBlocks, threadsPerBlock>>>(inputs, outputs, filters, biases, N, D1, D2, NoImg);
cudaEventRecord(stop);
gpuErrchk(cudaEventSynchronize(stop));
show_mem_gpu("After conv");
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("conv time: %f ms\n", milliseconds);
conv_time += milliseconds/1000;
}
/*
* M = output size
* N = input size
*/
static void fc_layer(float *input_neuron, float *output_neuron, float *weights, float *biases, int M, int N, int NoImg) {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Call GPU kernel
dim3 blockSize(16, 1);
if (M < 64) blockSize.x = M;
blockSize.y = NormalToOne(1024 / blockSize.x);
dim3 gridSize((M + blockSize.x - 1) / blockSize.x, (NoImg + blockSize.y - 1)/blockSize.y);
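// One thread per (output neuron, image) pair; the block shape is chosen so blockSize.x * blockSize.y stays within 1024 threads.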
cudaEventRecord(start);
fc<<<gridSize, blockSize>>>(input_neuron, output_neuron, weights, biases, N, M, NoImg);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
fc_time += milliseconds/1000;
}
static void softmax(float *output, int N) {
timer_start(1);
int i;
float max = output[0];
for (i = 1; i < N; i++) {
max = (output[i] > max)?output[i]:max;
}
float sum = 0;
for (i = 0; i < N; i++) {
sum += exp(output[i] - max);
}
for (i = 0; i < N; i++) {
output[i] = exp(output[i] - max) / sum;
}
softmax_time += timer_end(1);
}
static int find_max(float *fc, int N) {
int i;
int maxid = 0;
float maxval = 0;
for (i = 0; i < N; i++) {
if (maxval < fc[i]) {
maxval = fc[i];
maxid = i;
}
}
return maxid;
}
float* alloc_layer(size_t n) {
return (float*)malloc(n * sizeof(float));
}
void cnn_init() {
/*
* TODO
* Initialize OpenCL objects as global variables. For example,
* clGetPlatformIDs(1, &platform, NULL);
*/
}
const int NETWORK_SIZES[] = {
64 * 3 * 3 * 3, 64,
64 * 64 * 3 * 3, 64,
128 * 64 * 3 * 3, 128,
128 * 128 * 3 * 3, 128,
256 * 128 * 3 * 3, 256,
256 * 256 * 3 * 3, 256,
256 * 256 * 3 * 3, 256,
512 * 256 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512 * 3 * 3, 512,
512 * 512, 512,
512 * 512, 512,
10 * 512, 10
};
const int OUTPUT_SIZES[] = {
64 * 32 * 32,
64 * 32 * 32,
64 * 16 * 16,
128 * 16 * 16,
128 * 16 * 16,
128 * 8 * 8,
256 * 8 * 8,
256 * 8 * 8,
256 * 8 * 8,
256 * 4 * 4,
512 * 4 * 4,
512 * 4 * 4,
512 * 4 * 4,
512 * 2 * 2,
512 * 2 * 2,
512 * 2 * 2,
512 * 2 * 2,
512 * 1 * 1,
512,
512,
10
};
void cnn(float *images, float **network, int *labels, float *confidences, int num_images, int batch_size) {
/*
* TODO
* Implement here.
* Write classification results to labels and confidences.
* See "cnn_seq.c" if you don't know what to do.
*/
// slice the network into weights and biases
float *w1_1, *b1_1, *w1_2, *b1_2;
float *w2_1, *b2_1, *w2_2, *b2_2;
float *w3_1, *b3_1, *w3_2, *b3_2, *w3_3, *b3_3;
float *w4_1, *b4_1, *w4_2, *b4_2, *w4_3, *b4_3;
float *w5_1, *b5_1, *w5_2, *b5_2, *w5_3, *b5_3;
float *w1, *b1, *w2, *b2, *w3, *b3;
// Set data for weights and biases
w1_1 = network[0]; b1_1 = network[1];
w1_2 = network[2]; b1_2 = network[3];
w2_1 = network[4]; b2_1 = network[5];
w2_2 = network[6]; b2_2 = network[7];
w3_1 = network[8]; b3_1 = network[9];
w3_2 = network[10]; b3_2 = network[11];
w3_3 = network[12]; b3_3 = network[13];
w4_1 = network[14]; b4_1 = network[15];
w4_2 = network[16]; b4_2 = network[17];
w4_3 = network[18]; b4_3 = network[19];
w5_1 = network[20]; b5_1 = network[21];
w5_2 = network[22]; b5_2 = network[23];
w5_3 = network[24]; b5_3 = network[25];
w1 = network[26]; b1 = network[27];
w2 = network[28]; b2 = network[29];
w3 = network[30]; b3 = network[31];
// view the network's values
for (int i = 0; i < NETWORK_SIZES[0]; i++) {
//printf("w1_1[%d] = %f\n", i, w1_1[i]);
}
// Allocate vectors in device memory
float *d_w1_1, *d_b1_1, *d_w1_2, *d_b1_2;
float *d_w2_1, *d_b2_1, *d_w2_2, *d_b2_2;
float *d_w3_1, *d_b3_1, *d_w3_2, *d_b3_2, *d_w3_3, *d_b3_3;
float *d_w4_1, *d_b4_1, *d_w4_2, *d_b4_2, *d_w4_3, *d_b4_3;
float *d_w5_1, *d_b5_1, *d_w5_2, *d_b5_2, *d_w5_3, *d_b5_3;
float *d_w1, *d_b1, *d_w2, *d_b2, *d_w3, *d_b3;
cudaMalloc(&d_w1_1, NETWORK_SIZES[0] * sizeof(float));
cudaMalloc(&d_w1_2, NETWORK_SIZES[2] * sizeof(float));
cudaMalloc(&d_w2_1, NETWORK_SIZES[4] * sizeof(float));
cudaMalloc(&d_w2_2, NETWORK_SIZES[6] * sizeof(float));
cudaMalloc(&d_w3_1, NETWORK_SIZES[8] * sizeof(float));
cudaMalloc(&d_w3_2, NETWORK_SIZES[10] * sizeof(float));
cudaMalloc(&d_w3_3, NETWORK_SIZES[12] * sizeof(float));
cudaMalloc(&d_w4_1, NETWORK_SIZES[14] * sizeof(float));
cudaMalloc(&d_w4_2, NETWORK_SIZES[16] * sizeof(float));
cudaMalloc(&d_w4_3, NETWORK_SIZES[18] * sizeof(float));
cudaMalloc(&d_w5_1, NETWORK_SIZES[20] * sizeof(float));
cudaMalloc(&d_w5_2, NETWORK_SIZES[22] * sizeof(float));
cudaMalloc(&d_w5_3, NETWORK_SIZES[24] * sizeof(float));
cudaMalloc(&d_w1, NETWORK_SIZES[26] * sizeof(float));
cudaMalloc(&d_w2, NETWORK_SIZES[28] * sizeof(float));
cudaMalloc(&d_w3, NETWORK_SIZES[30] * sizeof(float));
cudaMalloc(&d_b1_1, NETWORK_SIZES[1] * sizeof(float));
cudaMalloc(&d_b1_2, NETWORK_SIZES[3] * sizeof(float));
cudaMalloc(&d_b2_1, NETWORK_SIZES[5] * sizeof(float));
cudaMalloc(&d_b2_2, NETWORK_SIZES[7] * sizeof(float));
cudaMalloc(&d_b3_1, NETWORK_SIZES[9] * sizeof(float));
cudaMalloc(&d_b3_2, NETWORK_SIZES[11] * sizeof(float));
cudaMalloc(&d_b3_3, NETWORK_SIZES[13] * sizeof(float));
cudaMalloc(&d_b4_1, NETWORK_SIZES[15] * sizeof(float));
cudaMalloc(&d_b4_2, NETWORK_SIZES[17] * sizeof(float));
cudaMalloc(&d_b4_3, NETWORK_SIZES[19] * sizeof(float));
cudaMalloc(&d_b5_1, NETWORK_SIZES[21] * sizeof(float));
cudaMalloc(&d_b5_2, NETWORK_SIZES[23] * sizeof(float));
cudaMalloc(&d_b5_3, NETWORK_SIZES[25] * sizeof(float));
cudaMalloc(&d_b1, NETWORK_SIZES[27] * sizeof(float));
cudaMalloc(&d_b2, NETWORK_SIZES[29] * sizeof(float));
cudaMalloc(&d_b3, NETWORK_SIZES[31] * sizeof(float));
// Create cudaEvent to measure cuda time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Copy vectors from host memory to device memory
cudaEventRecord(start);
cudaMemcpy(d_w1_1, w1_1, NETWORK_SIZES[0] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w1_2, w1_2, NETWORK_SIZES[2] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w2_1, w2_1, NETWORK_SIZES[4] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w2_2, w2_2, NETWORK_SIZES[6] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w3_1, w3_1, NETWORK_SIZES[8] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w3_2, w3_2, NETWORK_SIZES[10] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w3_3, w3_3, NETWORK_SIZES[12] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w4_1, w4_1, NETWORK_SIZES[14] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w4_2, w4_2, NETWORK_SIZES[16] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w4_3, w4_3, NETWORK_SIZES[18] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w5_1, w5_1, NETWORK_SIZES[20] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w5_2, w5_2, NETWORK_SIZES[22] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w5_3, w5_3, NETWORK_SIZES[24] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w1, w1, NETWORK_SIZES[26] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w2, w2, NETWORK_SIZES[28] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_w3, w3, NETWORK_SIZES[30] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b1_1, b1_1, NETWORK_SIZES[1] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b1_2, b1_2, NETWORK_SIZES[3] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b2_1, b2_1, NETWORK_SIZES[5] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b2_2, b2_2, NETWORK_SIZES[7] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b3_1, b3_1, NETWORK_SIZES[9] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b3_2, b3_2, NETWORK_SIZES[11] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b3_3, b3_3, NETWORK_SIZES[13] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b4_1, b4_1, NETWORK_SIZES[15] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b4_2, b4_2, NETWORK_SIZES[17] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b4_3, b4_3, NETWORK_SIZES[19] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b5_1, b5_1, NETWORK_SIZES[21] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b5_2, b5_2, NETWORK_SIZES[23] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b5_3, b5_3, NETWORK_SIZES[25] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b1, b1, NETWORK_SIZES[27] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b2, b2, NETWORK_SIZES[29] * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b3, b3, NETWORK_SIZES[31] * sizeof(float), cudaMemcpyHostToDevice);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
data_transfer_time = milliseconds/1000;
printf("network data transfer time = %f s\n", data_transfer_time);
data_transfer_time = 0;
show_mem_gpu("After network data transfer");
// Batch images size
int batchImg = batch_size;
int batchImg2 = 2*batchImg;
printf("batch size = %d\n", batchImg);
// Allocate output vectors in device memory to transfer between layers
/*float *d_c1_1, *d_c1_2, *d_p1;
float *d_c2_1, *d_c2_2, *d_p2;
float *d_c3_1, *d_c3_2, *d_c3_3, *d_p3;
float *d_c4_1, *d_c4_2, *d_c4_3, *d_p4;
float *d_c5_1, *d_c5_2, *d_c5_3, *d_p5;
float *d_fc1, *d_fc2, *d_fc3;*/
float *d1, *d2, *p1;
cudaMalloc(&d1, batchImg * OUTPUT_SIZES[0] * sizeof(float));
cudaMalloc(&d2, batchImg * OUTPUT_SIZES[1] * sizeof(float));
cudaMalloc(&p1, batchImg * OUTPUT_SIZES[2] * sizeof(float));
/*cudaMalloc(&d_p1, batchImg * OUTPUT_SIZES[2] * sizeof(float));
cudaMalloc(&d_c2_1, batchImg * OUTPUT_SIZES[3] * sizeof(float));
cudaMalloc(&d_c2_2, batchImg * OUTPUT_SIZES[4] * sizeof(float));
cudaMalloc(&d_p2, batchImg * OUTPUT_SIZES[5] * sizeof(float));
cudaMalloc(&d_c3_1, batchImg * OUTPUT_SIZES[6] * sizeof(float));
cudaMalloc(&d_c3_2, batchImg * OUTPUT_SIZES[7] * sizeof(float));
cudaMalloc(&d_c3_3, batchImg * OUTPUT_SIZES[8] * sizeof(float));
cudaMalloc(&d_p3, batchImg * OUTPUT_SIZES[9] * sizeof(float));
cudaMalloc(&d_c4_1, batchImg * OUTPUT_SIZES[10] * sizeof(float));
cudaMalloc(&d_c4_2, batchImg * OUTPUT_SIZES[11] * sizeof(float));
cudaMalloc(&d_c4_3, batchImg * OUTPUT_SIZES[12] * sizeof(float));
cudaMalloc(&d_p4, batchImg * OUTPUT_SIZES[13] * sizeof(float));
cudaMalloc(&d_c5_1, batchImg * OUTPUT_SIZES[14] * sizeof(float));
cudaMalloc(&d_c5_2, batchImg * OUTPUT_SIZES[15] * sizeof(float));
cudaMalloc(&d_c5_3, batchImg * OUTPUT_SIZES[16] * sizeof(float));
cudaMalloc(&d_p5, batchImg * OUTPUT_SIZES[17] * sizeof(float));
cudaMalloc(&d_fc1, batchImg * OUTPUT_SIZES[18] * sizeof(float));
cudaMalloc(&d_fc2, batchImg * OUTPUT_SIZES[19] * sizeof(float));
cudaMalloc(&d_fc3, batchImg * OUTPUT_SIZES[20] * sizeof(float));*/
show_mem_gpu("After malloc output vectors");
// run network
size_t image_size = batchImg*3*32*32 * sizeof(float);
float *d_image;
cudaMalloc(&d_image, image_size);
int start_num_images = num_images%batchImg;
// Images are processed in batches
for(int i = start_num_images; i < num_images; i += batchImg2)
{
printf("i = %d\n", i);
batchImg = batch_size;
// Copy image from host to device
float *image = images + i * 3 * 32 * 32;
cudaEventRecord(start);
cudaMemcpy(d_image, image, image_size, cudaMemcpyHostToDevice);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
convolution_layer_v2(d_image, d1, d_w1_1, d_b1_1, 64, 3, 32, batchImg);
convolution_layer_v2(d1, d2, d_w1_2, d_b1_2, 64, 64, 32, batchImg);
pooling_layer(d2, p1, 64, 16, batchImg);
/////////////////
// Copy image from host to device
image = images + (i+batchImg) * 3 * 32 * 32;
cudaEventRecord(start);
cudaMemcpy(d_image, image, image_size, cudaMemcpyHostToDevice);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
convolution_layer_v2(d_image, d1, d_w1_1, d_b1_1, 64, 3, 32, batchImg);
convolution_layer_v2(d1, d2, d_w1_2, d_b1_2, 64, 64, 32, batchImg);
pooling_layer(d2, d1, 64, 16, batchImg);
// copy p1 to d1
batchImg = batchImg2;
////////////////
convolution_layer_v2(d2, d1, d_w2_1, d_b2_1, 128, 64, 16, batchImg);
convolution_layer_v2(d1, d2, d_w2_2, d_b2_2, 128, 128, 16, batchImg);
pooling_layer(d2, d1, 128, 8, batchImg);
convolution_layer_v2(d1, d2, d_w3_1, d_b3_1, 256, 128, 8, batchImg);
convolution_layer_v2(d2, d1, d_w3_2, d_b3_2, 256, 256, 8, batchImg);
convolution_layer_v2(d1, d2, d_w3_3, d_b3_3, 256, 256, 8, batchImg);
pooling_layer(d2, d1, 256, 4, batchImg);
convolution_layer_v2(d1, d2, d_w4_1, d_b4_1, 512, 256, 4, batchImg);
convolution_layer_v2(d2, d1, d_w4_2, d_b4_2, 512, 512, 4, batchImg);
convolution_layer_v2(d1, d2, d_w4_3, d_b4_3, 512, 512, 4, batchImg);
pooling_layer(d2, d1, 512, 2, batchImg);
convolution_layer_v2(d1, d2, d_w5_1, d_b5_1, 512, 512, 2, batchImg);
convolution_layer_v2(d2, d1, d_w5_2, d_b5_2, 512, 512, 2, batchImg);
convolution_layer_v2(d1, d2, d_w5_3, d_b5_3, 512, 512, 2, batchImg);
pooling_layer(d2, d1, 512, 1, batchImg);
fc_layer(d1, d2, d_w1, d_b1, 512, 512, batchImg);
fc_layer(d2, d1, d_w2, d_b2, 512, 512, batchImg);
fc_layer(d1, d2, d_w3, d_b3, 10, 512, batchImg);
// Copy result from device memory to host memory
float *fc3_mul = alloc_layer(OUTPUT_SIZES[20] * batchImg);
cudaEventRecord(start);
cudaMemcpy(fc3_mul, d2, batchImg * OUTPUT_SIZES[20] * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
// Predicted labels
for (int j = 0; j < batchImg; j++) {
float *fc3 = fc3_mul + j*10;
softmax(fc3, 10);
int idx = i + j;
labels[idx] = find_max(fc3, 10);
confidences[idx] = fc3[labels[idx]];
}
free(fc3_mul);
}
/*// The remaining images
size_t image_size2 = start_num_images*3*32*32 * sizeof(float);
float *d_image2;
batchImg = start_num_images;
for(int i = 0; i < start_num_images; i += start_num_images)
{
// Copy image from host to device
float *image = images + i * 3 * 32 * 32;
cudaEventRecord(start);
cudaMalloc(&d_image2, image_size2);
cudaMemcpy(d_image2, image, image_size2, cudaMemcpyHostToDevice);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
convolution_layer_v2(d_image2, d1, d_w1_1, d_b1_1, 64, 3, 32, batchImg);
convolution_layer_v2(d1, d2, d_w1_2, d_b1_2, 64, 64, 32, batchImg);
pooling_layer(d2, d1, 64, 16, batchImg);
convolution_layer_v2(d2, d1, d_w2_1, d_b2_1, 128, 64, 16, batchImg);
convolution_layer_v2(d1, d2, d_w2_2, d_b2_2, 128, 128, 16, batchImg);
pooling_layer(d2, d1, 128, 8, batchImg);
convolution_layer_v2(d1, d2, d_w3_1, d_b3_1, 256, 128, 8, batchImg);
convolution_layer_v2(d2, d1, d_w3_2, d_b3_2, 256, 256, 8, batchImg);
convolution_layer_v2(d1, d2, d_w3_3, d_b3_3, 256, 256, 8, batchImg);
pooling_layer(d2, d1, 256, 4, batchImg);
convolution_layer_v2(d1, d2, d_w4_1, d_b4_1, 512, 256, 4, batchImg);
convolution_layer_v2(d2, d1, d_w4_2, d_b4_2, 512, 512, 4, batchImg);
convolution_layer_v2(d1, d2, d_w4_3, d_b4_3, 512, 512, 4, batchImg);
pooling_layer(d2, d1, 512, 2, batchImg);
convolution_layer_v2(d1, d2, d_w5_1, d_b5_1, 512, 512, 2, batchImg);
convolution_layer_v2(d2, d1, d_w5_2, d_b5_2, 512, 512, 2, batchImg);
convolution_layer_v2(d1, d2, d_w5_3, d_b5_3, 512, 512, 2, batchImg);
pooling_layer(d2, d1, 512, 1, batchImg);
fc_layer(d1, d2, d_w1, d_b1, 512, 512, batchImg);
fc_layer(d2, d1, d_w2, d_b2, 512, 512, batchImg);
fc_layer(d1, d2, d_w3, d_b3, 10, 512, batchImg);
// Copy result from device memory to host memory
float *fc3_mul = alloc_layer(OUTPUT_SIZES[20] * batchImg);
cudaEventRecord(start);
cudaMemcpy(fc3_mul, d2, batchImg * OUTPUT_SIZES[20] * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
data_transfer_time += milliseconds/1000;
// Predicted labels
for (int j = 0; j < batchImg; j++) {
float *fc3 = fc3_mul + j*10;
softmax(fc3, 10);
int idx = i + j;
labels[idx] = find_max(fc3, 10);
confidences[idx] = fc3[labels[idx]];
}
free(fc3_mul);
}
cudaFree(d_image2);*/
printf("data transfer time = %f s\n", data_transfer_time);
printf("pooing time = %f s\n", pooling_time);
printf("convolution time = %f s\n", conv_time);
printf("fully connected time = %f s\n", fc_time);
printf("softmax time = %f s\n", softmax_time);
cudaFree(d_w1_1); cudaFree(d_b1_1); cudaFree(d_w1_2); cudaFree(d_b1_2);
cudaFree(d_w2_1); cudaFree(d_b2_1); cudaFree(d_w2_2); cudaFree(d_b2_2);
cudaFree(d_w3_1); cudaFree(d_b3_1); cudaFree(d_w3_2); cudaFree(d_b3_2); cudaFree(d_w3_3); cudaFree(d_b3_3);
cudaFree(d_w4_1); cudaFree(d_b4_1); cudaFree(d_w4_2); cudaFree(d_b4_2); cudaFree(d_w4_3); cudaFree(d_b4_3);
cudaFree(d_w5_1); cudaFree(d_b5_1); cudaFree(d_w5_2); cudaFree(d_b5_2); cudaFree(d_w5_3); cudaFree(d_b5_3);
cudaFree(d_w1); cudaFree(d_b1); cudaFree(d_w2); cudaFree(d_b2); cudaFree(d_w3); cudaFree(d_b3);
/*cudaFree(d_c1_1); cudaFree(d_c1_2); cudaFree(d_p1);
cudaFree(d_c2_1); cudaFree(d_c2_2); cudaFree(d_p2);
cudaFree(d_c3_1); cudaFree(d_c3_2); cudaFree(d_c3_3); cudaFree(d_p3);
cudaFree(d_c4_1); cudaFree(d_c4_2); cudaFree(d_c4_3); cudaFree(d_p4);
cudaFree(d_c5_1); cudaFree(d_c5_2); cudaFree(d_c5_3); cudaFree(d_p5);
cudaFree(d_fc1); cudaFree(d_fc2); cudaFree(d_fc3);*/
}
|
b9bb279e7cbfd7c2903c9f52d98fd016a3321cda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* Created by: Hang Zhang
* ECE Department, Rutgers University
* Email: [email protected]
* Copyright (c) 2016
*
* Feel free to reuse and distribute this software for research or
* non-profit purpose, subject to the following conditions:
* 1. The code must retain the above copyright notice, this list of
* conditions.
* 2. Original authors' names are not deleted.
* 3. The authors' names are not used to endorse or promote products
* derived from this software
*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
*/
__global__ void HZAffine_Fast_kernel (
THCDeviceTensor<real, 3> input,
THCDeviceTensor<real, 3> output,
THCDeviceTensor<real, 2> matrix)
{
/* declarations of the variables */
int ch, xo, yo, xi, yi, inwidth, inheight;
real xi_, yi_, zi_;
/* Get the index and channels */
ch = blockIdx.z;
xo = blockIdx.x * blockDim.x + threadIdx.x;
yo = blockIdx.y * blockDim.y + threadIdx.y;
/* boundary check for output */
if (xo >= output.getSize(2) || yo >= output.getSize(1)) return;
inwidth = input.getSize(2);
inheight = input.getSize(1);
/* main operation */
xi_ = matrix[0][0]*xo + matrix[1][0]*yo + matrix[2][0];
yi_ = matrix[0][1]*xo + matrix[1][1]*yo + matrix[2][1];
zi_ = matrix[0][2]*xo + matrix[1][2]*yo + matrix[2][2];
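/* Inverse mapping: the host passes the inverted transform, so each output pixel (xo, yo) is
projected back into the input image; (xi_, yi_, zi_) are homogeneous coordinates, hence the division by zi_ */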
xi = (int) (xi_ / zi_);
yi = (int) (yi_ / zi_);
/* boundary check for input*/
if(xi >= 0 && xi < inwidth && yi >=0 && yi < inheight)
output[ch][yo][xo] = input[ch][yi][xi].ldg();
else
output[ch][yo][xo] = 0;
}
void HZAffineFast(THCState *state, THCTensor *input_,
THCTensor *output_, THCTensor *matrix_)
/*
* mapping the image pixels based on the inversed
* affine transformation matrix
*/
{
/* Check the GPU index */
HZPROC_assertSameGPU(state, 3, input_, output_, matrix_);
/* Device tensors */
THCDeviceTensor<real, 3> input = devicetensor<3>(state, input_);
THCDeviceTensor<real, 3> output = devicetensor<3>(state, output_);
/* inverse the affine matrix */
THCTensor *mat_ = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, mat_, matrix_);
THCTensor_(getri)(state, mat_, matrix_);
THCDeviceTensor<real, 2> matrix = devicetensor<2>(state, mat_);
/* kernel function */
hipStream_t stream = THCState_getCurrentStream(state);
dim3 threads(16, 16);
dim3 blocks(output.getSize(2)/16+1, output.getSize(1)/16+1,
output.getSize(0));
hipLaunchKernelGGL(( HZAffine_Fast_kernel), dim3(blocks), dim3(threads), 0, stream, input, output, matrix);
//Free
THCTensor_(free)(state, mat_);
THCudaCheck(hipGetLastError());
}
__global__ void HZAffine_Bili_kernel (
THCDeviceTensor<real, 3> input,
THCDeviceTensor<real, 3> output,
THCDeviceTensor<real, 2> matrix)
{
/* declarations of the variables */
int ch, xo, yo, x0, y0, inwidth, inheight;
real xi, yi, wx, wy, w00, w01, w10, w11;
real xi_, yi_, zi_;
/* Get the index and channels */
ch = blockIdx.z;
xo = blockIdx.x * blockDim.x + threadIdx.x;
yo = blockIdx.y * blockDim.y + threadIdx.y;
/* boundary check for output */
if (xo >= output.getSize(2) || yo >= output.getSize(1)) return;
inwidth = input.getSize(2);
inheight = input.getSize(1);
/* main operation */
xi_ = matrix[0][0]*xo + matrix[1][0]*yo + matrix[2][0];
yi_ = matrix[0][1]*xo + matrix[1][1]*yo + matrix[2][1];
zi_ = matrix[0][2]*xo + matrix[1][2]*yo + matrix[2][2];
xi = xi_ / zi_;
yi = yi_ / zi_;
x0 = (int)xi;
y0 = (int)yi;
/* boundary check for input*/
if(x0 >= 0 && x0 < inwidth-1 && y0 >=0 && yi < inheight-1)
{
wx = 1.0 - (xi - x0);
wy = 1.0 - (yi - y0);
w00 = wx * wy;
w01 = (1-wx) * wy;
w10 = wx * (1-wy);
w11 = (1-wx) * (1-wy);
output[ch][yo][xo] = w00*input[ch][y0 ][x0 ].ldg()
+ w01*input[ch][y0 ][x0+1].ldg()
+ w10*input[ch][y0+1][x0 ].ldg()
+ w11*input[ch][y0+1][x0+1].ldg();
}
else
output[ch][yo][xo] = 0;
}
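/* The w00..w11 factors above are the standard bilinear interpolation weights:
 * wx and wy are the distances from the sample point to the next integer
 * coordinate, so a sample landing exactly on (x0, y0) gets w00 = 1 and the other
 * three weights 0, and the four weights always sum to 1. The same weighting as a
 * small standalone helper (illustrative only; float is used here instead of the
 * real typedef): */
__host__ __device__ static inline float bilinear_sample(
    float v00, float v01, float v10, float v11, /* values at (y0,x0),(y0,x0+1),(y0+1,x0),(y0+1,x0+1) */
    float xi, float yi) /* fractional sample position */
{
    int x0 = (int)xi, y0 = (int)yi;
    float wx = 1.0f - (xi - x0);
    float wy = 1.0f - (yi - y0);
    return wx*wy*v00 + (1.0f-wx)*wy*v01 + wx*(1.0f-wy)*v10 + (1.0f-wx)*(1.0f-wy)*v11;
}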
void HZAffineBili(THCState *state, THCTensor *input_,
THCTensor *output_, THCTensor *matrix_)
/*
* mapping the image pixels based on the inversed
* affine transformation matrix
*/
{
/* Check the GPU index */
HZPROC_assertSameGPU(state, 3, input_, output_, matrix_);
/* Device tensors */
THCDeviceTensor<real, 3> input = devicetensor<3>(state, input_);
THCDeviceTensor<real, 3> output = devicetensor<3>(state, output_);
/* inverse the affine matrix */
THCTensor *mat_ = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, mat_, matrix_);
THCTensor_(getri)(state, mat_, matrix_);
THCDeviceTensor<real, 2> matrix = devicetensor<2>(state, mat_);
/* kernel function */
hipStream_t stream = THCState_getCurrentStream(state);
dim3 threads(16, 16);
dim3 blocks(output.getSize(2)/16+1, output.getSize(1)/16+1,
output.getSize(0));
hipLaunchKernelGGL(( HZAffine_Bili_kernel), dim3(blocks), dim3(threads), 0, stream, input, output, matrix);
//Free
THCTensor_(free)(state, mat_);
THCudaCheck(hipGetLastError());
}
| b9bb279e7cbfd7c2903c9f52d98fd016a3321cda.cu | /*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
* Created by: Hang Zhang
* ECE Department, Rutgers University
* Email: [email protected]
* Copyright (c) 2016
*
* Feel free to reuse and distribute this software for research or
* non-profit purpose, subject to the following conditions:
* 1. The code must retain the above copyright notice, this list of
* conditions.
* 2. Original authors' names are not deleted.
* 3. The authors' names are not used to endorse or promote products
* derived from this software
*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
*/
__global__ void HZAffine_Fast_kernel (
THCDeviceTensor<real, 3> input,
THCDeviceTensor<real, 3> output,
THCDeviceTensor<real, 2> matrix)
{
/* declarations of the variables */
int ch, xo, yo, xi, yi, inwidth, inheight;
real xi_, yi_, zi_;
/* Get the index and channels */
ch = blockIdx.z;
xo = blockIdx.x * blockDim.x + threadIdx.x;
yo = blockIdx.y * blockDim.y + threadIdx.y;
/* boundary check for output */
if (xo >= output.getSize(2) || yo >= output.getSize(1)) return;
inwidth = input.getSize(2);
inheight = input.getSize(1);
/* main operation */
xi_ = matrix[0][0]*xo + matrix[1][0]*yo + matrix[2][0];
yi_ = matrix[0][1]*xo + matrix[1][1]*yo + matrix[2][1];
zi_ = matrix[0][2]*xo + matrix[1][2]*yo + matrix[2][2];
xi = (int) (xi_ / zi_);
yi = (int) (yi_ / zi_);
/* boundary check for input*/
if(xi >= 0 && xi < inwidth && yi >=0 && yi < inheight)
output[ch][yo][xo] = input[ch][yi][xi].ldg();
else
output[ch][yo][xo] = 0;
}
void HZAffineFast(THCState *state, THCTensor *input_,
THCTensor *output_, THCTensor *matrix_)
/*
* mapping the image pixels based on the inversed
* affine transformation matrix
*/
{
/* Check the GPU index */
HZPROC_assertSameGPU(state, 3, input_, output_, matrix_);
/* Device tensors */
THCDeviceTensor<real, 3> input = devicetensor<3>(state, input_);
THCDeviceTensor<real, 3> output = devicetensor<3>(state, output_);
/* inverse the affine matrix */
THCTensor *mat_ = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, mat_, matrix_);
THCTensor_(getri)(state, mat_, matrix_);
THCDeviceTensor<real, 2> matrix = devicetensor<2>(state, mat_);
/* kernel function */
cudaStream_t stream = THCState_getCurrentStream(state);
dim3 threads(16, 16);
dim3 blocks(output.getSize(2)/16+1, output.getSize(1)/16+1,
output.getSize(0));
HZAffine_Fast_kernel<<<blocks, threads, 0, stream>>>(input, output, matrix);
//Free
THCTensor_(free)(state, mat_);
THCudaCheck(cudaGetLastError());
}
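/* Both HZAffineFast and HZAffineBili map each OUTPUT pixel back into the input
 * image with the inverted matrix (a gather, which avoids the holes a forward
 * scatter would leave). The mapping used inside the kernels, written out as a
 * standalone helper with the same row-vector convention
 * [xi yi zi] = [xo yo 1] * M (illustrative only): */
__host__ __device__ static inline void map_output_to_input(
    const float M[3][3], int xo, int yo, float *xi, float *yi)
{
    float x = M[0][0]*xo + M[1][0]*yo + M[2][0];
    float y = M[0][1]*xo + M[1][1]*yo + M[2][1];
    float z = M[0][2]*xo + M[1][2]*yo + M[2][2];
    *xi = x / z; /* the homogeneous divide also covers projective matrices */
    *yi = y / z;
}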
__global__ void HZAffine_Bili_kernel (
THCDeviceTensor<real, 3> input,
THCDeviceTensor<real, 3> output,
THCDeviceTensor<real, 2> matrix)
{
/* declarations of the variables */
int ch, xo, yo, x0, y0, inwidth, inheight;
real xi, yi, wx, wy, w00, w01, w10, w11;
real xi_, yi_, zi_;
/* Get the index and channels */
ch = blockIdx.z;
xo = blockIdx.x * blockDim.x + threadIdx.x;
yo = blockIdx.y * blockDim.y + threadIdx.y;
/* boundary check for output */
if (xo >= output.getSize(2) || yo >= output.getSize(1)) return;
inwidth = input.getSize(2);
inheight = input.getSize(1);
/* main operation */
xi_ = matrix[0][0]*xo + matrix[1][0]*yo + matrix[2][0];
yi_ = matrix[0][1]*xo + matrix[1][1]*yo + matrix[2][1];
zi_ = matrix[0][2]*xo + matrix[1][2]*yo + matrix[2][2];
xi = xi_ / zi_;
yi = yi_ / zi_;
x0 = (int)xi;
y0 = (int)yi;
/* boundary check for input*/
if(x0 >= 0 && x0 < inwidth-1 && y0 >=0 && yi < inheight-1)
{
wx = 1.0 - (xi - x0);
wy = 1.0 - (yi - y0);
w00 = wx * wy;
w01 = (1-wx) * wy;
w10 = wx * (1-wy);
w11 = (1-wx) * (1-wy);
output[ch][yo][xo] = w00*input[ch][y0 ][x0 ].ldg()
+ w01*input[ch][y0 ][x0+1].ldg()
+ w10*input[ch][y0+1][x0 ].ldg()
+ w11*input[ch][y0+1][x0+1].ldg();
}
else
output[ch][yo][xo] = 0;
}
void HZAffineBili(THCState *state, THCTensor *input_,
THCTensor *output_, THCTensor *matrix_)
/*
* mapping the image pixels based on the inversed
* affine transformation matrix
*/
{
/* Check the GPU index */
HZPROC_assertSameGPU(state, 3, input_, output_, matrix_);
/* Device tensors */
THCDeviceTensor<real, 3> input = devicetensor<3>(state, input_);
THCDeviceTensor<real, 3> output = devicetensor<3>(state, output_);
/* inverse the affine matrix */
THCTensor *mat_ = THCTensor_(new)(state);
THCTensor_(resizeAs)(state, mat_, matrix_);
THCTensor_(getri)(state, mat_, matrix_);
THCDeviceTensor<real, 2> matrix = devicetensor<2>(state, mat_);
/* kernel function */
cudaStream_t stream = THCState_getCurrentStream(state);
dim3 threads(16, 16);
dim3 blocks(output.getSize(2)/16+1, output.getSize(1)/16+1,
output.getSize(0));
HZAffine_Bili_kernel<<<blocks, threads, 0, stream>>>(input, output, matrix);
//Free
THCTensor_(free)(state, mat_);
THCudaCheck(cudaGetLastError());
}
|
29f75046c8a65481b4ecc635ba265ada843b0229.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2017 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_1x1_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "cudnn_util.h"
#include "neural_network_cusparse_exception.h"
#include "neural_network_cublas_exception.h"
#include "neural_network_cuda_exception.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
#define OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE 4
__global__ void sparse_fully_connected_1x1_backprop_upd_kernel(
const float * __restrict output_errors,
float * __restrict input_errors,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int entry_count,
int entry32_block_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int max_valid_lane = min(OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE, end_column_index - base_nnz_index);
bool valid[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
int column_ids[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
float w[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
{
valid[i] = (i < max_valid_lane);
int index = valid[i] ? base_nnz_index + i : (end_column_index - 1);
column_ids[i] = __load_nc(column_indices + index);
w[i] = __load_nc(weights + index);
}
int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int current_entry_id = base_entry_id + lane_id;
const float * base_output_errors = output_errors + row_id * entry_count;
for(int j = 0; j < entry32_block_size; ++j, current_entry_id += 32)
{
if (current_entry_id < entry_count)
{
float output_error = __load_nc(base_output_errors + current_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(input_errors + column_ids[i] * entry_count + current_entry_id, output_error * w[i]);
}
}
}
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
template<bool single_entry_pass>
__global__ void sparse_fully_connected_1x1_update_weights_kernel(
const float * __restrict output_errors,
const float * __restrict input_neurons,
float * __restrict gradient_weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int entry_count,
int entry32_block_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int max_valid_lane = min(OUTPUT_ELEM_COUNT_BLOCK_SIZE, end_column_index - base_nnz_index);
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
valid[i] = (i < max_valid_lane);
int column_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
column_ids[i] = __load_nc(column_indices + (valid[i] ? base_nnz_index + i : (end_column_index - 1)));
int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int current_entry_id = base_entry_id + lane_id;
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
const float * base_output_errors = output_errors + row_id * entry_count;
for(int j = 0; j < entry32_block_size; ++j, current_entry_id += 32)
{
if (current_entry_id < entry_count)
{
float output_error = __load_nc(base_output_errors + current_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __load_nc(input_neurons + column_ids[i] * entry_count + current_entry_id) * output_error;
}
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
#ifdef __CUDACC_VER_MAJOR__
#if __CUDACC_VER_MAJOR__ < 9
sums[i] += __shfl_xor(sums[i], tx);
#else
sums[i] += __shfl_xor_sync(0xFFFFFFFF, sums[i], tx);
#endif
#endif
}
#pragma unroll
for(int i = 1; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (lane_id == i)
sums[0] = sums[i];
if (lane_id < max_valid_lane)
{
if (single_entry_pass)
{
gradient_weights[base_nnz_index + lane_id] += sums[0];
}
else
{
atomicAdd(gradient_weights + base_nnz_index + lane_id, sums[0]);
}
}
}
sparse_fully_connected_1x1_layer_updater_cuda::sparse_fully_connected_1x1_layer_updater_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
sparse_fully_connected_1x1_layer_updater_cuda::~sparse_fully_connected_1x1_layer_updater_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
cusparse_safe_call(hipsparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
hipsparseMatDescr_t mat_descr;
cusparse_safe_call(hipsparseCreateMatDescr(&mat_descr));
cusparse_safe_call(hipsparseScsrmm(
cuda_config->get_cusparse_handle(),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry_list[0],
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
*input_buffers[0],
input_elem_count_per_entry_list[0],
&beta,
*output_buffer,
output_elem_count_per_entry));
// Add bias
if (bias)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_util::set_tensor_descriptor(
output_data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_buffer));
}
}
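/* The Scsrmm call above multiplies the sparse weight matrix, stored in CSR form,
 * by the dense input block: data[0] holds the nonzero weight values,
 * data_custom[1] the row pointers and data_custom[0] the column indices.
 * As a small worked example of that layout (values are illustrative), the
 * 3x4 matrix
 *     [ 5 0 0 2 ]
 *     [ 0 0 3 0 ]
 *     [ 0 7 0 1 ]
 * would be stored as: */
static const float example_csr_values[] = { 5.0f, 2.0f, 3.0f, 7.0f, 1.0f };
static const int example_csr_col_indices[] = { 0, 3, 2, 1, 3 };
static const int example_csr_row_ptrs[] = { 0, 2, 3, 5 }; /* row i spans [row_ptrs[i], row_ptrs[i+1]) */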
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_backward_data_propagation(
hipStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
// Too slow
/*
cusparse_safe_call(hipsparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = (add_update_to_destination ? 1.0F : 0.0F);
hipsparseMatDescr_t mat_descr;
cusparse_safe_call(hipsparseCreateMatDescr(&mat_descr));
cusparse_safe_call(hipsparseScsrmm(
cuda_config->get_cusparse_handle(),
HIPSPARSE_OPERATION_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry,
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
*input_errors_buffer,
input_elem_count_per_entry));
*/
cuda_util::set_with_value(
*cuda_config,
*temporary_working_per_entry_buffer,
0.0F,
input_elem_count_per_entry_aligned * entry_count,
stream_id);
cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), stream_id));
// transpose output
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(hipblasSgeam(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_T,
HIPBLAS_OP_N,
entry_count,
output_elem_count_per_entry,
&alpha,
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
entry_count,
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
entry_count));
}
std::pair<int, int> entry32_block_size_and_count = get_entry32_backprop_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE),
output_elem_count_per_entry,
entry32_block_size_and_count.second,
32);
hipLaunchKernelGGL(( sparse_fully_connected_1x1_backprop_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
*temporary_working_per_entry_buffer,
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
// transpose input
{
float alpha = 1.0F;
float beta = (add_update_to_destination ? 1.0F : 0.0F);
cublas_safe_call(hipblasSgeam(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_T,
HIPBLAS_OP_N,
input_elem_count_per_entry_list[0],
entry_count,
&alpha,
*temporary_working_per_entry_buffer,
entry_count,
&beta,
*input_errors_buffer,
input_elem_count_per_entry_list[0],
*input_errors_buffer,
input_elem_count_per_entry_list[0]));
}
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_backward_weights_propagation(
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::ptr>& gradient,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
// Update weights
{
cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), stream_id));
// transpose input
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(hipblasSgeam(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_T,
HIPBLAS_OP_N,
entry_count,
input_elem_count_per_entry_list[0],
&alpha,
*input_neurons_buffers[0],
input_elem_count_per_entry_list[0],
&beta,
*temporary_working_per_entry_buffer,
entry_count,
*temporary_working_per_entry_buffer,
entry_count));
}
// transpose output
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(hipblasSgeam(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_T,
HIPBLAS_OP_N,
entry_count,
output_elem_count_per_entry,
&alpha,
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
entry_count,
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
entry_count));
}
std::pair<int, int> entry32_block_size_and_count = get_entry32_update_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE),
output_elem_count_per_entry,
entry32_block_size_and_count.second,
32);
if (entry32_block_size_and_count.second > 1)
{
hipLaunchKernelGGL(( sparse_fully_connected_1x1_update_weights_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
((const float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
*temporary_working_per_entry_buffer,
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
}
else
{
hipLaunchKernelGGL(( sparse_fully_connected_1x1_update_weights_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
((const float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
*temporary_working_per_entry_buffer,
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
}
}
// Update biases
if (bias)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_util::set_tensor_descriptor(
output_data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnConvolutionBackwardBias(
cuda_config->get_cudnn_handle(),
&alpha,
output_data_desc,
*output_errors_buffer,
&beta,
bias_desc,
*gradient[1]));
}
}
void sparse_fully_connected_1x1_layer_updater_cuda::updater_configured()
{
std::shared_ptr<const sparse_convolution_layer> layer_derived = std::dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
bias = layer_derived->bias;
int input_data_single_update_32block_entry_size = input_elem_count_per_entry_list[0] * 32 * sizeof(float);
max_entry32_update_block_size = ::max(1, cuda_config->l2_cache_size / 2 / input_data_single_update_32block_entry_size);
int input_data_single_backprop_32block_entry_size = input_elem_count_per_entry_list[0] * 32 * sizeof(float);
max_entry32_backprop_block_size = ::max(1, cuda_config->l2_cache_size / 2 / input_data_single_backprop_32block_entry_size);
cudnn_util::set_tensor_bias_descriptor(
bias_desc,
output_configuration_specific.feature_map_count,
static_cast<unsigned int>(output_configuration_specific.dimension_sizes.size()));
input_elem_count_per_entry_aligned = (input_elem_count_per_entry_list[0] + 4 - 1) / 4 * 4;
output_elem_count_per_entry_aligned = (output_elem_count_per_entry + 4 - 1) / 4 * 4;
}
size_t sparse_fully_connected_1x1_layer_updater_cuda::get_temporary_working_per_entry_buffer_size(const layer_action& action) const
{
if ((action.get_action_type() == layer_action::backward_data) || (action.get_action_type() == layer_action::backward_weights))
return (input_elem_count_per_entry_aligned * sizeof(float)) + (output_elem_count_per_entry_aligned * sizeof(float));
else
return layer_updater_cuda::get_temporary_working_per_entry_buffer_size(action);
}
void sparse_fully_connected_1x1_layer_updater_cuda::notify_data_custom(layer_data_custom::const_ptr host_data_custom)
{
max_column_index_count_per_row = 0;
const std::vector<int>& row_indices = host_data_custom->at(1);
for(int i = 0; i < row_indices.size() - 1; ++i)
max_column_index_count_per_row = ::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_update_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = (entry_count + 32 - 1) / 32;
if (candidate_block_size <= max_entry32_update_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_entry32_update_block_size - 1) / max_entry32_update_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_backprop_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = (entry_count + 32 - 1) / 32;
if (candidate_block_size <= max_entry32_backprop_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_entry32_backprop_block_size - 1) / max_entry32_backprop_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
bool sparse_fully_connected_1x1_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
return false;
}
bool sparse_fully_connected_1x1_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
bool sparse_fully_connected_1x1_layer_updater_cuda::is_backward_weights_dependent_on_input_buffer(unsigned int data_input_index) const
{
return true;
}
}
}
| 29f75046c8a65481b4ecc635ba265ada843b0229.cu | /*
* Copyright 2011-2017 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_1x1_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "cudnn_util.h"
#include "neural_network_cusparse_exception.h"
#include "neural_network_cublas_exception.h"
#include "neural_network_cuda_exception.h"
#include "neural_network_cudnn_exception.h"
#include "../sparse_convolution_layer.h"
namespace nnforge
{
namespace cuda
{
#define OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE 4
__global__ void sparse_fully_connected_1x1_backprop_upd_kernel(
const float * __restrict output_errors,
float * __restrict input_errors,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int entry_count,
int entry32_block_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int max_valid_lane = min(OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE, end_column_index - base_nnz_index);
bool valid[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
int column_ids[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
float w[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
{
valid[i] = (i < max_valid_lane);
int index = valid[i] ? base_nnz_index + i : (end_column_index - 1);
column_ids[i] = __load_nc(column_indices + index);
w[i] = __load_nc(weights + index);
}
int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int current_entry_id = base_entry_id + lane_id;
const float * base_output_errors = output_errors + row_id * entry_count;
for(int j = 0; j < entry32_block_size; ++j, current_entry_id += 32)
{
if (current_entry_id < entry_count)
{
float output_error = __load_nc(base_output_errors + current_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(input_errors + column_ids[i] * entry_count + current_entry_id, output_error * w[i]);
}
}
}
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
template<bool single_entry_pass>
__global__ void sparse_fully_connected_1x1_update_weights_kernel(
const float * __restrict output_errors,
const float * __restrict input_neurons,
float * __restrict gradient_weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int entry_count,
int entry32_block_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int max_valid_lane = min(OUTPUT_ELEM_COUNT_BLOCK_SIZE, end_column_index - base_nnz_index);
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
valid[i] = (i < max_valid_lane);
int column_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
column_ids[i] = __load_nc(column_indices + (valid[i] ? base_nnz_index + i : (end_column_index - 1)));
int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int current_entry_id = base_entry_id + lane_id;
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
const float * base_output_errors = output_errors + row_id * entry_count;
for(int j = 0; j < entry32_block_size; ++j, current_entry_id += 32)
{
if (current_entry_id < entry_count)
{
float output_error = __load_nc(base_output_errors + current_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __load_nc(input_neurons + column_ids[i] * entry_count + current_entry_id) * output_error;
}
}
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
#ifdef __CUDACC_VER_MAJOR__
#if __CUDACC_VER_MAJOR__ < 9
sums[i] += __shfl_xor(sums[i], tx);
#else
sums[i] += __shfl_xor_sync(0xFFFFFFFF, sums[i], tx);
#endif
#endif
}
#pragma unroll
for(int i = 1; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (lane_id == i)
sums[0] = sums[i];
if (lane_id < max_valid_lane)
{
if (single_entry_pass)
{
gradient_weights[base_nnz_index + lane_id] += sums[0];
}
else
{
atomicAdd(gradient_weights + base_nnz_index + lane_id, sums[0]);
}
}
}
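/* The tx = 16..1 loop above is a standard warp-wide butterfly reduction: after
 * five __shfl_xor steps every lane of the warp holds the sum over all 32 lanes,
 * and the follow-up loop then moves the i-th reduced total into lane i's sums[0]
 * before the guarded write/atomicAdd. The reduction idiom in isolation
 * (CUDA 9+ *_sync form; illustrative only): */
__device__ static inline float warp_reduce_sum(float v)
{
	for (int offset = 16; offset > 0; offset >>= 1)
		v += __shfl_xor_sync(0xFFFFFFFF, v, offset); /* exchange with lane (lane_id ^ offset) */
	return v;
}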
sparse_fully_connected_1x1_layer_updater_cuda::sparse_fully_connected_1x1_layer_updater_cuda()
: output_data_desc(0)
, bias_desc(0)
{
cudnn_safe_call(cudnnCreateTensorDescriptor(&output_data_desc));
cudnn_safe_call(cudnnCreateTensorDescriptor(&bias_desc));
}
sparse_fully_connected_1x1_layer_updater_cuda::~sparse_fully_connected_1x1_layer_updater_cuda()
{
cudnnDestroyTensorDescriptor(output_data_desc);
cudnnDestroyTensorDescriptor(bias_desc);
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
cusparse_safe_call(cusparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
cusparseMatDescr_t mat_descr;
cusparse_safe_call(cusparseCreateMatDescr(&mat_descr));
cusparse_safe_call(cusparseScsrmm(
cuda_config->get_cusparse_handle(),
CUSPARSE_OPERATION_NON_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry_list[0],
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
*input_buffers[0],
input_elem_count_per_entry_list[0],
&beta,
*output_buffer,
output_elem_count_per_entry));
// Add bias
if (bias)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_util::set_tensor_descriptor(
output_data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnAddTensor(
cuda_config->get_cudnn_handle(),
&alpha,
bias_desc,
*data[1],
&beta,
output_data_desc,
*output_buffer));
}
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_backward_data_propagation(
cudaStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
// Too slow
/*
cusparse_safe_call(cusparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = (add_update_to_destination ? 1.0F : 0.0F);
cusparseMatDescr_t mat_descr;
cusparse_safe_call(cusparseCreateMatDescr(&mat_descr));
cusparse_safe_call(cusparseScsrmm(
cuda_config->get_cusparse_handle(),
CUSPARSE_OPERATION_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry,
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
*input_errors_buffer,
input_elem_count_per_entry));
*/
cuda_util::set_with_value(
*cuda_config,
*temporary_working_per_entry_buffer,
0.0F,
input_elem_count_per_entry_aligned * entry_count,
stream_id);
cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id));
// transpose output
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(cublasSgeam(
cuda_config->get_cublas_handle(),
CUBLAS_OP_T,
CUBLAS_OP_N,
entry_count,
output_elem_count_per_entry,
&alpha,
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
entry_count,
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
entry_count));
}
std::pair<int, int> entry32_block_size_and_count = get_entry32_backprop_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE),
output_elem_count_per_entry,
entry32_block_size_and_count.second,
32);
sparse_fully_connected_1x1_backprop_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
*temporary_working_per_entry_buffer,
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
// transpose input
{
float alpha = 1.0F;
float beta = (add_update_to_destination ? 1.0F : 0.0F);
cublas_safe_call(cublasSgeam(
cuda_config->get_cublas_handle(),
CUBLAS_OP_T,
CUBLAS_OP_N,
input_elem_count_per_entry_list[0],
entry_count,
&alpha,
*temporary_working_per_entry_buffer,
entry_count,
&beta,
*input_errors_buffer,
input_elem_count_per_entry_list[0],
*input_errors_buffer,
input_elem_count_per_entry_list[0]));
}
}
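/* The commented-out block above ("Too slow") records a design choice: rather
 * than calling csrmm with CUSPARSE_OPERATION_TRANSPOSE, the layer transposes the
 * dense matrices with cublasSgeam and scatters through the custom atomic kernel.
 * The geam-as-transpose idiom used above, in isolation (same beta = 0, B == C
 * pattern as the calls in this function; illustrative only): */
static void transpose_with_geam(cublasHandle_t handle, const float *A, int rows, int cols, float *At)
{
	const float one = 1.0F, zero = 0.0F;
	/* A is rows x cols in column-major storage (lda = rows); At becomes cols x rows. */
	cublas_safe_call(cublasSgeam(
		handle,
		CUBLAS_OP_T, CUBLAS_OP_N,
		cols, rows,
		&one, A, rows,
		&zero, At, cols,
		At, cols));
}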
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_backward_weights_propagation(
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::ptr>& gradient,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
// Update weights
{
cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id));
// transpose input
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(cublasSgeam(
cuda_config->get_cublas_handle(),
CUBLAS_OP_T,
CUBLAS_OP_N,
entry_count,
input_elem_count_per_entry_list[0],
&alpha,
*input_neurons_buffers[0],
input_elem_count_per_entry_list[0],
&beta,
*temporary_working_per_entry_buffer,
entry_count,
*temporary_working_per_entry_buffer,
entry_count));
}
// transpose output
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(cublasSgeam(
cuda_config->get_cublas_handle(),
CUBLAS_OP_T,
CUBLAS_OP_N,
entry_count,
output_elem_count_per_entry,
&alpha,
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
entry_count,
((float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
entry_count));
}
std::pair<int, int> entry32_block_size_and_count = get_entry32_update_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE),
output_elem_count_per_entry,
entry32_block_size_and_count.second,
32);
if (entry32_block_size_and_count.second > 1)
{
sparse_fully_connected_1x1_update_weights_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
((const float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
*temporary_working_per_entry_buffer,
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
}
else
{
sparse_fully_connected_1x1_update_weights_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
((const float *)*temporary_working_per_entry_buffer) + input_elem_count_per_entry_aligned * entry_count,
*temporary_working_per_entry_buffer,
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
}
}
// Update biases
if (bias)
{
cudnn_safe_call(cudnnSetStream(cuda_config->get_cudnn_handle(), stream_id));
cudnn_util::set_tensor_descriptor(
output_data_desc,
output_configuration_specific,
entry_count);
float alpha = 1.0F;
float beta = 1.0F;
cudnn_safe_call(cudnnConvolutionBackwardBias(
cuda_config->get_cudnn_handle(),
&alpha,
output_data_desc,
*output_errors_buffer,
&beta,
bias_desc,
*gradient[1]));
}
}
void sparse_fully_connected_1x1_layer_updater_cuda::updater_configured()
{
std::shared_ptr<const sparse_convolution_layer> layer_derived = std::dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
bias = layer_derived->bias;
int input_data_single_update_32block_entry_size = input_elem_count_per_entry_list[0] * 32 * sizeof(float);
max_entry32_update_block_size = std::max(1, cuda_config->l2_cache_size / 2 / input_data_single_update_32block_entry_size);
int input_data_single_backprop_32block_entry_size = input_elem_count_per_entry_list[0] * 32 * sizeof(float);
max_entry32_backprop_block_size = std::max(1, cuda_config->l2_cache_size / 2 / input_data_single_backprop_32block_entry_size);
cudnn_util::set_tensor_bias_descriptor(
bias_desc,
output_configuration_specific.feature_map_count,
static_cast<unsigned int>(output_configuration_specific.dimension_sizes.size()));
input_elem_count_per_entry_aligned = (input_elem_count_per_entry_list[0] + 4 - 1) / 4 * 4;
output_elem_count_per_entry_aligned = (output_elem_count_per_entry + 4 - 1) / 4 * 4;
}
size_t sparse_fully_connected_1x1_layer_updater_cuda::get_temporary_working_per_entry_buffer_size(const layer_action& action) const
{
if ((action.get_action_type() == layer_action::backward_data) || (action.get_action_type() == layer_action::backward_weights))
return (input_elem_count_per_entry_aligned * sizeof(float)) + (output_elem_count_per_entry_aligned * sizeof(float));
else
return layer_updater_cuda::get_temporary_working_per_entry_buffer_size(action);
}
void sparse_fully_connected_1x1_layer_updater_cuda::notify_data_custom(layer_data_custom::const_ptr host_data_custom)
{
max_column_index_count_per_row = 0;
const std::vector<int>& row_indices = host_data_custom->at(1);
for(int i = 0; i < row_indices.size() - 1; ++i)
max_column_index_count_per_row = std::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_update_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = (entry_count + 32 - 1) / 32;
if (candidate_block_size <= max_entry32_update_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_entry32_update_block_size - 1) / max_entry32_update_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_backprop_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = (entry_count + 32 - 1) / 32;
if (candidate_block_size <= max_entry32_backprop_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_entry32_backprop_block_size - 1) / max_entry32_backprop_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
bool sparse_fully_connected_1x1_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
return false;
}
bool sparse_fully_connected_1x1_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
bool sparse_fully_connected_1x1_layer_updater_cuda::is_backward_weights_dependent_on_input_buffer(unsigned int data_input_index) const
{
return true;
}
}
}
|
8fb9913486d1c202b093691a862f1ff11b8d5770.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "..\common\book.h"
#include "..\common\lock.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#define SISE (100*1024*1024)
#define ELEMNTS (SISE/sizeof(unsigned int))
#define ENTRIES 1024
struct Entry
{
unsigned int key;
void *value;
Entry *next;
};
struct Table
{
size_t count;
Entry **entries;
Entry *pool;
};
void init_table(Table &table,size_t entr,size_t elem)
{
table.count=entr;
//note: calloc(size_t nelem, size_t elsize) allocates nelem blocks of elsize bytes each and sets the initial
//value to 0
HANDLE_ERROR(hipMalloc((void **)&table.entries,entr*sizeof(Entry*)));
HANDLE_ERROR(hipMemset(table.entries,0,entr*sizeof(Entry *)));
HANDLE_ERROR(hipMalloc((void **)&table.pool,elem*sizeof(Entry)));
}
__device__ __host__ size_t hash(unsigned int key,size_t count)
{
return key%count;
}
__global__ void add_elem(Table table,unsigned int *key,void **value,Lock *lock)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (tid<ELEMNTS)
{
unsigned int key_=key[tid];
size_t hashvalue=hash(key_,table.count);
for (int i=0;i!=32;++i)
{
if (tid%32==i)
{
Entry *newelem=&(table.pool[tid]);
newelem->key=key_;
newelem->value=value[tid];
lock[hashvalue].lock();
newelem->next=table.entries[hashvalue];
table.entries[hashvalue]=newelem;
lock[hashvalue].unlock();
}
}
tid+=stride;
}
}
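/* The `for (i=0;i!=32;++i) if (tid%32==i)` pattern above serializes the 32 lanes
 * of a warp: if every lane tried to grab the same bucket's spin lock at once,
 * the lane holding it could be starved by its spinning warp-mates and the warp
 * would deadlock, so only one lane per warp attempts lock()/unlock() per loop
 * iteration. A minimal spin lock consistent with those calls (the actual Lock
 * comes from ../common/lock.h and may differ in detail): */
struct SpinLockSketch
{
	int *mutex; /* device memory; 0 = free, 1 = held */
	__device__ void lock(void) { while (atomicCAS(mutex, 0, 1) != 0) { } }
	__device__ void unlock(void) { atomicExch(mutex, 0); }
};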
void freettable(Table &table)
{
HANDLE_ERROR(hipFree(table.entries));
HANDLE_ERROR(hipFree(table.pool));
}
void copytohost_table(const Table &dev_t,Table &host_t)
{
host_t.count=dev_t.count;
host_t.entries=(Entry **)calloc(dev_t.count,sizeof(Entry*));
host_t.pool=(Entry *)malloc(ELEMNTS*sizeof(Entry));
HANDLE_ERROR( hipMemcpy( host_t.entries, dev_t.entries,dev_t.count * sizeof(Entry*),hipMemcpyDeviceToHost ) );
HANDLE_ERROR( hipMemcpy( host_t.pool, dev_t.pool,ELEMNTS * sizeof( Entry ),hipMemcpyDeviceToHost ) );
for (int i=0;i<dev_t.count;++i)
{
if (host_t.entries[i]!=NULL)
{
host_t.entries[i]=(Entry *)((size_t)host_t.pool+(size_t)host_t.entries[i]-(size_t)dev_t.pool);
}
}
for (int i=0;i!=ELEMNTS;++i)
{
if (host_t.pool[i].next!=NULL)
{
host_t.pool[i].next=(Entry *)((size_t)host_t.pool+(size_t)host_t.pool[i].next-(size_t)dev_t.pool);
}
}
}
void verifytable(const Table &dev_table)
{
Table table;
copytohost_table(dev_table,table);
int cnt=0;
for (size_t i=0;i!=table.count;++i)
{
Entry *elm=table.entries[i];
while (elm!=NULL)
{
++cnt;
if (hash(elm->key,table.count)!=i)
{
printf("%d hash to %ld,but located in %ld\n",elm->key,hash(elm->key,table.count),i);
}
elm=elm->next;
}
}
if (cnt!=ELEMNTS)
{
printf("%d was found ,but real num is %d\n",cnt,ELEMNTS);
}else{
printf("%d elemnts was all found.sucucess!\n",cnt);
}
free(table.entries);
free(table.pool);
}
int main(void)
{
unsigned int *buff=(unsigned int*)big_random_block(SISE);
unsigned int *dev_key;
void **dev_value;
HANDLE_ERROR(hipMalloc((void **)&dev_key,SISE));
HANDLE_ERROR(hipMalloc((void **)&dev_value,SISE));
HANDLE_ERROR(hipMemcpy(dev_key,buff,SISE,hipMemcpyHostToDevice));
Lock lock[ENTRIES];
Lock *dev_lock;
HANDLE_ERROR(hipMalloc((void **)&dev_lock,ENTRIES*sizeof(Lock)));
HANDLE_ERROR(hipMemcpy(dev_lock,lock,ENTRIES*sizeof(Lock),hipMemcpyHostToDevice));
Table table;
init_table(table,ENTRIES,ELEMNTS);
hipEvent_t start,stop;
HANDLE_ERROR(hipEventCreate(&start,0));
HANDLE_ERROR(hipEventCreate(&stop,0));
HANDLE_ERROR(hipEventRecord(start,0));
hipLaunchKernelGGL(( add_elem), dim3(60),dim3(256), 0, 0, table,dev_key,dev_value,dev_lock);
HANDLE_ERROR( hipEventRecord( stop, 0 ) );
HANDLE_ERROR( hipEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( hipEventElapsedTime( &elapsedTime,start, stop ) );
printf( "Time to hash: %3.1f ms\n", elapsedTime );
verifytable(table);
freettable(table);
HANDLE_ERROR( hipEventDestroy( start ) );
HANDLE_ERROR( hipEventDestroy( stop ) );
HANDLE_ERROR( hipFree( dev_lock ) );
HANDLE_ERROR( hipFree( dev_key ) );
HANDLE_ERROR( hipFree( dev_value ) );
free(buff);
getchar();
return 0;
}
| 8fb9913486d1c202b093691a862f1ff11b8d5770.cu | #include "cuda.h"
#include "..\common\book.h"
#include "..\common\lock.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#define SISE (100*1024*1024)
#define ELEMNTS (SISE/sizeof(unsigned int))
#define ENTRIES 1024
struct Entry
{
unsigned int key;
void *value;
Entry *next;
};
struct Table
{
size_t count;
Entry **entries;
Entry *pool;
};
void init_table(Table &table,size_t entr,size_t elem)
{
table.count=entr;
//note: calloc(size_t nelem, size_t elsize) allocates nelem blocks of elsize bytes each and sets the initial
//value to 0
HANDLE_ERROR(cudaMalloc((void **)&table.entries,entr*sizeof(Entry*)));
HANDLE_ERROR(cudaMemset(table.entries,0,entr*sizeof(Entry *)));
HANDLE_ERROR(cudaMalloc((void **)&table.pool,elem*sizeof(Entry)));
}
__device__ __host__ size_t hash(unsigned int key,size_t count)
{
return key%count;
}
__global__ void add_elem(Table table,unsigned int *key,void **value,Lock *lock)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (tid<ELEMNTS)
{
unsigned int key_=key[tid];
size_t hashvalue=hash(key_,table.count);
for (int i=0;i!=32;++i)
{
if (tid%32==i)
{
Entry *newelem=&(table.pool[tid]);
newelem->key=key_;
newelem->value=value[tid];
lock[hashvalue].lock();
newelem->next=table.entries[hashvalue];
table.entries[hashvalue]=newelem;
lock[hashvalue].unlock();
}
}
tid+=stride;
}
}
void freettable(Table &table)
{
HANDLE_ERROR(cudaFree(table.entries));
HANDLE_ERROR(cudaFree(table.pool));
}
void copytohost_table(const Table &dev_t,Table &host_t)
{
host_t.count=dev_t.count;
host_t.entries=(Entry **)calloc(dev_t.count,sizeof(Entry*));
host_t.pool=(Entry *)malloc(ELEMNTS*sizeof(Entry));
HANDLE_ERROR( cudaMemcpy( host_t.entries, dev_t.entries,dev_t.count * sizeof(Entry*),cudaMemcpyDeviceToHost ) );
HANDLE_ERROR( cudaMemcpy( host_t.pool, dev_t.pool,ELEMNTS * sizeof( Entry ),cudaMemcpyDeviceToHost ) );
for (int i=0;i<dev_t.count;++i)
{
if (host_t.entries[i]!=NULL)
{
host_t.entries[i]=(Entry *)((size_t)host_t.pool+(size_t)host_t.entries[i]-(size_t)dev_t.pool);
}
}
for (int i=0;i!=ELEMNTS;++i)
{
if (host_t.pool[i].next!=NULL)
{
host_t.pool[i].next=(Entry *)((size_t)host_t.pool+(size_t)host_t.pool[i].next-(size_t)dev_t.pool);
}
}
}
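/* copytohost_table() above relocates the device pointers embedded in the copied
 * Entry pool: every address is rebased from dev_t.pool to host_t.pool with plain
 * offset arithmetic, host = host_pool + (dev - dev_pool). The same idea as a
 * small helper (illustrative only): */
static Entry *rebase_entry(Entry *dev_ptr, Entry *dev_pool, Entry *host_pool)
{
	if (dev_ptr == NULL) return NULL;
	size_t offset = (size_t)dev_ptr - (size_t)dev_pool; /* byte offset into the pool */
	return (Entry *)((size_t)host_pool + offset);
}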
void verifytable(const Table &dev_table)
{
Table table;
copytohost_table(dev_table,table);
int cnt=0;
for (size_t i=0;i!=table.count;++i)
{
Entry *elm=table.entries[i];
while (elm!=NULL)
{
++cnt;
if (hash(elm->key,table.count)!=i)
{
printf("%d hash to %ld,but located in %ld\n",elm->key,hash(elm->key,table.count),i);
}
elm=elm->next;
}
}
if (cnt!=ELEMNTS)
{
printf("%d was found ,but real num is %d\n",cnt,ELEMNTS);
}else{
printf("%d elemnts was all found.sucucess!\n",cnt);
}
free(table.entries);
free(table.pool);
}
int main(void)
{
unsigned int *buff=(unsigned int*)big_random_block(SISE);
unsigned int *dev_key;
void **dev_value;
HANDLE_ERROR(cudaMalloc((void **)&dev_key,SISE));
HANDLE_ERROR(cudaMalloc((void **)&dev_value,SISE));
HANDLE_ERROR(cudaMemcpy(dev_key,buff,SISE,cudaMemcpyHostToDevice));
Lock lock[ENTRIES];
Lock *dev_lock;
HANDLE_ERROR(cudaMalloc((void **)&dev_lock,ENTRIES*sizeof(Lock)));
HANDLE_ERROR(cudaMemcpy(dev_lock,lock,ENTRIES*sizeof(Lock),cudaMemcpyHostToDevice));
Table table;
init_table(table,ENTRIES,ELEMNTS);
cudaEvent_t start,stop;
HANDLE_ERROR(cudaEventCreate(&start,0));
HANDLE_ERROR(cudaEventCreate(&stop,0));
HANDLE_ERROR(cudaEventRecord(start,0));
add_elem<<<60,256>>>(table,dev_key,dev_value,dev_lock);
HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
HANDLE_ERROR( cudaEventSynchronize( stop ) );
float elapsedTime;
HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,start, stop ) );
printf( "Time to hash: %3.1f ms\n", elapsedTime );
verifytable(table);
freettable(table);
HANDLE_ERROR( cudaEventDestroy( start ) );
HANDLE_ERROR( cudaEventDestroy( stop ) );
HANDLE_ERROR( cudaFree( dev_lock ) );
HANDLE_ERROR( cudaFree( dev_key ) );
HANDLE_ERROR( cudaFree( dev_value ) );
free(buff);
getchar();
return 0;
}
|
faa4a91b5262cbb9c716555951126f85581d0aa0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <sys/time.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
struct timeval tv_init;
static int now (int x) {
struct timeval tv_check;
gettimeofday(&tv_check, NULL);
long int usec = tv_check.tv_usec - tv_init.tv_usec;
long int sec = tv_check.tv_sec - tv_init.tv_sec;
long int msec = usec / 1000;
return (int)(sec*1000+msec);
}
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
printf( "The file was not opened\n" );
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++)
{
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
fclose(fp);
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed){
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temparary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
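// each of the `iteration` time steps consumes a one-cell halo on every side of the
// shared tile, so only the central (BLOCK_SIZE - 2*iteration)^2 cells remain valid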
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
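// clamp the stencil neighbours into the valid sub-range so threads on the block
// border reuse the nearest valid cell instead of reading cells that were never loaded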
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
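// explicit (forward Euler) update of the discretized heat equation:
// T_new = T + (step/Cap) * (P + lateral conduction in y and x + vertical leakage to ambient)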
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
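// per-cell RC thermal model: Cap is the heat capacity, Rx/Ry the lateral thermal
// resistances and Rz the vertical resistance towards ambient; step is sized so the
// steepest possible temperature change per step (max power density / capacitance)
// stays within PRECISION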
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
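// ping-pong the two temperature buffers: the buffer written last becomes the input
// for the next chunk of up to num_iterations time steps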
int temp = src;
src = dst;
dst = temp;
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
}
return dst;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
gettimeofday(&tv_init, NULL);
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations = 60;
int pyramid_height = 1; // number of iterations per kernel launch (pyramid height)
if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2 // adding one iteration extends the pyramid base by 2 cells per border
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
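// number of thread blocks, rounded up so the non-overlapping interior (small) blocks tile the whole grid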
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
hipMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
hipMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, hipMemcpyHostToDevice);
hipMalloc((void**)&MatrixPower, sizeof(float)*size);
hipMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, hipMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
int t0 = now(0);
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
int t1 = now(1);
printf("Ending simulation\n");
hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, hipMemcpyDeviceToHost);
printf("TIMING: %d\n", t1-t0);
writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
hipFree(MatrixPower);
hipFree(MatrixTemp[0]);
hipFree(MatrixTemp[1]);
free(MatrixOut);
}
| faa4a91b5262cbb9c716555951126f85581d0aa0.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <sys/time.h>
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* chip parameters */
float t_chip = 0.0005;
float chip_height = 0.016;
float chip_width = 0.016;
/* ambient temperature, assuming no package at all */
float amb_temp = 80.0;
void run(int argc, char** argv);
/* define timer macros */
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
struct timeval tv_init;
static int now (int x) {
struct timeval tv_check;
gettimeofday(&tv_check, NULL);
long int usec = tv_check.tv_usec - tv_init.tv_usec;
long int sec = tv_check.tv_sec - tv_init.tv_sec;
long int msec = usec / 1000;
return (int)(sec*1000+msec);
}
void
fatal(char *s)
{
fprintf(stderr, "error: %s\n", s);
}
void writeoutput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j, index=0;
FILE *fp;
char str[STR_SIZE];
if( (fp = fopen(file, "w" )) == 0 )
printf( "The file was not opened\n" );
for (i=0; i < grid_rows; i++)
for (j=0; j < grid_cols; j++)
{
sprintf(str, "%d\t%g\n", index, vect[i*grid_cols+j]);
fputs(str,fp);
index++;
}
fclose(fp);
}
void readinput(float *vect, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float val;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fatal("not enough lines in file");
//if ((sscanf(str, "%d%f", &index, &val) != 2) || (index != ((i-1)*(grid_cols-2)+j-1)))
if ((sscanf(str, "%f", &val) != 1))
fatal("invalid file format");
vect[i*grid_cols+j] = val;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration, //number of iteration
float *power, //power input
float *temp_src, //temperature input/output
float *temp_dst, //temperature input/output
int grid_cols, //Col of grid
int grid_rows, //Row of grid
int border_cols, // border offset
int border_rows, // border offset
float Cap, //Capacitance
float Rx,
float Ry,
float Rz,
float step,
float time_elapsed){
__shared__ float temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_t[BLOCK_SIZE][BLOCK_SIZE]; // saving temporary temperature result
float amb_temp = 80.0;
float step_div_Cap;
float Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=step/Cap;
Rx_1=1/Rx;
Ry_1=1/Ry;
Rz_1=1/Rz;
// each block finally computes result for a small block
// after N iterations.
// it is the non-overlapping small blocks that cover
// all the input data
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;//EXPAND_RATE
int small_block_cols = BLOCK_SIZE-iteration*2;//EXPAND_RATE
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index]; // Load the temperature data from global memory to shared memory
power_on_cuda[ty][tx] = power[index];// Load the power data from global memory to shared memory
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed) //Assign the computation range
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
/*
compute N time steps
*/
int compute_tran_temp(float *MatrixPower,float *MatrixTemp[2], int col, int row, \
int total_iterations, int num_iterations, int blockCols, int blockRows, int borderCols, int borderRows)
{
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
float grid_height = chip_height / row;
float grid_width = chip_width / col;
float Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
float Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
float Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
float Rz = t_chip / (K_SI * grid_height * grid_width);
float max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
float step = PRECISION / max_slope;
float t;
float time_elapsed;
time_elapsed=0.001;
int src = 1, dst = 0;
for (t = 0; t < total_iterations; t+=num_iterations) {
int temp = src;
src = dst;
dst = temp;
calculate_temp<<<dimGrid, dimBlock>>>(MIN(num_iterations, total_iterations-t), MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
col,row,borderCols, borderRows, Cap,Rx,Ry,Rz,step,time_elapsed);
}
return dst;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
gettimeofday(&tv_init, NULL);
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
run(argc,argv);
return EXIT_SUCCESS;
}
void run(int argc, char** argv)
{
int size;
int grid_rows,grid_cols;
float *FilesavingTemp,*FilesavingPower,*MatrixOut;
char *tfile, *pfile, *ofile;
int total_iterations = 60;
int pyramid_height = 1; // number of iterations per kernel launch (pyramid height)
if (argc != 7)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||
(grid_cols = atoi(argv[1]))<=0||
(pyramid_height = atoi(argv[2]))<=0||
(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
ofile=argv[6];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2 // adding one iteration extends the pyramid base by 2 cells per border
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
FilesavingTemp = (float *) malloc(size*sizeof(float));
FilesavingPower = (float *) malloc(size*sizeof(float));
MatrixOut = (float *) calloc (size, sizeof(float));
if( !FilesavingPower || !FilesavingTemp || !MatrixOut)
fatal("unable to allocate memory");
printf("pyramidHeight: %d\ngridSize: [%d, %d]\nborder:[%d, %d]\nblockGrid:[%d, %d]\ntargetBlock:[%d, %d]\n",\
pyramid_height, grid_cols, grid_rows, borderCols, borderRows, blockCols, blockRows, smallBlockCol, smallBlockRow);
readinput(FilesavingTemp, grid_rows, grid_cols, tfile);
readinput(FilesavingPower, grid_rows, grid_cols, pfile);
float *MatrixTemp[2], *MatrixPower;
cudaMalloc((void**)&MatrixTemp[0], sizeof(float)*size);
cudaMalloc((void**)&MatrixTemp[1], sizeof(float)*size);
cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(float)*size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&MatrixPower, sizeof(float)*size);
cudaMemcpy(MatrixPower, FilesavingPower, sizeof(float)*size, cudaMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
int t0 = now(0);
int ret = compute_tran_temp(MatrixPower,MatrixTemp,grid_cols,grid_rows, \
total_iterations,pyramid_height, blockCols, blockRows, borderCols, borderRows);
int t1 = now(1);
printf("Ending simulation\n");
cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(float)*size, cudaMemcpyDeviceToHost);
printf("TIMING: %d\n", t1-t0);
writeoutput(MatrixOut,grid_rows, grid_cols, ofile);
cudaFree(MatrixPower);
cudaFree(MatrixTemp[0]);
cudaFree(MatrixTemp[1]);
free(MatrixOut);
}
|
402230c0ac4bf08396160cac4da56e40c54893dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void multiplyBy2(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
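// one thread per element; the bounds check handles the final partial block when *size is not a multiple of blockDim.x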
if (ix < *size) {
out[ix] = in[ix] * 2;
}
} | 402230c0ac4bf08396160cac4da56e40c54893dc.cu | #include "includes.h"
__global__ void multiplyBy2(int *size, int *in, int *out) {
const int ix = threadIdx.x + blockIdx.x * blockDim.x;
if (ix < *size) {
out[ix] = in[ix] * 2;
}
} |
40906b11620c2df7e6d5302c3b5ac65de40bfb72.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2011 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
// includes
#include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples
#include <shrQATest.h> // This is for automated testing output (--qatest)
#include <shrUtils.h>
#include <hip/hip_runtime.h>
#include <memory>
#include <iostream>
#include <cassert>
// defines, project
#define MEMCOPY_ITERATIONS 10
#define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M
#define DEFAULT_INCREMENT (1 << 22) //4 M
#define CACHE_CLEAR_SIZE (1 << 24) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (1 << 26) //64 M
#define SHMOO_MEMSIZE_START (1 << 10) //1 KB
#define SHMOO_INCREMENT_1KB (1 << 10) //1 KB
#define SHMOO_INCREMENT_2KB (1 << 11) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_INCREMENT_1MB (1 << 20) //1 MB
#define SHMOO_INCREMENT_2MB (1 << 21) //2 MB
#define SHMOO_INCREMENT_4MB (1 << 22) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_LIMIT_1MB (1 << 20) //1 MB
#define SHMOO_LIMIT_16MB (1 << 24) //16 MB
#define SHMOO_LIMIT_32MB (1 << 25) //32 MB
//enums, project
enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE };
enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE };
enum printMode { USER_READABLE, CSV };
enum memoryMode { PINNED, PAGEABLE };
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
int *pArgc = NULL;
char **pArgv = NULL;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(const int argc, const char **argv);
void testBandwidth( unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc);
void printResultsCSV(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( hipError_t err, const char *file, const int line )
{
if( hipSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(hipGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
hipDeviceProp_t deviceProp;
checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(-1); \
}
checkCudaErrors( hipSetDevice(devID) );
printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceCount( &device_count );
// Find the best major SM Architecture GPU device
while ( current_device < device_count ) {
hipGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while( current_device < device_count ) {
hipGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
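// rough peak-throughput proxy: multiprocessor count x cores per SM x core clock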
int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if( compute_perf > max_compute_perf ) {
// If we find GPU with SM major > 2, search only these
if ( best_SM_arch > 2 ) {
// If our device==dest_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
hipDeviceProp_t deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameters\n");
exit(-1);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors( hipSetDevice( devID ) );
checkCudaErrors( hipGetDeviceProperties(&deviceProp, devID) );
printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name);
}
return devID;
}
// end of CUDA Helper Functions
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
pArgc = &argc;
pArgv = argv;
shrQAStart(argc, argv);
// set logfile name and start logs
shrSetLogFileName ("bandwidthTest.txt");
shrLog("%s Starting...\n\n", argv[0]);
int iRetVal = runTest(argc, (const char**)argv);
// finish
shrQAFinishExit(argc, (const char **)argv, (iRetVal==0 ? QA_PASSED : QA_FAILED));
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
int runTest(const int argc, const char **argv)
{
int start = DEFAULT_SIZE;
int end = DEFAULT_SIZE;
int startDevice = 0;
int endDevice = 0;
int increment = DEFAULT_INCREMENT;
testMode mode = QUICK_MODE;
bool htod = false;
bool dtoh = false;
bool dtod = false;
bool wc = false;
char *modeStr;
char *device = NULL;
printMode printmode = USER_READABLE;
char *memModeStr = NULL;
memoryMode memMode = PAGEABLE;
//process command line args
if(checkCmdLineFlag( argc, argv, "help"))
{
printHelp();
return 0;
}
if(checkCmdLineFlag( argc, argv, "csv"))
{
printmode = CSV;
}
if( getCmdLineArgumentString(argc, argv, "memory", &memModeStr) )
{
if( strcmp(memModeStr, "pageable") == 0 )
{
memMode = PAGEABLE;
}
else if( strcmp(memModeStr, "pinned") == 0)
{
memMode = PINNED;
}
else
{
shrLog("Invalid memory mode - valid modes are pageable or pinned\n");
shrLog("See --help for more information\n");
return -1000;
}
}
else
{
//default - pageable memory
memMode = PAGEABLE;
}
if( shrGetCmdLineArgumentstr(argc, argv, "device", &device) )
{
int deviceCount;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess) {
shrLog( "hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id) );
shrQAFinishExit(*pArgc, (const char **)pArgv, QA_FAILED);
}
if( deviceCount == 0 )
{
shrLog("!!!!!No devices found!!!!!\n");
return -2000;
}
if( strcmp (device, "all") == 0 )
{
printf ("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
startDevice = 0;
endDevice = deviceCount-1;
}
else
{
startDevice = endDevice = atoi(device);
if( startDevice >= deviceCount || startDevice < 0)
{
shrLog("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0);
startDevice = endDevice = 0;
}
}
}
shrLog("Running on...\n\n");
for( int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
hipDeviceProp_t deviceProp;
hipError_t error_id = hipGetDeviceProperties(&deviceProp, currentDevice);
if (error_id == hipSuccess) {
shrLog(" Device %d: %s\n", currentDevice, deviceProp.name);
} else {
shrLog( "hipGetDeviceProperties returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id) );
shrQAFinishExit(*pArgc, (const char **)pArgv, QA_FAILED);
}
}
if( shrGetCmdLineArgumentstr(argc, argv, "mode", &modeStr) )
{
//figure out the mode
if( strcmp(modeStr, "quick") == 0 )
{
shrLog(" Quick Mode\n\n");
mode = QUICK_MODE;
}
else if( strcmp(modeStr, "shmoo") == 0 )
{
shrLog(" Shmoo Mode\n\n");
mode = SHMOO_MODE;
}
else if( strcmp(modeStr, "range") == 0 )
{
shrLog(" Range Mode\n\n");
mode = RANGE_MODE;
}
else
{
shrLog("Invalid mode - valid modes are quick, range, or shmoo\n");
shrLog("See --help for more information\n");
return -3000;
}
}
else
{
//default mode - quick
shrLog(" Quick Mode\n\n");
mode = QUICK_MODE;
}
if(checkCmdLineFlag( argc, argv, "htod"))
htod = true;
if(checkCmdLineFlag( argc, argv, "dtoh"))
dtoh = true;
if(checkCmdLineFlag( argc, argv, "dtod"))
dtod = true;
#if CUDART_VERSION >= 2020
if(checkCmdLineFlag( argc, argv, "wc"))
wc = true;
#endif
if(checkCmdLineFlag( argc, argv, "cputiming"))
bDontUseGPUTiming = true;
if( !htod && !dtoh && !dtod )
{
//default: All
htod = true;
dtoh = true;
dtod = true;
}
if( RANGE_MODE == mode )
{
if( shrGetCmdLineArgumenti( argc, argv, "start", &start) )
{
if( start <= 0 )
{
shrLog("Illegal argument - start must be greater than zero\n");
return -4000;
}
}
else
{
shrLog("Must specify a starting size in range mode\n");
shrLog("See --help for more information\n");
return -5000;
}
if( shrGetCmdLineArgumenti( argc, argv, "end", &end) )
{
if( end <= 0 )
{
shrLog("Illegal argument - end must be greater than zero\n");
return -6000;
}
if( start > end )
{
shrLog("Illegal argument - start is greater than end\n");
return -7000;
}
}
else
{
shrLog("Must specify an end size in range mode.\n");
shrLog("See --help for more information\n");
return -8000;
}
if( shrGetCmdLineArgumenti( argc, argv, "increment", &increment) )
{
if( increment <= 0 )
{
shrLog("Illegal argument - increment must be greater than zero\n");
return -9000;
}
}
else
{
shrLog("Must specify an increment in user mode\n");
shrLog("See --help for more information\n");
return -10000;
}
}
if( htod )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
if( dtoh )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc);
}
if( dtod )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
// Ensure that we reset all CUDA Devices in question
for (int nDevice = startDevice; nDevice <= endDevice; nDevice++) {
hipSetDevice(nDevice);
hipDeviceReset();
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
void
testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
switch( mode )
{
case QUICK_MODE:
testBandwidthQuick( DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc );
break;
case RANGE_MODE:
testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc);
break;
case SHMOO_MODE:
testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc);
break;
default:
break;
}
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies we're going to run
unsigned int count = 1 + ((end - start) / increment);
unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
double *bandwidths = ( double * ) malloc( count * sizeof(double) );
// Before calculating the cumulative bandwidth, initialize bandwidths array to NULL
for (unsigned int i = 0; i < count; i++)
bandwidths[i] = 0.0;
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
hipSetDevice(currentDevice);
//run each of the copies
for(unsigned int i = 0; i < count; i++)
{
memSizes[i] = start + i * increment;
switch(kind)
{
case DEVICE_TO_HOST: bandwidths[i] += testDeviceToHostTransfer( memSizes[i], memMode, wc);
break;
case HOST_TO_DEVICE: bandwidths[i] += testHostToDeviceTransfer( memSizes[i], memMode, wc);
break;
case DEVICE_TO_DEVICE: bandwidths[i] += testDeviceToDeviceTransfer( memSizes[i] );
break;
}
}
} // Complete the bandwidth computation on all the devices
//print results
if(printmode == CSV)
{
printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
else
{
printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void
testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies to make
unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
+ ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB)
+ ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB)
+ ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB)
+ ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB)
+ ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB)
+ ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB);
unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
double *bandwidths = ( double * ) malloc( count * sizeof(double) );
// Before calculating the cumulative bandwidth, initialize bandwidths array to NULL
for (unsigned int i = 0; i < count; i++)
bandwidths[i] = 0.0;
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
hipSetDevice(currentDevice);
//Run the shmoo
int iteration = 0;
unsigned int memSize = 0;
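// sweep the transfer size with progressively coarser increments: 1 KB steps up to 20 KB,
// 2 KB to 50 KB, 10 KB to 100 KB, 100 KB to 1 MB, 1 MB to 16 MB, 2 MB to 32 MB,
// then 4 MB steps up to the 64 MB maximum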
while( memSize <= SHMOO_MEMSIZE_MAX )
{
if( memSize < SHMOO_LIMIT_20KB )
{
memSize += SHMOO_INCREMENT_1KB;
}
else if( memSize < SHMOO_LIMIT_50KB )
{
memSize += SHMOO_INCREMENT_2KB;
}else if( memSize < SHMOO_LIMIT_100KB )
{
memSize += SHMOO_INCREMENT_10KB;
}else if( memSize < SHMOO_LIMIT_1MB )
{
memSize += SHMOO_INCREMENT_100KB;
}else if( memSize < SHMOO_LIMIT_16MB )
{
memSize += SHMOO_INCREMENT_1MB;
}else if( memSize < SHMOO_LIMIT_32MB )
{
memSize += SHMOO_INCREMENT_2MB;
}else
{
memSize += SHMOO_INCREMENT_4MB;
}
memSizes[iteration] = memSize;
switch(kind)
{
case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer( memSizes[iteration], memMode, wc );
break;
case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer( memSizes[iteration], memMode, wc );
break;
case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer( memSizes[iteration] );
break;
}
iteration++;
shrLog(".");
}
} // Complete the bandwidth computation on all the devices
//print results
shrLog("\n");
if( CSV == printmode)
{
printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
else
{
printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
unsigned char *h_idata = NULL;
unsigned char *h_odata = NULL;
hipEvent_t start, stop;
sdkCreateTimer( &timer );
checkCudaErrors( hipEventCreate( &start ) );
checkCudaErrors( hipEventCreate( &stop ) );
//allocate host memory
if( PINNED == memMode )
{
//pinned memory mode - use special function to get OS-pinned memory
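// write-combined (wc) pinned memory bypasses the host caches: it can speed up
// host-to-device streaming writes, but reading it back on the host is very slow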
#if CUDART_VERSION >= 2020
checkCudaErrors( hipHostMalloc( (void**)&h_idata, memSize, (wc) ? hipHostMallocWriteCombined : 0 ) );
checkCudaErrors( hipHostMalloc( (void**)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0 ) );
#else
checkCudaErrors( hipHostMalloc( (void**)&h_idata, memSize ) );
checkCudaErrors( hipHostMalloc( (void**)&h_odata, memSize ) );
#endif
}
else
{
//pageable memory mode - use malloc
h_idata = (unsigned char *)malloc( memSize );
h_odata = (unsigned char *)malloc( memSize );
if( h_idata == 0 || h_odata == 0 ) {
fprintf(stderr, "Not enough memory avaialable on host to run test!\n" );
exit(-1);
}
}
//initialize the memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char) (i & 0xff);
}
// allocate device memory
unsigned char* d_idata;
checkCudaErrors( hipMalloc( (void**) &d_idata, memSize));
//initialize the device memory
checkCudaErrors( hipMemcpy( d_idata, h_idata, memSize,
hipMemcpyHostToDevice) );
//copy data from GPU to Host
sdkStartTimer( &timer );
checkCudaErrors( hipEventRecord( start, 0 ) );
if( PINNED == memMode )
{
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
checkCudaErrors( hipMemcpyAsync( h_odata, d_idata, memSize,
hipMemcpyDeviceToHost, 0) );
}
}
else
{
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
checkCudaErrors( hipMemcpy( h_odata, d_idata, memSize,
hipMemcpyDeviceToHost) );
}
}
checkCudaErrors( hipEventRecord( stop, 0 ) );
// make sure GPU has finished copying
checkCudaErrors( hipDeviceSynchronize() );
//get the total elapsed time in ms
sdkStopTimer( &timer );
checkCudaErrors( hipEventElapsedTime( &elapsedTimeInMs, start, stop ) );
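// CUDA events time the asynchronous (pinned) copies on the GPU; pageable copies are
// synchronous, so the CPU timer value below is used for them (or when --cputiming forces it)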
if( PINNED != memMode || bDontUseGPUTiming )
{
elapsedTimeInMs = sdkGetTimerValue( &timer );
}
//calculate bandwidth in MB/s
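// MB/s = (bytes per copy * MEMCOPY_ITERATIONS) / (elapsed seconds * 2^20); the 1e3 factor converts ms to s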
bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
checkCudaErrors( hipEventDestroy(stop) );
checkCudaErrors( hipEventDestroy(start) );
sdkDeleteTimer( &timer );
if( PINNED == memMode )
{
checkCudaErrors( hipHostFree(h_idata) );
checkCudaErrors( hipHostFree(h_odata) );
}
else
{
free(h_idata);
free(h_odata);
}
checkCudaErrors(hipFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
hipEvent_t start, stop;
sdkCreateTimer( &timer );
checkCudaErrors( hipEventCreate( &start ) );
checkCudaErrors( hipEventCreate( &stop ) );
//allocate host memory
unsigned char *h_odata = NULL;
if( PINNED == memMode )
{
#if CUDART_VERSION >= 2020
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors( hipHostMalloc( (void**)&h_odata, memSize, (wc) ? hipHostMallocWriteCombined : 0 ) );
#else
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors( hipHostMalloc( (void**)&h_odata, memSize ) );
#endif
}
else
{
//pageable memory mode - use malloc
h_odata = (unsigned char *)malloc( memSize );
if( h_odata == 0 ) {
fprintf(stderr, "Not enough memory avaialable on host to run test!\n" );
exit(-1);
}
}
unsigned char *h_cacheClear1 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
unsigned char *h_cacheClear2 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
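// the cache-clear buffers are only written below and then freed; filling them is meant
// to flush previously used data out of the host caches before the timed copies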
if( h_cacheClear1 == 0 || h_cacheClear2 == 0 ) {
fprintf(stderr, "Not enough memory available on host to run test!\n" );
exit(-1);
}
//initialize the memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_odata[i] = (unsigned char) (i & 0xff);
}
for(unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++)
{
h_cacheClear1[i] = (unsigned char) (i & 0xff);
h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff));
}
//allocate device memory
unsigned char* d_idata;
checkCudaErrors( hipMalloc( (void**) &d_idata, memSize));
sdkStartTimer( &timer );
checkCudaErrors( hipEventRecord( start, 0 ) );
//copy host memory to device memory
if( PINNED == memMode )
{
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors( hipMemcpyAsync( d_idata, h_odata, memSize,
hipMemcpyHostToDevice, 0) );
}
}
else {
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors( hipMemcpy( d_idata, h_odata, memSize,
hipMemcpyHostToDevice) );
}
}
checkCudaErrors( hipEventRecord( stop, 0 ) );
checkCudaErrors( hipDeviceSynchronize() );
//total elapsed time in ms
sdkStopTimer( &timer );
checkCudaErrors( hipEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( PINNED != memMode || bDontUseGPUTiming )
{
elapsedTimeInMs = sdkGetTimerValue( &timer );
}
sdkResetTimer( &timer );
//calculate bandwidth in MB/s
bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
checkCudaErrors( hipEventDestroy(stop) );
checkCudaErrors( hipEventDestroy(start) );
sdkDeleteTimer( &timer );
if( PINNED == memMode )
{
checkCudaErrors( hipHostFree(h_odata) );
}
else
{
free(h_odata);
}
free(h_cacheClear1);
free(h_cacheClear2);
checkCudaErrors(hipFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToDeviceTransfer(unsigned int memSize)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
hipEvent_t start, stop;
sdkCreateTimer( &timer );
checkCudaErrors( hipEventCreate( &start ) );
checkCudaErrors( hipEventCreate( &stop ) );
//allocate host memory
unsigned char *h_idata = (unsigned char *)malloc( memSize );
if( h_idata == 0 ) {
fprintf(stderr, "Not enough memory avaialable on host to run test!\n" );
exit(-1);
}
//initialize the host memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char) (i & 0xff);
}
//allocate device memory
unsigned char *d_idata;
checkCudaErrors( hipMalloc( (void**) &d_idata, memSize));
unsigned char *d_odata;
checkCudaErrors( hipMalloc( (void**) &d_odata, memSize));
//initialize memory
checkCudaErrors( hipMemcpy( d_idata, h_idata, memSize,
hipMemcpyHostToDevice) );
//run the memcopy
sdkStartTimer( &timer );
checkCudaErrors( hipEventRecord( start, 0 ) );
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
checkCudaErrors( hipMemcpy( d_odata, d_idata, memSize,
hipMemcpyDeviceToDevice) );
}
checkCudaErrors( hipEventRecord( stop, 0 ) );
//Since device to device memory copies are non-blocking,
//hipDeviceSynchronize() is required in order to get
//proper timing.
checkCudaErrors( hipDeviceSynchronize() );
//get the total elapsed time in ms
sdkStopTimer( &timer );
checkCudaErrors( hipEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( bDontUseGPUTiming )
{
elapsedTimeInMs = sdkGetTimerValue( &timer );
}
//calculate bandwidth in MB/s
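// the 2x factor counts both the read and the write of memSize bytes on the device for each copy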
bandwidthInMBs = 2.0f * (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
sdkDeleteTimer( &timer );
free(h_idata);
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
return bandwidthInMBs;
}
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
void printResultsReadable(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc)
{
// log config information
if (kind == DEVICE_TO_DEVICE)
{
shrLog(" Device to Device Bandwidth, %i Device(s)\n", iNumDevs);
}
else
{
if (kind == DEVICE_TO_HOST)
{
shrLog(" Device to Host Bandwidth, %i Device(s), ", iNumDevs);
}
else if (kind == HOST_TO_DEVICE)
{
shrLog(" Host to Device Bandwidth, %i Device(s), ", iNumDevs);
}
if(memMode == PAGEABLE)
{
shrLog("Paged memory\n");
}
else if (memMode == PINNED)
{
shrLog("Pinned memory");
if (wc) {
shrLog(", Write-Combined Memory Enabled");
}
shrLog("\n");
}
}
shrLog(" Transfer Size (Bytes)\tBandwidth(MB/s)\n");
unsigned int i;
for(i = 0; i < (count - 1); i++)
{
shrLog(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]);
}
shrLog(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]);
}
///////////////////////////////////////////////////////////////////////////
//print results in a database format
///////////////////////////////////////////////////////////////////////////
void printResultsCSV(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc)
{
std::string sConfig;
// log config information
if (kind == DEVICE_TO_DEVICE)
{
sConfig += "D2D";
}
else
{
if (kind == DEVICE_TO_HOST)
{
sConfig += "D2H";
}
else if (kind == HOST_TO_DEVICE)
{
sConfig += "H2D";
}
if(memMode == PAGEABLE)
{
sConfig += "-Paged";
}
else if (memMode == PINNED)
{
sConfig += "-Pinned";
if (wc)
{
sConfig += "-WriteCombined";
}
}
}
unsigned int i;
double dSeconds = 0.0;
for(i = 0; i < count; i++)
{
dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)(1<<20));
shrLogEx(LOGBOTH | MASTER, 0, "bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n",
sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs);
}
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void)
{
shrLog("Usage: bandwidthTest [OPTION]...\n");
shrLog("Test the bandwidth for device to host, host to device, and device to device transfers\n");
shrLog("\n");
shrLog("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
shrLog("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
shrLog("\n");
shrLog("Options:\n");
shrLog("--help\tDisplay this help menu\n");
shrLog("--csv\tPrint results as a CSV\n");
shrLog("--device=[deviceno]\tSpecify the device device to be used\n");
shrLog(" all - compute cumulative bandwidth on all the devices\n");
shrLog(" 0,1,2,...,n - Specify any particular device to be used\n");
shrLog("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
shrLog(" pageable - pageable memory\n");
shrLog(" pinned - non-pageable system memory\n");
shrLog("--mode=[MODE]\tSpecify the mode to use\n");
shrLog(" quick - performs a quick measurement\n");
shrLog(" range - measures a user-specified range of values\n");
shrLog(" shmoo - performs an intense shmoo of a large range of values\n");
shrLog("--htod\tMeasure host to device transfers\n");
shrLog("--dtoh\tMeasure device to host transfers\n");
shrLog("--dtod\tMeasure device to device transfers\n");
#if CUDART_VERSION >= 2020
shrLog("--wc\tAllocate pinned memory as write-combined\n");
#endif
shrLog("--cputiming\tForce CPU-based timing always\n");
shrLog("Range mode options\n");
shrLog("--start=[SIZE]\tStarting transfer size in bytes\n");
shrLog("--end=[SIZE]\tEnding transfer size in bytes\n");
shrLog("--increment=[SIZE]\tIncrement size in bytes\n");
}
| 40906b11620c2df7e6d5302c3b5ac65de40bfb72.cu | /*
* Copyright 1993-2011 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* This is a simple test program to measure the memcopy bandwidth of the GPU.
* It can measure device to device copy bandwidth, host to device copy bandwidth
* for pageable and pinned memory, and device to host copy bandwidth for pageable
* and pinned memory.
*
* Usage:
* ./bandwidthTest [option]...
*/
// includes
#include <sdkHelper.h> // helper for shared functions common to CUDA SDK samples
#include <shrQATest.h> // This is for automated testing output (--qatest)
#include <shrUtils.h>
#include <cuda.h>
#include <memory>
#include <iostream>
#include <cassert>
// defines, project
#define MEMCOPY_ITERATIONS 10
#define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M
#define DEFAULT_INCREMENT (1 << 22) //4 M
#define CACHE_CLEAR_SIZE (1 << 24) //16 M
//shmoo mode defines
#define SHMOO_MEMSIZE_MAX (1 << 26) //64 M
#define SHMOO_MEMSIZE_START (1 << 10) //1 KB
#define SHMOO_INCREMENT_1KB (1 << 10) //1 KB
#define SHMOO_INCREMENT_2KB (1 << 11) //2 KB
#define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB
#define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_INCREMENT_1MB (1 << 20) //1 MB
#define SHMOO_INCREMENT_2MB (1 << 21) //2 MB
#define SHMOO_INCREMENT_4MB (1 << 22) //4 MB
#define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB
#define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB
#define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB
#define SHMOO_LIMIT_1MB (1 << 20) //1 MB
#define SHMOO_LIMIT_16MB (1 << 24) //16 MB
#define SHMOO_LIMIT_32MB (1 << 25) //32 MB
//enums, project
enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE };
enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE };
enum printMode { USER_READABLE, CSV };
enum memoryMode { PINNED, PAGEABLE };
// if true, use CPU based timing for everything
static bool bDontUseGPUTiming;
int *pArgc = NULL;
char **pArgv = NULL;
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(const int argc, const char **argv);
void testBandwidth( unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc);
float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc);
float testDeviceToDeviceTransfer(unsigned int memSize);
void printResultsReadable(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc);
void printResultsCSV(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc);
void printHelp(void);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( cudaError err, const char *file, const int line )
{
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError( const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// General GPU Device CUDA Initialization
int gpuDeviceInit(int devID)
{
int deviceCount;
checkCudaErrors(cudaGetDeviceCount(&deviceCount));
if (deviceCount == 0) {
fprintf(stderr, "gpuDeviceInit() CUDA error: no devices supporting CUDA.\n");
exit(-1);
}
if (devID < 0)
devID = 0;
if (devID > deviceCount-1) {
fprintf(stderr, "\n");
fprintf(stderr, ">> %d CUDA capable GPU device(s) detected. <<\n", deviceCount);
fprintf(stderr, ">> gpuDeviceInit (-device=%d) is not a valid GPU device. <<\n", devID);
fprintf(stderr, "\n");
return -devID;
}
cudaDeviceProp deviceProp;
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
if (deviceProp.major < 1) {
fprintf(stderr, "gpuDeviceInit(): GPU device does not support CUDA.\n");
exit(-1); \
}
checkCudaErrors( cudaSetDevice(devID) );
printf("> gpuDeviceInit() CUDA device [%d]: %s\n", devID, deviceProp.name);
return devID;
}
// This function returns the best GPU (with maximum GFLOPS)
int gpuGetMaxGflopsDeviceId()
{
int current_device = 0, sm_per_multiproc = 0;
int max_compute_perf = 0, max_perf_device = 0;
int device_count = 0, best_SM_arch = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceCount( &device_count );
// Find the best major SM Architecture GPU device
while ( current_device < device_count ) {
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major > 0 && deviceProp.major < 9999) {
best_SM_arch = MAX(best_SM_arch, deviceProp.major);
}
current_device++;
}
// Find the best CUDA capable GPU device
current_device = 0;
while( current_device < device_count ) {
cudaGetDeviceProperties( &deviceProp, current_device );
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
sm_per_multiproc = 1;
} else {
sm_per_multiproc = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
int compute_perf = deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if( compute_perf > max_compute_perf ) {
// If we find GPU with SM major > 2, search only these
if ( best_SM_arch > 2 ) {
// If our device==dest_SM_arch, choose this, or else pass
if (deviceProp.major == best_SM_arch) {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
} else {
max_compute_perf = compute_perf;
max_perf_device = current_device;
}
}
++current_device;
}
return max_perf_device;
}
// Initialization code to find the best CUDA Device
int findCudaDevice(int argc, const char **argv)
{
cudaDeviceProp deviceProp;
int devID = 0;
// If the command-line has a device number specified, use it
if (checkCmdLineFlag(argc, argv, "device")) {
devID = getCmdLineArgumentInt(argc, argv, "device=");
if (devID < 0) {
printf("Invalid command line parameters\n");
exit(-1);
} else {
devID = gpuDeviceInit(devID);
if (devID < 0) {
printf("exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_FAILED);
exit(-1);
}
}
} else {
// Otherwise pick the device with highest Gflops/s
devID = gpuGetMaxGflopsDeviceId();
checkCudaErrors( cudaSetDevice( devID ) );
checkCudaErrors( cudaGetDeviceProperties(&deviceProp, devID) );
printf("> Using CUDA device [%d]: %s\n", devID, deviceProp.name);
}
return devID;
}
// end of CUDA Helper Functions
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
pArgc = &argc;
pArgv = argv;
shrQAStart(argc, argv);
// set logfile name and start logs
shrSetLogFileName ("bandwidthTest.txt");
shrLog("%s Starting...\n\n", argv[0]);
int iRetVal = runTest(argc, (const char**)argv);
// finish
shrQAFinishExit(argc, (const char **)argv, (iRetVal==0 ? QA_PASSED : QA_FAILED));
}
///////////////////////////////////////////////////////////////////////////////
//Parse args, run the appropriate tests
///////////////////////////////////////////////////////////////////////////////
int runTest(const int argc, const char **argv)
{
int start = DEFAULT_SIZE;
int end = DEFAULT_SIZE;
int startDevice = 0;
int endDevice = 0;
int increment = DEFAULT_INCREMENT;
testMode mode = QUICK_MODE;
bool htod = false;
bool dtoh = false;
bool dtod = false;
bool wc = false;
char *modeStr;
char *device = NULL;
printMode printmode = USER_READABLE;
char *memModeStr = NULL;
memoryMode memMode = PAGEABLE;
//process command line args
if(checkCmdLineFlag( argc, argv, "help"))
{
printHelp();
return 0;
}
if(checkCmdLineFlag( argc, argv, "csv"))
{
printmode = CSV;
}
if( getCmdLineArgumentString(argc, argv, "memory", &memModeStr) )
{
if( strcmp(memModeStr, "pageable") == 0 )
{
memMode = PAGEABLE;
}
else if( strcmp(memModeStr, "pinned") == 0)
{
memMode = PINNED;
}
else
{
shrLog("Invalid memory mode - valid modes are pageable or pinned\n");
shrLog("See --help for more information\n");
return -1000;
}
}
else
{
//default - pageable memory
memMode = PAGEABLE;
}
if( shrGetCmdLineArgumentstr(argc, argv, "device", &device) )
{
int deviceCount;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
shrLog( "cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id) );
shrQAFinishExit(*pArgc, (const char **)pArgv, QA_FAILED);
}
if( deviceCount == 0 )
{
shrLog("!!!!!No devices found!!!!!\n");
return -2000;
}
if( strcmp (device, "all") == 0 )
{
printf ("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n");
startDevice = 0;
endDevice = deviceCount-1;
}
else
{
startDevice = endDevice = atoi(device);
if( startDevice >= deviceCount || startDevice < 0)
{
shrLog("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0);
startDevice = endDevice = 0;
}
}
}
shrLog("Running on...\n\n");
for( int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
cudaDeviceProp deviceProp;
cudaError_t error_id = cudaGetDeviceProperties(&deviceProp, currentDevice);
if (error_id == cudaSuccess) {
shrLog(" Device %d: %s\n", currentDevice, deviceProp.name);
} else {
shrLog( "cudaGetDeviceProperties returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id) );
shrQAFinishExit(*pArgc, (const char **)pArgv, QA_FAILED);
}
}
if( shrGetCmdLineArgumentstr(argc, argv, "mode", &modeStr) )
{
//figure out the mode
if( strcmp(modeStr, "quick") == 0 )
{
shrLog(" Quick Mode\n\n");
mode = QUICK_MODE;
}
else if( strcmp(modeStr, "shmoo") == 0 )
{
shrLog(" Shmoo Mode\n\n");
mode = SHMOO_MODE;
}
else if( strcmp(modeStr, "range") == 0 )
{
shrLog(" Range Mode\n\n");
mode = RANGE_MODE;
}
else
{
shrLog("Invalid mode - valid modes are quick, range, or shmoo\n");
shrLog("See --help for more information\n");
return -3000;
}
}
else
{
//default mode - quick
shrLog(" Quick Mode\n\n");
mode = QUICK_MODE;
}
if(checkCmdLineFlag( argc, argv, "htod"))
htod = true;
if(checkCmdLineFlag( argc, argv, "dtoh"))
dtoh = true;
if(checkCmdLineFlag( argc, argv, "dtod"))
dtod = true;
#if CUDART_VERSION >= 2020
if(checkCmdLineFlag( argc, argv, "wc"))
wc = true;
#endif
if(checkCmdLineFlag( argc, argv, "cputiming"))
bDontUseGPUTiming = true;
if( !htod && !dtoh && !dtod )
{
//default: All
htod = true;
dtoh = true;
dtod = true;
}
if( RANGE_MODE == mode )
{
if( shrGetCmdLineArgumenti( argc, argv, "start", &start) )
{
if( start <= 0 )
{
shrLog("Illegal argument - start must be greater than zero\n");
return -4000;
}
}
else
{
shrLog("Must specify a starting size in range mode\n");
shrLog("See --help for more information\n");
return -5000;
}
if( shrGetCmdLineArgumenti( argc, argv, "end", &end) )
{
if( end <= 0 )
{
shrLog("Illegal argument - end must be greater than zero\n");
return -6000;
}
if( start > end )
{
shrLog("Illegal argument - start is greater than end\n");
return -7000;
}
}
else
{
shrLog("Must specify an end size in range mode.\n");
shrLog("See --help for more information\n");
return -8000;
}
if( shrGetCmdLineArgumenti( argc, argv, "increment", &increment) )
{
if( increment <= 0 )
{
shrLog("Illegal argument - increment must be greater than zero\n");
return -9000;
}
}
else
{
shrLog("Must specify an increment in user mode\n");
shrLog("See --help for more information\n");
return -10000;
}
}
if( htod )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
if( dtoh )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc);
}
if( dtod )
{
testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment,
mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc);
}
// Ensure that we reset all CUDA Devices in question
for (int nDevice = startDevice; nDevice < endDevice; nDevice++) {
cudaSetDevice(nDevice);
cudaDeviceReset();
}
return 0;
}
///////////////////////////////////////////////////////////////////////////////
// Run a bandwidth test
///////////////////////////////////////////////////////////////////////////////
void
testBandwidth(unsigned int start, unsigned int end, unsigned int increment,
testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
switch( mode )
{
case QUICK_MODE:
testBandwidthQuick( DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc );
break;
case RANGE_MODE:
testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc);
break;
case SHMOO_MODE:
testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc);
break;
default:
break;
}
}
//////////////////////////////////////////////////////////////////////
// Run a quick mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc);
}
///////////////////////////////////////////////////////////////////////
// Run a range mode bandwidth test
//////////////////////////////////////////////////////////////////////
void
testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment,
memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies we're going to run
unsigned int count = 1 + ((end - start) / increment);
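// e.g. --start=1024 --end=102400 --increment=1024 gives count = 1 + (102400 - 1024) / 1024 = 100 copies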
unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
double *bandwidths = ( double * ) malloc( count * sizeof(double) );
// Before calculating the cumulative bandwidth, initialize bandwidths array to NULL
for (unsigned int i = 0; i < count; i++)
bandwidths[i] = 0.0;
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
cudaSetDevice(currentDevice);
//run each of the copies
for(unsigned int i = 0; i < count; i++)
{
memSizes[i] = start + i * increment;
switch(kind)
{
case DEVICE_TO_HOST: bandwidths[i] += testDeviceToHostTransfer( memSizes[i], memMode, wc);
break;
case HOST_TO_DEVICE: bandwidths[i] += testHostToDeviceTransfer( memSizes[i], memMode, wc);
break;
case DEVICE_TO_DEVICE: bandwidths[i] += testDeviceToDeviceTransfer( memSizes[i] );
break;
}
}
} // Complete the bandwidth computation on all the devices
//print results
if(printmode == CSV)
{
printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
else
{
printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
//////////////////////////////////////////////////////////////////////////////
// Intense shmoo mode - covers a large range of values with varying increments
//////////////////////////////////////////////////////////////////////////////
void
testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc)
{
//count the number of copies to make
unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB)
+ ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB)
+ ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB)
+ ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB)
+ ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB)
+ ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB)
+ ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB);
unsigned int *memSizes = ( unsigned int * )malloc( count * sizeof( unsigned int ) );
double *bandwidths = ( double * ) malloc( count * sizeof(double) );
// Before calculating the cumulative bandwidth, initialize bandwidths array to NULL
for (unsigned int i = 0; i < count; i++)
bandwidths[i] = 0.0;
// Use the device asked by the user
for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++)
{
cudaSetDevice(currentDevice);
//Run the shmoo
int iteration = 0;
unsigned int memSize = 0;
while( memSize <= SHMOO_MEMSIZE_MAX )
{
if( memSize < SHMOO_LIMIT_20KB )
{
memSize += SHMOO_INCREMENT_1KB;
}
else if( memSize < SHMOO_LIMIT_50KB )
{
memSize += SHMOO_INCREMENT_2KB;
}else if( memSize < SHMOO_LIMIT_100KB )
{
memSize += SHMOO_INCREMENT_10KB;
}else if( memSize < SHMOO_LIMIT_1MB )
{
memSize += SHMOO_INCREMENT_100KB;
}else if( memSize < SHMOO_LIMIT_16MB )
{
memSize += SHMOO_INCREMENT_1MB;
}else if( memSize < SHMOO_LIMIT_32MB )
{
memSize += SHMOO_INCREMENT_2MB;
}else
{
memSize += SHMOO_INCREMENT_4MB;
}
memSizes[iteration] = memSize;
switch(kind)
{
case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer( memSizes[iteration], memMode, wc );
break;
case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer( memSizes[iteration], memMode, wc );
break;
case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer( memSizes[iteration] );
break;
}
iteration++;
shrLog(".");
}
} // Complete the bandwidth computation on all the devices
//print results
shrLog("\n");
if( CSV == printmode)
{
printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
else
{
printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc);
}
//clean up
free(memSizes);
free(bandwidths);
}
///////////////////////////////////////////////////////////////////////////////
// test the bandwidth of a device to host memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
unsigned char *h_idata = NULL;
unsigned char *h_odata = NULL;
cudaEvent_t start, stop;
sdkCreateTimer( &timer );
checkCudaErrors( cudaEventCreate( &start ) );
checkCudaErrors( cudaEventCreate( &stop ) );
//allocate host memory
if( PINNED == memMode )
{
//pinned memory mode - use special function to get OS-pinned memory
#if CUDART_VERSION >= 2020
checkCudaErrors( cudaHostAlloc( (void**)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
checkCudaErrors( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
#else
checkCudaErrors( cudaMallocHost( (void**)&h_idata, memSize ) );
checkCudaErrors( cudaMallocHost( (void**)&h_odata, memSize ) );
#endif
}
else
{
//pageable memory mode - use malloc
h_idata = (unsigned char *)malloc( memSize );
h_odata = (unsigned char *)malloc( memSize );
if( h_idata == 0 || h_odata == 0 ) {
fprintf(stderr, "Not enough memory avaialable on host to run test!\n" );
exit(-1);
}
}
//initialize the memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char) (i & 0xff);
}
// allocate device memory
unsigned char* d_idata;
checkCudaErrors( cudaMalloc( (void**) &d_idata, memSize));
//initialize the device memory
checkCudaErrors( cudaMemcpy( d_idata, h_idata, memSize,
cudaMemcpyHostToDevice) );
//copy data from GPU to Host
sdkStartTimer( &timer );
checkCudaErrors( cudaEventRecord( start, 0 ) );
if( PINNED == memMode )
{
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
checkCudaErrors( cudaMemcpyAsync( h_odata, d_idata, memSize,
cudaMemcpyDeviceToHost, 0) );
}
}
else
{
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
checkCudaErrors( cudaMemcpy( h_odata, d_idata, memSize,
cudaMemcpyDeviceToHost) );
}
}
checkCudaErrors( cudaEventRecord( stop, 0 ) );
// make sure GPU has finished copying
checkCudaErrors( cudaDeviceSynchronize() );
//get the total elapsed time in ms
sdkStopTimer( &timer );
checkCudaErrors( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if( PINNED != memMode || bDontUseGPUTiming )
{
elapsedTimeInMs = sdkGetTimerValue( &timer );
}
//calculate bandwidth in MB/s
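// total bytes moved = memSize * MEMCOPY_ITERATIONS; 1e3f converts ms to s and (1 << 20) converts bytes to MB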
bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
checkCudaErrors( cudaEventDestroy(stop) );
checkCudaErrors( cudaEventDestroy(start) );
sdkDeleteTimer( &timer );
if( PINNED == memMode )
{
checkCudaErrors( cudaFreeHost(h_idata) );
checkCudaErrors( cudaFreeHost(h_odata) );
}
else
{
free(h_idata);
free(h_odata);
}
checkCudaErrors(cudaFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a host to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
cudaEvent_t start, stop;
sdkCreateTimer( &timer );
checkCudaErrors( cudaEventCreate( &start ) );
checkCudaErrors( cudaEventCreate( &stop ) );
//allocate host memory
unsigned char *h_odata = NULL;
if( PINNED == memMode )
{
#if CUDART_VERSION >= 2020
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors( cudaHostAlloc( (void**)&h_odata, memSize, (wc) ? cudaHostAllocWriteCombined : 0 ) );
#else
//pinned memory mode - use special function to get OS-pinned memory
checkCudaErrors( cudaMallocHost( (void**)&h_odata, memSize ) );
#endif
}
else
{
//pageable memory mode - use malloc
h_odata = (unsigned char *)malloc( memSize );
if( h_odata == 0 ) {
fprintf(stderr, "Not enough memory avaialable on host to run test!\n" );
exit(-1);
}
}
unsigned char *h_cacheClear1 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
unsigned char *h_cacheClear2 = (unsigned char *)malloc( CACHE_CLEAR_SIZE );
if( h_cacheClear1 == 0 || h_cacheClear2 == 0 ) {
fprintf(stderr, "Not enough memory available on host to run test!\n" );
exit(-1);
}
//initialize the memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_odata[i] = (unsigned char) (i & 0xff);
}
for(unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++)
{
h_cacheClear1[i] = (unsigned char) (i & 0xff);
h_cacheClear2[i] = (unsigned char) (0xff - (i & 0xff));
}
//allocate device memory
unsigned char* d_idata;
checkCudaErrors( cudaMalloc( (void**) &d_idata, memSize));
sdkStartTimer( &timer );
checkCudaErrors( cudaEventRecord( start, 0 ) );
//copy host memory to device memory
if( PINNED == memMode )
{
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors( cudaMemcpyAsync( d_idata, h_odata, memSize,
cudaMemcpyHostToDevice, 0) );
}
}
else {
for(unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++)
{
checkCudaErrors( cudaMemcpy( d_idata, h_odata, memSize,
cudaMemcpyHostToDevice) );
}
}
checkCudaErrors( cudaEventRecord( stop, 0 ) );
checkCudaErrors( cudaDeviceSynchronize() );
//total elapsed time in ms
sdkStopTimer( &timer );
checkCudaErrors( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( PINNED != memMode || bDontUseGPUTiming )
{
elapsedTimeInMs = sdkGetTimerValue( &timer );
}
sdkResetTimer( &timer );
//calculate bandwidth in MB/s
bandwidthInMBs = (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
checkCudaErrors( cudaEventDestroy(stop) );
checkCudaErrors( cudaEventDestroy(start) );
sdkDeleteTimer( &timer );
if( PINNED == memMode )
{
checkCudaErrors( cudaFreeHost(h_odata) );
}
else
{
free(h_odata);
}
free(h_cacheClear1);
free(h_cacheClear2);
checkCudaErrors(cudaFree(d_idata));
return bandwidthInMBs;
}
///////////////////////////////////////////////////////////////////////////////
//! test the bandwidth of a device to device memcopy of a specific size
///////////////////////////////////////////////////////////////////////////////
float
testDeviceToDeviceTransfer(unsigned int memSize)
{
StopWatchInterface *timer = NULL;
float elapsedTimeInMs = 0.0f;
float bandwidthInMBs = 0.0f;
cudaEvent_t start, stop;
sdkCreateTimer( &timer );
checkCudaErrors( cudaEventCreate( &start ) );
checkCudaErrors( cudaEventCreate( &stop ) );
//allocate host memory
unsigned char *h_idata = (unsigned char *)malloc( memSize );
if( h_idata == 0 ) {
fprintf(stderr, "Not enough memory avaialable on host to run test!\n" );
exit(-1);
}
//initialize the host memory
for(unsigned int i = 0; i < memSize/sizeof(unsigned char); i++)
{
h_idata[i] = (unsigned char) (i & 0xff);
}
//allocate device memory
unsigned char *d_idata;
checkCudaErrors( cudaMalloc( (void**) &d_idata, memSize));
unsigned char *d_odata;
checkCudaErrors( cudaMalloc( (void**) &d_odata, memSize));
//initialize memory
checkCudaErrors( cudaMemcpy( d_idata, h_idata, memSize,
cudaMemcpyHostToDevice) );
//run the memcopy
sdkStartTimer( &timer );
checkCudaErrors( cudaEventRecord( start, 0 ) );
for( unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++ )
{
checkCudaErrors( cudaMemcpy( d_odata, d_idata, memSize,
cudaMemcpyDeviceToDevice) );
}
checkCudaErrors( cudaEventRecord( stop, 0 ) );
//Since device to device memory copies are non-blocking,
//cudaDeviceSynchronize() is required in order to get
//proper timing.
checkCudaErrors( cudaDeviceSynchronize() );
//get the total elapsed time in ms
sdkStopTimer( &timer );
checkCudaErrors( cudaEventElapsedTime( &elapsedTimeInMs, start, stop ) );
if ( bDontUseGPUTiming )
{
elapsedTimeInMs = sdkGetTimerValue( &timer );
}
//calculate bandwidth in MB/s
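// the extra 2.0f factor accounts for each device-to-device copy both reading and writing memSize bytes on the GPU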
bandwidthInMBs = 2.0f * (1e3f * memSize * (float)MEMCOPY_ITERATIONS) /
(elapsedTimeInMs * (float)(1 << 20));
//clean up memory
sdkDeleteTimer( &timer );
free(h_idata);
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
return bandwidthInMBs;
}
/////////////////////////////////////////////////////////
//print results in an easily read format
////////////////////////////////////////////////////////
void printResultsReadable(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc)
{
// log config information
if (kind == DEVICE_TO_DEVICE)
{
shrLog(" Device to Device Bandwidth, %i Device(s)\n", iNumDevs);
}
else
{
if (kind == DEVICE_TO_HOST)
{
shrLog(" Device to Host Bandwidth, %i Device(s), ", iNumDevs);
}
else if (kind == HOST_TO_DEVICE)
{
shrLog(" Host to Device Bandwidth, %i Device(s), ", iNumDevs);
}
if(memMode == PAGEABLE)
{
shrLog("Paged memory\n");
}
else if (memMode == PINNED)
{
shrLog("Pinned memory");
if (wc) {
shrLog(", Write-Combined Memory Enabled");
}
shrLog("\n");
}
}
shrLog(" Transfer Size (Bytes)\tBandwidth(MB/s)\n");
unsigned int i;
for(i = 0; i < (count - 1); i++)
{
shrLog(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]);
}
shrLog(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]);
}
///////////////////////////////////////////////////////////////////////////
//print results in a database format
///////////////////////////////////////////////////////////////////////////
void printResultsCSV(unsigned int *memSizes, double* bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc)
{
std::string sConfig;
// log config information
if (kind == DEVICE_TO_DEVICE)
{
sConfig += "D2D";
}
else
{
if (kind == DEVICE_TO_HOST)
{
sConfig += "D2H";
}
else if (kind == HOST_TO_DEVICE)
{
sConfig += "H2D";
}
if(memMode == PAGEABLE)
{
sConfig += "-Paged";
}
else if (memMode == PINNED)
{
sConfig += "-Pinned";
if (wc)
{
sConfig += "-WriteCombined";
}
}
}
unsigned int i;
double dSeconds = 0.0;
for(i = 0; i < count; i++)
{
dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)(1<<20));
shrLogEx(LOGBOTH | MASTER, 0, "bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n",
sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs);
}
}
///////////////////////////////////////////////////////////////////////////
//Print help screen
///////////////////////////////////////////////////////////////////////////
void printHelp(void)
{
shrLog("Usage: bandwidthTest [OPTION]...\n");
shrLog("Test the bandwidth for device to host, host to device, and device to device transfers\n");
shrLog("\n");
shrLog("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n");
shrLog("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n");
shrLog("\n");
shrLog("Options:\n");
shrLog("--help\tDisplay this help menu\n");
shrLog("--csv\tPrint results as a CSV\n");
shrLog("--device=[deviceno]\tSpecify the device device to be used\n");
shrLog(" all - compute cumulative bandwidth on all the devices\n");
shrLog(" 0,1,2,...,n - Specify any particular device to be used\n");
shrLog("--memory=[MEMMODE]\tSpecify which memory mode to use\n");
shrLog(" pageable - pageable memory\n");
shrLog(" pinned - non-pageable system memory\n");
shrLog("--mode=[MODE]\tSpecify the mode to use\n");
shrLog(" quick - performs a quick measurement\n");
shrLog(" range - measures a user-specified range of values\n");
shrLog(" shmoo - performs an intense shmoo of a large range of values\n");
shrLog("--htod\tMeasure host to device transfers\n");
shrLog("--dtoh\tMeasure device to host transfers\n");
shrLog("--dtod\tMeasure device to device transfers\n");
#if CUDART_VERSION >= 2020
shrLog("--wc\tAllocate pinned memory as write-combined\n");
#endif
shrLog("--cputiming\tForce CPU-based timing always\n");
shrLog("Range mode options\n");
shrLog("--start=[SIZE]\tStarting transfer size in bytes\n");
shrLog("--end=[SIZE]\tEnding transfer size in bytes\n");
shrLog("--increment=[SIZE]\tIncrement size in bytes\n");
}
|
fcc4d36dff38fdd5a53fcfdfc3d49697735216cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#define SIZE int(pow(2,10))
#define RANGE int(pow(2,8)) /* Numbers are generated from 0 to RANGE-1*/
#define BLOCKSIZE 1024
#define NUMBLOCKS SIZE/BLOCKSIZE
#define nBITS int(log(RANGE)/log(2)) /* log(n)+1 bits to represent n */
#define digit(n,exp) (n/exp)%2
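/* digit(n,exp) extracts bit i of n when exp == 2^i, i.e. the bit currently being sorted on */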
__global__ void RadixSort(int*,int,int*);
void CheckSolution(int*);
int main()
{
int in=0,out=1;
int** array=new int*[2];
for(int i=0;i<2;i++)
array[i]=new int[SIZE];
for(int i=0;i<SIZE;i++)
array[in][i]=rand()%RANGE;
int* array_d;
hipMalloc((void**)&array_d,SIZE*sizeof(int));
int host_histo[2]={};
int* device_histo;
hipMalloc((void**)&device_histo,2*sizeof(int));
int exp,rank;
for(int i=0;i<nBITS;i++)
{
hipMemcpy(array_d,array[in],SIZE*sizeof(int),hipMemcpyHostToDevice);
exp=pow(2,i);
hipLaunchKernelGGL(( RadixSort), dim3(NUMBLOCKS),dim3(BLOCKSIZE), 0, 0, array_d,exp,device_histo);
hipMemcpy(host_histo,device_histo,2*sizeof(int),hipMemcpyDeviceToHost);
/* The scan part has been moved outside because it should occur at a globally synchronised point */
host_histo[1]+=host_histo[0];
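/* host_histo is now an inclusive prefix sum over the two buckets: host_histo[0] = number of 0-bits, host_histo[1] = SIZE; the backwards loop scatters elements stably by decrementing these counters */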
for(int j=SIZE-1;j>=0;j--)
{
rank=host_histo[digit(array[in][j],exp)]-1;
array[out][rank]=array[in][j];
host_histo[digit(array[in][j],exp)]--;
}
in=1-in;
out=1-out;
}
CheckSolution(array[in]);
}
__global__ void RadixSort(int* array_d, int exp, int* device_histo)
{
/* Histogram Calculation*/
int tx=threadIdx.x,bx=blockIdx.x;
int inx=bx*blockDim.x+tx;
if(inx==0)
{
device_histo[0]=0;
device_histo[1]=0;
}
__shared__ int shared_histo[2];
if(tx==0)
{
shared_histo[0]=0;
shared_histo[1]=0;
}
__syncthreads();
atomicAdd(&shared_histo[digit(array_d[inx],exp)],1);
__syncthreads();
if(tx==0)
{
atomicAdd(&device_histo[0],shared_histo[0]);
atomicAdd(&device_histo[1],shared_histo[1]);
}
}
void CheckSolution(int* array)
{
int i;
for(i=0;i<SIZE-1;i++)
if(array[i]>array[i+1])
{
printf("Solution is wrong!\n");
break;
}
if(i==SIZE-1)
printf("Solution is right!\n");
}
| fcc4d36dff38fdd5a53fcfdfc3d49697735216cb.cu | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#define SIZE int(pow(2,10))
#define RANGE int(pow(2,8)) /* Numbers are generated from 0 to RANGE-1*/
#define BLOCKSIZE 1024
#define NUMBLOCKS SIZE/BLOCKSIZE
#define nBITS int(log(RANGE)/log(2)) /* log(n)+1 bits to represent n */
#define digit(n,exp) (n/exp)%2
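/* digit(n,exp) extracts bit i of n when exp == 2^i, i.e. the bit currently being sorted on */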
__global__ void RadixSort(int*,int,int*);
void CheckSolution(int*);
int main()
{
int in=0,out=1;
int** array=new int*[2];
for(int i=0;i<2;i++)
array[i]=new int[SIZE];
for(int i=0;i<SIZE;i++)
array[in][i]=rand()%RANGE;
int* array_d;
cudaMalloc((void**)&array_d,SIZE*sizeof(int));
int host_histo[2]={};
int* device_histo;
cudaMalloc((void**)&device_histo,2*sizeof(int));
int exp,rank;
for(int i=0;i<nBITS;i++)
{
cudaMemcpy(array_d,array[in],SIZE*sizeof(int),cudaMemcpyHostToDevice);
exp=pow(2,i);
RadixSort<<<NUMBLOCKS,BLOCKSIZE>>>(array_d,exp,device_histo);
cudaMemcpy(host_histo,device_histo,2*sizeof(int),cudaMemcpyDeviceToHost);
/* The scan part has been moved outside because it should occur at a globally synchronised point */
host_histo[1]+=host_histo[0];
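/* host_histo is now an inclusive prefix sum over the two buckets: host_histo[0] = number of 0-bits, host_histo[1] = SIZE; the backwards loop scatters elements stably by decrementing these counters */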
for(int j=SIZE-1;j>=0;j--)
{
rank=host_histo[digit(array[in][j],exp)]-1;
array[out][rank]=array[in][j];
host_histo[digit(array[in][j],exp)]--;
}
in=1-in;
out=1-out;
}
CheckSolution(array[in]);
}
__global__ void RadixSort(int* array_d, int exp, int* device_histo)
{
/* Histogram Calculation*/
int tx=threadIdx.x,bx=blockIdx.x;
int inx=bx*blockDim.x+tx;
if(inx==0)
{
device_histo[0]=0;
device_histo[1]=0;
}
__shared__ int shared_histo[2];
if(tx==0)
{
shared_histo[0]=0;
shared_histo[1]=0;
}
__syncthreads();
atomicAdd(&shared_histo[digit(array_d[inx],exp)],1);
__syncthreads();
if(tx==0)
{
atomicAdd(&device_histo[0],shared_histo[0]);
atomicAdd(&device_histo[1],shared_histo[1]);
}
}
void CheckSolution(int* array)
{
int i;
for(i=0;i<SIZE-1;i++)
if(array[i]>array[i+1])
{
printf("Solution is wrong!\n");
break;
}
if(i==SIZE-1)
printf("Solution is right!\n");
}
|
74ab9d7a4b73d2beabe1e383d0b50ce8de6add0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/softmax_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if (has_weights_) {
Forward_cpu(bottom, top);
return;
}
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
if (count > 0) {
loss /= count;
} else {
loss = 0;
}
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
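// bottom_diff was pre-filled with the softmax probabilities, so subtracting 1 at the labelled class gives the usual softmax + cross-entropy gradient (p - 1 there, p elsewhere)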
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (has_weights_) {
Backward_cpu(top, propagate_down, bottom);
return;
}
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
if (count > 0) {
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
}
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
| 74ab9d7a4b73d2beabe1e383d0b50ce8de6add0e.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/softmax_loss_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
if (has_weights_) {
Forward_cpu(bottom, top);
return;
}
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
if (count > 0) {
loss /= count;
} else {
loss = 0;
}
} else {
loss /= outer_num_;
}
top[0]->mutable_cpu_data()[0] = loss;
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
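// bottom_diff was pre-filled with the softmax probabilities, so subtracting 1 at the labelled class gives the usual softmax + cross-entropy gradient (p - 1 there, p elsewhere)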
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (has_weights_) {
Backward_cpu(top, propagate_down, bottom);
return;
}
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
const Dtype loss_weight = top[0]->cpu_diff()[0];
if (normalize_) {
Dtype count;
caffe_gpu_asum(nthreads, counts, &count);
if (count > 0) {
caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff);
}
} else {
caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer);
} // namespace caffe
|
bf03562b29992df0c2c9689f7f725da1f1b1d736.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include "flamegpu/gpu/CUDAScanCompaction.h"
#include "flamegpu/gpu/detail/CUDAErrorChecking.cuh"
#include "flamegpu/gpu/CUDASimulation.h"
namespace flamegpu {
/**
* CUDAScanCompaction methods
*/
void CUDAScanCompaction::purge() {
memset(configs, 0, sizeof(configs));
}
void CUDAScanCompaction::resize(const unsigned int& newCount, const Type& type, const unsigned int& streamId) {
assert(streamId < MAX_STREAMS);
assert(type < MAX_TYPES);
configs[type][streamId].resize_scan_flag(newCount);
}
void CUDAScanCompaction::zero(const Type& type, const unsigned int& streamId) {
assert(streamId < MAX_STREAMS);
assert(type < MAX_TYPES);
configs[type][streamId].zero_scan_flag();
}
const CUDAScanCompactionConfig &CUDAScanCompaction::getConfig(const Type& type, const unsigned int& streamId) {
return configs[type][streamId];
}
CUDAScanCompactionConfig &CUDAScanCompaction::Config(const Type& type, const unsigned int& streamId) {
return configs[type][streamId];
}
/**
*
*/
CUDAScanCompactionConfig::~CUDAScanCompactionConfig() {
free_scan_flag();
}
void CUDAScanCompactionConfig::free_scan_flag() {
if (d_ptrs.scan_flag) {
gpuErrchk(hipFree(d_ptrs.scan_flag));
d_ptrs.scan_flag = nullptr;
}
if (d_ptrs.position) {
gpuErrchk(hipFree(d_ptrs.position));
d_ptrs.position = nullptr;
}
}
void CUDAScanCompactionConfig::zero_scan_flag() {
if (d_ptrs.position) {
gpuErrchk(hipMemset(d_ptrs.position, 0, scan_flag_len * sizeof(unsigned int))); // @todo - make this async + streamSync for less ensemble blocking.
}
if (d_ptrs.scan_flag) {
gpuErrchk(hipMemset(d_ptrs.scan_flag, 0, scan_flag_len * sizeof(unsigned int))); // @todo - make this async + streamSync for less ensemble blocking.
}
}
void CUDAScanCompactionConfig::resize_scan_flag(const unsigned int& count) {
if (count + 1 > scan_flag_len) {
free_scan_flag();
gpuErrchk(hipMalloc(&d_ptrs.scan_flag, (count + 1) * sizeof(unsigned int))); // +1 so we can get the total from the scan
gpuErrchk(hipMalloc(&d_ptrs.position, (count + 1) * sizeof(unsigned int))); // +1 so we can get the total from the scan
scan_flag_len = count + 1;
}
}
} // namespace flamegpu
| bf03562b29992df0c2c9689f7f725da1f1b1d736.cu | #include <cassert>
#include "flamegpu/gpu/CUDAScanCompaction.h"
#include "flamegpu/gpu/detail/CUDAErrorChecking.cuh"
#include "flamegpu/gpu/CUDASimulation.h"
namespace flamegpu {
/**
* CUDAScanCompaction methods
*/
void CUDAScanCompaction::purge() {
memset(configs, 0, sizeof(configs));
}
void CUDAScanCompaction::resize(const unsigned int& newCount, const Type& type, const unsigned int& streamId) {
assert(streamId < MAX_STREAMS);
assert(type < MAX_TYPES);
configs[type][streamId].resize_scan_flag(newCount);
}
void CUDAScanCompaction::zero(const Type& type, const unsigned int& streamId) {
assert(streamId < MAX_STREAMS);
assert(type < MAX_TYPES);
configs[type][streamId].zero_scan_flag();
}
const CUDAScanCompactionConfig &CUDAScanCompaction::getConfig(const Type& type, const unsigned int& streamId) {
return configs[type][streamId];
}
CUDAScanCompactionConfig &CUDAScanCompaction::Config(const Type& type, const unsigned int& streamId) {
return configs[type][streamId];
}
/**
*
*/
CUDAScanCompactionConfig::~CUDAScanCompactionConfig() {
free_scan_flag();
}
void CUDAScanCompactionConfig::free_scan_flag() {
if (d_ptrs.scan_flag) {
gpuErrchk(cudaFree(d_ptrs.scan_flag));
d_ptrs.scan_flag = nullptr;
}
if (d_ptrs.position) {
gpuErrchk(cudaFree(d_ptrs.position));
d_ptrs.position = nullptr;
}
}
void CUDAScanCompactionConfig::zero_scan_flag() {
if (d_ptrs.position) {
gpuErrchk(cudaMemset(d_ptrs.position, 0, scan_flag_len * sizeof(unsigned int))); // @todo - make this async + streamSync for less ensemble blocking.
}
if (d_ptrs.scan_flag) {
gpuErrchk(cudaMemset(d_ptrs.scan_flag, 0, scan_flag_len * sizeof(unsigned int))); // @todo - make this async + streamSync for less ensemble blocking.
}
}
void CUDAScanCompactionConfig::resize_scan_flag(const unsigned int& count) {
if (count + 1 > scan_flag_len) {
free_scan_flag();
gpuErrchk(cudaMalloc(&d_ptrs.scan_flag, (count + 1) * sizeof(unsigned int))); // +1 so we can get the total from the scan
gpuErrchk(cudaMalloc(&d_ptrs.position, (count + 1) * sizeof(unsigned int))); // +1 so we can get the total from the scan
scan_flag_len = count + 1;
}
}
} // namespace flamegpu
|
0b98b0e3e3d87d109b8b048c55988c349e8d6466.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int dimx = 1;
int dimy = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernel3), dim3(gridBlock),dim3(threadBlock), 0, 0, a,dimx,dimy);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel3), dim3(gridBlock),dim3(threadBlock), 0, 0, a,dimx,dimy);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel3), dim3(gridBlock),dim3(threadBlock), 0, 0, a,dimx,dimy);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0b98b0e3e3d87d109b8b048c55988c349e8d6466.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel3.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int dimx = 1;
int dimy = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel3<<<gridBlock,threadBlock>>>(a,dimx,dimy);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel3<<<gridBlock,threadBlock>>>(a,dimx,dimy);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel3<<<gridBlock,threadBlock>>>(a,dimx,dimy);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fa4ea6d1e773d2374b2549615d4b05cb6cdced0a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
/// threads per block
static const int blockSize{ 256 };
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// initialize the arrays on the GPU
// returns the addresses of the pointers on the GPU
// dev_idata is a pointer to the address of the dev_idata array that
// gets updated here
// initialize the dev_odata to 0.0; dev_idata has the first
// elements copied and the remainder to make the stream 2^n
// are set to 0. The first input is the size of the arrays
// to allocate and the second input is the size of the array to transfer.
// N is the maximum size of the allocated array. n is the size of the data array.
// N is one more than the power of 2 greater than or equal to n,
// 0 is placed at the first element
// in dev_idata, and then the elements are copied into dev_idata.
// dev_odata has its first element initialized to 0 too.
void initScan(int N, int n, const int *idata, int ** dev_odata, int ** dev_idata)
{
int size {sizeof(int)};
hipMalloc(reinterpret_cast<void**> (dev_idata), N * size);
hipMalloc(reinterpret_cast<void**> (dev_odata), N * size);
checkCUDAError("Allocating Scan Buffer Error");
hipMemset(*dev_idata, 0, N * size);
hipMemset(*dev_odata, 0, N * size);
hipMemcpy(*dev_idata + 1, idata, n *size, hipMemcpyHostToDevice);
// no need to initialize the odata because the loop does that each time
checkCUDAError("Initialize and Copy data to target Error");
hipDeviceSynchronize();
}
// transfer scan data back to host
void transferScan(int N, int * odata, int * dev_odata)
{
hipMemcpy(odata, dev_odata, N * sizeof(int), hipMemcpyDeviceToHost);
}
// end the scan on the device.
void endScan(int * dev_odata, int * dev_idata)
{
hipFree(dev_idata);
hipFree(dev_odata);
}
// TODO:
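// One step of the naive (Hillis-Steele) scan: every element at index k >= pow2d_1 adds the value
// pow2d_1 positions to its left; the leading zero written by initScan makes the final result exclusive.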
__global__ void kernOneNaiveScan(int N, int pow2d_1, int * dev_odata, int * dev_idata)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
if (k >= N || (k < pow2d_1 >> 1)) {
return;
}
dev_odata[k] = dev_idata[k];
if ( k >= pow2d_1) {
dev_odata[k] += dev_idata[k - pow2d_1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata, int indx)
{
int * dev_idata;
int * dev_odata;
// d is the number of scans needed and also the
// upper bound for log2 of the number of elements
int d {ilog2ceil(n)}; //
// add one so that the 0th element is 0 for the
// exclusive scan
int N { 1 << d };
initScan(N + 1 , n, idata, &dev_odata, &dev_idata);
timer().startGpuTimer();
dim3 fullBlocksPerGrid((N + blockSize - 1)/ blockSize);
// int final { 1 << d - 1};
for (int pow2d_1 {1}; pow2d_1 < N; pow2d_1 *= 2) {
// copy all elements to dev_odata to save
hipLaunchKernelGGL(( kernOneNaiveScan), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0,
N, pow2d_1, dev_odata + 1, dev_idata + 1);
std::swap(dev_odata, dev_idata);
if (pow2d_1 == indx) break;
}
timer().endGpuTimer();
// only transfer the first n elements of the
// exclusive scan
transferScan(n, odata, dev_idata);
endScan(dev_odata, dev_idata);
}
}
}
| fa4ea6d1e773d2374b2549615d4b05cb6cdced0a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
/// threads per block
static const int blockSize{ 256 };
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// initialize the arrays on the GPU
// returns the addresses of the pointers on the GPU
// dev_idata is a pointer to the address of the dev_idata array that
// gets updated here
// initialize the dev_odata to 0.0; dev_idata has the first
// elements copied and the remainder to make the stream 2^n
// are set to 0. The first input is the size of the arrays
// to allocate and the second input is the size of the array to transfer.
// N is the maximum size of the allocated array. n is the size of the data array.
// N is one more than the power of 2 greater than or equal to n,
// 0 is placed at the first element
// in dev_idata, and then the elements are copied into dev_idata.
// dev_odata has its first element initialized to 0 too.
void initScan(int N, int n, const int *idata, int ** dev_odata, int ** dev_idata)
{
int size {sizeof(int)};
cudaMalloc(reinterpret_cast<void**> (dev_idata), N * size);
cudaMalloc(reinterpret_cast<void**> (dev_odata), N * size);
checkCUDAError("Allocating Scan Buffer Error");
cudaMemset(*dev_idata, 0, N * size);
cudaMemset(*dev_odata, 0, N * size);
cudaMemcpy(*dev_idata + 1, idata, n *size, cudaMemcpyHostToDevice);
// no need to initialize the odata because the loop does that each time
checkCUDAError("Initialize and Copy data to target Error");
cudaThreadSynchronize();
}
// transfer scan data back to host
void transferScan(int N, int * odata, int * dev_odata)
{
cudaMemcpy(odata, dev_odata, N * sizeof(int), cudaMemcpyDeviceToHost);
}
// end the scan on the device.
void endScan(int * dev_odata, int * dev_idata)
{
cudaFree(dev_idata);
cudaFree(dev_odata);
}
// TODO:
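// One step of the naive (Hillis-Steele) scan: every element at index k >= pow2d_1 adds the value
// pow2d_1 positions to its left; the leading zero written by initScan makes the final result exclusive.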
__global__ void kernOneNaiveScan(int N, int pow2d_1, int * dev_odata, int * dev_idata)
{
int k = threadIdx.x + blockIdx.x * blockDim.x;
if (k >= N || (k < pow2d_1 >> 1)) {
return;
}
dev_odata[k] = dev_idata[k];
if ( k >= pow2d_1) {
dev_odata[k] += dev_idata[k - pow2d_1];
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata, int indx)
{
int * dev_idata;
int * dev_odata;
// d is the number of scans needed and also the
// upper bound for log2 of the number of elements
int d {ilog2ceil(n)}; //
// add one so that the 0th element is 0 for the
// exclusive scan
int N { 1 << d };
initScan(N + 1 , n, idata, &dev_odata, &dev_idata);
timer().startGpuTimer();
dim3 fullBlocksPerGrid((N + blockSize - 1)/ blockSize);
// int final { 1 << d - 1};
for (int pow2d_1 {1}; pow2d_1 < N; pow2d_1 *= 2) {
// copy all elements to dev_odata to save
kernOneNaiveScan<<<fullBlocksPerGrid, blockSize>>>
(N, pow2d_1, dev_odata + 1, dev_idata + 1);
std::swap(dev_odata, dev_idata);
if (pow2d_1 == indx) break;
}
timer().endGpuTimer();
// only transfer the first n elements of the
// exclusive scan
transferScan(n, odata, dev_idata);
endScan(dev_odata, dev_idata);
}
}
}
|
32ec802e59c5c7cc84b912cad48505077c8b515a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "deInterleave_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_X_out = NULL;
hipMalloc(&d_X_out, XSIZE*YSIZE);
float *d_Y_out = NULL;
hipMalloc(&d_Y_out, XSIZE*YSIZE);
float2 *d_XY_in = NULL;
hipMalloc(&d_XY_in, XSIZE*YSIZE);
int pitch_out = 2;
int pitch_in = 2;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
deInterleave_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_X_out,d_Y_out,d_XY_in,pitch_out,pitch_in,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
deInterleave_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_X_out,d_Y_out,d_XY_in,pitch_out,pitch_in,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
deInterleave_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_X_out,d_Y_out,d_XY_in,pitch_out,pitch_in,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 32ec802e59c5c7cc84b912cad48505077c8b515a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "deInterleave_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_X_out = NULL;
cudaMalloc(&d_X_out, XSIZE*YSIZE);
float *d_Y_out = NULL;
cudaMalloc(&d_Y_out, XSIZE*YSIZE);
float2 *d_XY_in = NULL;
cudaMalloc(&d_XY_in, XSIZE*YSIZE);
int pitch_out = 2;
int pitch_in = 2;
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
deInterleave_kernel<<<gridBlock,threadBlock>>>(d_X_out,d_Y_out,d_XY_in,pitch_out,pitch_in,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
deInterleave_kernel<<<gridBlock,threadBlock>>>(d_X_out,d_Y_out,d_XY_in,pitch_out,pitch_in,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
deInterleave_kernel<<<gridBlock,threadBlock>>>(d_X_out,d_Y_out,d_XY_in,pitch_out,pitch_in,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c5f6350ce236373f5ef7db2f996e7a83e6a5098f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <kernels.cuh>
#define XSIZE 7
#define YSIZE 128
#define ZSIZE 48
// __restrict__ tells the compiler there is no memory overlap
__device__ float fftfactor = 1.0/32.0 * 1.0/32.0;
__global__ void rearrange(hipTextureObject_t texObj, hipfftComplex * __restrict__ out)
{
// this is currently the ugliest solution I can think of
// xidx is the channel number
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * 128;
int2 word;
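// the mask/shift pairs below byte-swap each 16-bit half of word.y and word.x before the float conversion;
// word.y fills the first output block and word.x the block offset by 336 * 128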
//if ((xidx == 0) && (yidx == 0)) printf("In the rearrange kernel\n");
for (int sample = 0; sample < YSIZE; sample++) {
word = tex2D<int2>(texObj, xidx, yidx + sample);
printf("%i ", sample);
out[xidx * 128 + 7 * yidx + sample].x = static_cast<float>(static_cast<short>(((word.y & 0xff000000) >> 24) | ((word.y & 0xff0000) >> 8)));
out[xidx * 128 + 7 * yidx + sample].y = static_cast<float>(static_cast<short>(((word.y & 0xff00) >> 8) | ((word.y & 0xff) << 8)));
out[336 * 128 + xidx * 128 + 7 * yidx + sample].x = static_cast<float>(static_cast<short>(((word.x & 0xff000000) >> 24) | ((word.x & 0xff0000) >> 8)));
out[336 * 128 + xidx * 128 + 7 * yidx + sample].y = static_cast<float>(static_cast<short>(((word.x & 0xff00) >> 8) | ((word.x & 0xff) << 8)));
}
}
__global__ void rearrange2(hipTextureObject_t texObj, hipfftComplex * __restrict__ out, unsigned int acc)
{
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * 128;
int chanidx = threadIdx.x + blockIdx.y * 7;
int skip;
int2 word;
for (int ac = 0; ac < acc; ac++) {
skip = 336 * 128 * 2 * ac;
for (int sample = 0; sample < YSIZE; sample++) {
word = tex2D<int2>(texObj, xidx, yidx + ac * 48 * 128 + sample);
out[skip + chanidx * YSIZE * 2 + sample].x = static_cast<float>(static_cast<short>(((word.y & 0xff000000) >> 24) | ((word.y & 0xff0000) >> 8)));
out[skip + chanidx * YSIZE * 2 + sample].y = static_cast<float>(static_cast<short>(((word.y & 0xff00) >> 8) | ((word.y & 0xff) << 8)));
out[skip + chanidx * YSIZE * 2 + YSIZE + sample].x = static_cast<float>(static_cast<short>(((word.x & 0xff000000) >> 24) | ((word.x & 0xff0000) >> 8)));
out[skip + chanidx * YSIZE * 2 + YSIZE + sample].y = static_cast<float>(static_cast<short>(((word.x & 0xff00) >> 8) | ((word.x & 0xff) << 8)));
}
}
}
__global__ void addtime(float *in, float *out, unsigned int jumpin, unsigned int jumpout, unsigned int factort)
{
// index will tell which 1MHz channel we are taking care of
// use 1 thread per 1MHz channel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//if (idx == 0) printf("In the time kernel\n");
for(int ch = 0; ch < 27; ch++) {
// have to reset to 0, otherwise we will add to values from the previous execution
out[idx * 27 + ch] = (float)0.0;
out[idx * 27 + ch + jumpout] = (float)0.0;
out[idx * 27 + ch + 2 * jumpout] = (float)0.0;
out[idx * 27 + ch + 3 * jumpout] = (float)0.0;
for (int t = 0; t < factort; t++) {
out[idx * 27 + ch] += in[idx * 128 + ch + t * 32];
//printf("S1 time sum %f\n", out[idx * 27 + ch]);
out[idx * 27 + ch + jumpout] += in[idx * 128 + ch + t * 32 + jumpin];
out[idx * 27 + ch + 2 * jumpout] += in[idx * 128 + ch + t * 32 + 2 * jumpin];
out[idx * 27 + ch + 3 * jumpout] += in[idx * 128 + ch + t * 32 + 3 * jumpin];
}
}
}
/*__global__ void addtime(float* __restrict__ int, float* __restrict__ out, unsigned int jumpin, unsigned int jumpout, unsigned int factort)
{
} */
__global__ void addchannel(float* __restrict__ in, float* __restrict__ out, unsigned int jumpin, unsigned int jumpout, unsigned int factorc) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//if (idx == 0) printf("In the channel kernel\n");
out[idx] = (float)0.0;
out[idx + jumpout] = (float)0.0;
out[idx + 2 * jumpout] = (float)0.0;
out[idx + 3 * jumpout] = (float)0.0;
for (int ch = 0; ch < factorc; ch++) {
out[idx] += in[idx * factorc + ch];
out[idx + jumpout] += in[idx * factorc + ch + jumpin];
out[idx + 2 * jumpout] += in[idx * factorc + ch + 2 * jumpin];
out[idx + 3 * jumpout] += in[idx * factorc + ch + 3 * jumpin];
}
//printf("S1 freq sum %f\n", out[idx]);
}
__global__ void addchannel2(float* __restrict__ in, float** __restrict__ out, short nchans, size_t gulp, size_t totsize, short gulpno, unsigned int jumpin, unsigned int factorc, unsigned int framet, unsigned int acc) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int extra = totsize - gulpno * gulp;
// that's the starting save position for the chunk of length acc time samples
int saveidx;
int inskip;
for (int ac = 0; ac < acc; ac++) {
saveidx = (framet % (gulpno * gulp)) * nchans + idx;
inskip = ac * 27 * 336;
out[0][saveidx] = (float)0.0;
out[1][saveidx] = (float)0.0;
out[2][saveidx] = (float)0.0;
out[3][saveidx] = (float)0.0;
if ((framet % (gulpno * gulp)) >= extra) {
for (int ch = 0; ch < factorc; ch++) {
out[0][saveidx] += in[inskip + idx * factorc + ch];
out[1][saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
} else {
for (int ch = 0; ch < factorc; ch++) {
out[0][saveidx] += in[inskip + idx * factorc + ch];
out[1][saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
// save in two places -save in the extra bit
out[0][saveidx + (gulpno * gulp * nchans)] = out[0][saveidx];
out[1][saveidx + (gulpno * gulp * nchans)] = out[1][saveidx];
out[2][saveidx + (gulpno * gulp * nchans)] = out[2][saveidx];
out[3][saveidx + (gulpno * gulp * nchans)] = out[3][saveidx];
}
framet++;
}
// not a problem - each thread in a warp uses the same branch
/* if ((framet % totsize) < gulpno * gulp) {
for (int ac = 0; ac < acc; ac++) {
inskip = ac * 27 * 336;
outskip = ac * 27 * 336 / factorc;
for (int ch = 0; ch < factorc; ch++) {
out[0][outskip + saveidx] += in[inskip + idx * factorc + ch];
out[1][outskip + saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][outskip + saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][outskip + saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
}
} else {
for (int ac = 0; ac < acc; ac++) {
for (int ch = 0; ch < factorc; ch++) {
out[0][outskip + saveidx] += in[idx * factorc + ch];
out[1][outskip + saveidx] += in[idx * factorc + ch + jumpin];
out[2][outskip + saveidx] += in[idx * factorc + ch + 2 * jumpin];
out[3][outskip + saveidx] += in[idx * factorc + ch + 3 * jumpin];
}
// save in two places - wrap around to the start of the buffer
out[0][outskip + saveidx - (gulpno * gulp * nchans)] = out[0][outskip + saveidx];
out[1][outskip + saveidx - (gulpno * gulp * nchans)] = out[1][outskip + saveidx];
out[2][outskip + saveidx - (gulpno * gulp * nchans)] = out[2][outskip + saveidx];
out[3][outskip + saveidx - (gulpno * gulp * nchans)] = out[3][outskip + saveidx];
}
}
*/
}
__global__ void addchanscale(float* __restrict__ in, float** __restrict__ out, short nchans, size_t gulp, size_t totsize, short gulpno, unsigned int jumpin, unsigned int factorc, unsigned int framet, unsigned int acc, float **means, float **rstdevs) {
// the number of threads is equal to the number of output channels
// each 'idx' is responsible for one output frequency channel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int extra = totsize - gulpno * gulp;
float avgfactor = 1.0f / factorc;
// that's the starting save position for the chunk of length acc time samples
int saveidx;
float tmp0, tmp1, tmp2, tmp3;
int inskip;
for (int ac = 0; ac < acc; ac++) {
// channels in increasing order
// saveidx = (framet % (gulpno * gulp)) * nchans + idx;
// channels in decreasing order
saveidx = (framet % (gulpno * gulp)) * nchans + nchans - (idx + 1);
inskip = ac * 27 * 336;
out[0][saveidx] = (float)0.0;
out[1][saveidx] = (float)0.0;
out[2][saveidx] = (float)0.0;
out[3][saveidx] = (float)0.0;
// use scaling of the form
// out = (in - mean) / stdev * 32 + 64;
// rstdev = (1 / stdev) * 32 to reduce the number of operations
if ((framet % (gulpno * gulp)) >= extra) {
for (int ch = 0; ch < factorc; ch++) {
out[0][saveidx] += in[inskip + idx * factorc + ch];
out[1][saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
// scaling
out[0][saveidx] = (out[0][saveidx] * avgfactor - means[0][idx]) * rstdevs[0][idx] + 64.0f;
out[1][saveidx] = (out[1][saveidx] * avgfactor - means[1][idx]) * rstdevs[1][idx] + 64.0f;
out[2][saveidx] = (out[2][saveidx] * avgfactor - means[2][idx]) * rstdevs[2][idx] + 64.0f;
out[3][saveidx] = (out[3][saveidx] * avgfactor - means[3][idx]) * rstdevs[3][idx] + 64.0f;
} else {
for (int ch = 0; ch < factorc; ch++) {
out[0][saveidx] += in[inskip + idx * factorc + ch];
out[1][saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
// scaling
out[0][saveidx] = (out[0][saveidx] * avgfactor - means[0][idx]) * rstdevs[0][idx] + 64.0f;
out[1][saveidx] = (out[1][saveidx] * avgfactor - means[1][idx]) * rstdevs[1][idx] + 64.0f;
out[2][saveidx] = (out[2][saveidx] * avgfactor - means[2][idx]) * rstdevs[2][idx] + 64.0f;
out[3][saveidx] = (out[3][saveidx] * avgfactor - means[3][idx]) * rstdevs[3][idx] + 64.0f;
tmp0 = rintf(fminf(fmaxf(0.0, out[0][saveidx]), 255.0));
out[0][saveidx] = tmp0;
//out[0][saveidx] = fminf(255, out[0][saveidx]);
out[1][saveidx] = fmaxf(0.0, out[0][saveidx]);
out[1][saveidx] = fminf(255, out[0][saveidx]);
out[2][saveidx] = fmaxf(0.0, out[0][saveidx]);
out[2][saveidx] = fminf(255, out[0][saveidx]);
out[3][saveidx] = fmaxf(0.0, out[0][saveidx]);
out[3][saveidx] = fminf(255, out[0][saveidx]);
// save in two places -save in the extra bit
out[0][saveidx + (gulpno * gulp * nchans)] = out[0][saveidx];
out[1][saveidx + (gulpno * gulp * nchans)] = out[1][saveidx];
out[2][saveidx + (gulpno * gulp * nchans)] = out[2][saveidx];
out[3][saveidx + (gulpno * gulp * nchans)] = out[3][saveidx];
}
framet++;
}
}
__global__ void powerscale(hipfftComplex *in, float *out, unsigned int jump)
{
int idx1 = blockIdx.x * blockDim.x + threadIdx.x;
//if (idx1 == 0) printf("In the power kernel\n");
// offset introduced, jump to the B polarisation data - can cause some slowing down
int idx2 = idx1 + jump;
// these calculations assume polarisation is recorded in x,y base
// I think the if statement is unnecessary, as the number of threads for this
// kernel is fftpoint * timeavg * nchans, which is exactly the size of the output array
if (idx1 < jump) { // half of the input data
float power1 = (in[idx1].x * in[idx1].x + in[idx1].y * in[idx1].y) * fftfactor;
float power2 = (in[idx2].x * in[idx2].x + in[idx2].y * in[idx2].y) * fftfactor;
out[idx1] = (power1 + power2); // I; what was this doing here? / 2.0;
//printf("Input numbers for %i and %i with jump %i: %f %f %f %f, with power %f\n", idx1, idx2, jump, in[idx1].x, in[idx1].y, in[idx2].x, in[idx2].y, out[idx1]);
out[idx1 + jump] = (power1 - power2); // Q
out[idx1 + 2 * jump] = 2 * fftfactor * (in[idx1].x * in[idx2].x + in[idx1].y * in[idx2].y); // U
out[idx1 + 3 * jump] = 2 * fftfactor * (in[idx1].x * in[idx2].y - in[idx1].y * in[idx2].x); // V
}
}
__global__ void powertime(hipfftComplex* __restrict__ in, float* __restrict__ out, unsigned int jump, unsigned int factort)
{
// 1MHz channel ID
int idx1 = blockIdx.x;
// 'small' channel ID
int idx2 = threadIdx.x;
float power1;
float power2;
idx1 = idx1 * YSIZE * 2;
int outidx = 27 * blockIdx.x + threadIdx.x;
out[outidx] = (float)0.0;
out[outidx + jump] = (float)0.0;
out[outidx + 2 * jump] = (float)0.0;
out[outidx + 3 * jump] = (float)0.0;
for (int ii = 0; ii < factort; ii++) {
idx2 = threadIdx.x + ii * 32;
power1 = (in[idx1 + idx2].x * in[idx1 + idx2].x + in[idx1 + idx2].y * in[idx1 + idx2].y) * fftfactor;
power2 = (in[idx1 + 128 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + 128 + idx2].y * in[idx1 + 128 + idx2].y) * fftfactor;
out[outidx] += (power1 + power2);
out[outidx + jump] += (power1 - power2);
out[outidx + 2 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + idx2].y * in[idx1 + 128 + idx2].y));
out[outidx + 3 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].y - in[idx1 + idx2].y * in[idx1 + 128 + idx2].x));
}
printf("%i, %i: %i\n", blockIdx.x, threadIdx.x, out[outidx]);
}
__global__ void powertime2(hipfftComplex* __restrict__ in, float* __restrict__ out, unsigned int jump, unsigned int factort, unsigned int acc) {
int idx1, idx2;
int outidx;
int skip1, skip2;
float power1, power2;
float avgfactor= 1.0f / factort;
for (int ac = 0; ac < acc; ac++) {
skip1 = ac * 336 * 128 * 2;
skip2 = ac * 336 * 27;
for (int ii = 0; ii < 7; ii++) {
outidx = skip2 + 7 * 27 * blockIdx.x + ii * 27 + threadIdx.x;
out[outidx] = (float)0.0;
out[outidx + jump] = (float)0.0;
out[outidx + 2 * jump] = (float)0.0;
out[outidx + 3 * jump] = (float)0.0;
idx1 = skip1 + 256 * (blockIdx.x * 7 + ii);
for (int jj = 0; jj < factort; jj++) {
idx2 = threadIdx.x + jj * 32;
power1 = (in[idx1 + idx2].x * in[idx1 + idx2].x + in[idx1 + idx2].y * in[idx1 + idx2].y) * fftfactor;
power2 = (in[idx1 + 128 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + 128 + idx2].y * in[idx1 + 128 + idx2].y) * fftfactor;
out[outidx] += (power1 + power2) * avgfactor;
out[outidx + jump] += (power1 - power2) * avgfactor;
out[outidx + 2 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + idx2].y * in[idx1 + 128 + idx2].y)) * avgfactor;
out[outidx + 3 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].y - in[idx1 + idx2].y * in[idx1 + 128 + idx2].x)) * avgfactor;
}
}
}
// printf("%i, %i: %i\n", blockIdx.x, threadIdx.x, out[outidx]);
}
// initialise the scale factors
// memset is slower than custom kernels and not safe for anything other than int
__global__ void initscalefactors(float **means, float **rstdevs, int stokes) {
// the scaling is (in - mean) * rstdev + 64.0f
// and I want to get the original in back in the first running
// will therefore set the mean to 64.0f and rstdev to 1.0f
// each thread responsible for one channel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int ii = 0; ii < stokes; ii++) {
means[ii][idx] = 64.0f;
rstdevs[ii][idx] = 1.0f;
}
}
// filterbank data saved in the format t1c1,t1c2,t1c3,...
// need to transpose to t1c1,t2c1,t3c1,... for easy and efficient scaling kernel
__global__ void transpose(float* __restrict__ in, float* __restrict__ out, unsigned int nchans, unsigned int ntimes) {
// very horrible implementation of matrix transpose
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int start = idx * ntimes;
for (int tsamp = 0; tsamp < ntimes; tsamp++) {
out[start + tsamp] = in[idx + tsamp * nchans];
}
}
__global__ void scale_factors(float *in, float **means, float **rstdevs, unsigned int nchans, unsigned int ntimes, int param) {
// calculates mean and standard deviation in every channel
// assumes the data has been transposed
// for now have one thread per frequency channel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float mean;
float variance;
float ntrec = 1.0f / (float)ntimes;
float ntrec1 = 1.0f / (float)(ntimes - 1.0f);
unsigned int start = idx * ntimes;
mean = 0.0f;
variance = 0.0;
// two-pass solution for now
for (int tsamp = 0; tsamp < ntimes; tsamp++) {
mean += in[start + tsamp] * ntrec;
}
means[param][idx] = mean;
for (int tsamp = 0; tsamp < ntimes; tsamp++) {
variance += (in[start + tsamp] - mean) * (in[start + tsamp] - mean);
}
variance *= ntrec1;
// reciprocal of standard deviation
// multiplied by the desired standard deviation of the scaled data
// reduces the number of operations that have to be done on the GPU
rstdevs[param][idx] = rsqrtf(variance) * 32.0f;
// to avoid inf when there is no data in the channel
if (means[param][idx] == 0)
rstdevs[param][idx] = 0;
}
__global__ void bandpass() {
}
| c5f6350ce236373f5ef7db2f996e7a83e6a5098f.cu | #include <stdio.h>
#include <kernels.cuh>
#define XSIZE 7
#define YSIZE 128
#define ZSIZE 48
// __restrict__ tells the compiler there is no memory overlap
__device__ float fftfactor = 1.0/32.0 * 1.0/32.0;
__global__ void rearrange(cudaTextureObject_t texObj, cufftComplex * __restrict__ out)
{
// this is currently the ugliest solution I can think of
// xidx is the channel number
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * 128;
int2 word;
//if ((xidx == 0) && (yidx == 0)) printf("In the rearrange kernel\n");
for (int sample = 0; sample < YSIZE; sample++) {
word = tex2D<int2>(texObj, xidx, yidx + sample);
printf("%i ", sample);
out[xidx * 128 + 7 * yidx + sample].x = static_cast<float>(static_cast<short>(((word.y & 0xff000000) >> 24) | ((word.y & 0xff0000) >> 8)));
out[xidx * 128 + 7 * yidx + sample].y = static_cast<float>(static_cast<short>(((word.y & 0xff00) >> 8) | ((word.y & 0xff) << 8)));
out[336 * 128 + xidx * 128 + 7 * yidx + sample].x = static_cast<float>(static_cast<short>(((word.x & 0xff000000) >> 24) | ((word.x & 0xff0000) >> 8)));
out[336 * 128 + xidx * 128 + 7 * yidx + sample].y = static_cast<float>(static_cast<short>(((word.x & 0xff00) >> 8) | ((word.x & 0xff) << 8)));
}
}
__global__ void rearrange2(cudaTextureObject_t texObj, cufftComplex * __restrict__ out, unsigned int acc)
{
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * 128;
int chanidx = threadIdx.x + blockIdx.y * 7;
int skip;
int2 word;
for (int ac = 0; ac < acc; ac++) {
skip = 336 * 128 * 2 * ac;
for (int sample = 0; sample < YSIZE; sample++) {
word = tex2D<int2>(texObj, xidx, yidx + ac * 48 * 128 + sample);
out[skip + chanidx * YSIZE * 2 + sample].x = static_cast<float>(static_cast<short>(((word.y & 0xff000000) >> 24) | ((word.y & 0xff0000) >> 8)));
out[skip + chanidx * YSIZE * 2 + sample].y = static_cast<float>(static_cast<short>(((word.y & 0xff00) >> 8) | ((word.y & 0xff) << 8)));
out[skip + chanidx * YSIZE * 2 + YSIZE + sample].x = static_cast<float>(static_cast<short>(((word.x & 0xff000000) >> 24) | ((word.x & 0xff0000) >> 8)));
out[skip + chanidx * YSIZE * 2 + YSIZE + sample].y = static_cast<float>(static_cast<short>(((word.x & 0xff00) >> 8) | ((word.x & 0xff) << 8)));
}
}
}
__global__ void addtime(float *in, float *out, unsigned int jumpin, unsigned int jumpout, unsigned int factort)
{
// index will tell which 1MHz channel we are taking care of
// use 1 thread per 1MHz channel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//if (idx == 0) printf("In the time kernel\n");
for(int ch = 0; ch < 27; ch++) {
// have to reset to 0, otherwise we will add to values from the previous execution
out[idx * 27 + ch] = (float)0.0;
out[idx * 27 + ch + jumpout] = (float)0.0;
out[idx * 27 + ch + 2 * jumpout] = (float)0.0;
out[idx * 27 + ch + 3 * jumpout] = (float)0.0;
for (int t = 0; t < factort; t++) {
out[idx * 27 + ch] += in[idx * 128 + ch + t * 32];
//printf("S1 time sum %f\n", out[idx * 27 + ch]);
out[idx * 27 + ch + jumpout] += in[idx * 128 + ch + t * 32 + jumpin];
out[idx * 27 + ch + 2 * jumpout] += in[idx * 128 + ch + t * 32 + 2 * jumpin];
out[idx * 27 + ch + 3 * jumpout] += in[idx * 128 + ch + t * 32 + 3 * jumpin];
}
}
}
/*__global__ void addtime(float* __restrict__ int, float* __restrict__ out, unsigned int jumpin, unsigned int jumpout, unsigned int factort)
{
} */
__global__ void addchannel(float* __restrict__ in, float* __restrict__ out, unsigned int jumpin, unsigned int jumpout, unsigned int factorc) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
//if (idx == 0) printf("In the channel kernel\n");
out[idx] = (float)0.0;
out[idx + jumpout] = (float)0.0;
out[idx + 2 * jumpout] = (float)0.0;
out[idx + 3 * jumpout] = (float)0.0;
for (int ch = 0; ch < factorc; ch++) {
out[idx] += in[idx * factorc + ch];
out[idx + jumpout] += in[idx * factorc + ch + jumpin];
out[idx + 2 * jumpout] += in[idx * factorc + ch + 2 * jumpin];
out[idx + 3 * jumpout] += in[idx * factorc + ch + 3 * jumpin];
}
//printf("S1 freq sum %f\n", out[idx]);
}
__global__ void addchannel2(float* __restrict__ in, float** __restrict__ out, short nchans, size_t gulp, size_t totsize, short gulpno, unsigned int jumpin, unsigned int factorc, unsigned int framet, unsigned int acc) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int extra = totsize - gulpno * gulp;
// that's the starting save position for the chunk of length acc time samples
int saveidx;
int inskip;
for (int ac = 0; ac < acc; ac++) {
saveidx = (framet % (gulpno * gulp)) * nchans + idx;
inskip = ac * 27 * 336;
out[0][saveidx] = (float)0.0;
out[1][saveidx] = (float)0.0;
out[2][saveidx] = (float)0.0;
out[3][saveidx] = (float)0.0;
if ((framet % (gulpno * gulp)) >= extra) {
for (int ch = 0; ch < factorc; ch++) {
out[0][saveidx] += in[inskip + idx * factorc + ch];
out[1][saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
} else {
for (int ch = 0; ch < factorc; ch++) {
out[0][saveidx] += in[inskip + idx * factorc + ch];
out[1][saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
// save in two places -save in the extra bit
out[0][saveidx + (gulpno * gulp * nchans)] = out[0][saveidx];
out[1][saveidx + (gulpno * gulp * nchans)] = out[1][saveidx];
out[2][saveidx + (gulpno * gulp * nchans)] = out[2][saveidx];
out[3][saveidx + (gulpno * gulp * nchans)] = out[3][saveidx];
}
framet++;
}
// not a problem - each thread in a warp uses the same branch
/* if ((framet % totsize) < gulpno * gulp) {
for (int ac = 0; ac < acc; ac++) {
inskip = ac * 27 * 336;
outskip = ac * 27 * 336 / factorc;
for (int ch = 0; ch < factorc; ch++) {
out[0][outskip + saveidx] += in[inskip + idx * factorc + ch];
out[1][outskip + saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][outskip + saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][outskip + saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
}
} else {
for (int ac = 0; ac < acc; ac++) {
for (int ch = 0; ch < factorc; ch++) {
out[0][outskip + saveidx] += in[idx * factorc + ch];
out[1][outskip + saveidx] += in[idx * factorc + ch + jumpin];
out[2][outskip + saveidx] += in[idx * factorc + ch + 2 * jumpin];
out[3][outskip + saveidx] += in[idx * factorc + ch + 3 * jumpin];
}
// save in two places - wrap around to the start of the buffer
out[0][outskip + saveidx - (gulpno * gulp * nchans)] = out[0][outskip + saveidx];
out[1][outskip + saveidx - (gulpno * gulp * nchans)] = out[1][outskip + saveidx];
out[2][outskip + saveidx - (gulpno * gulp * nchans)] = out[2][outskip + saveidx];
out[3][outskip + saveidx - (gulpno * gulp * nchans)] = out[3][outskip + saveidx];
}
}
*/
}
__global__ void addchanscale(float* __restrict__ in, float** __restrict__ out, short nchans, size_t gulp, size_t totsize, short gulpno, unsigned int jumpin, unsigned int factorc, unsigned int framet, unsigned int acc, float **means, float **rstdevs) {
// the number of threads is equal to the number of output channels
// each 'idx' is responsible for one output frequency channel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int extra = totsize - gulpno * gulp;
float avgfactor = 1.0f / factorc;
// that's the starting save position for the chunk of length acc time samples
int saveidx;
float tmp0, tmp1, tmp2, tmp3;
int inskip;
for (int ac = 0; ac < acc; ac++) {
// channels in increasing order
// saveidx = (framet % (gulpno * gulp)) * nchans + idx;
// channels in decreasing order
saveidx = (framet % (gulpno * gulp)) * nchans + nchans - (idx + 1);
inskip = ac * 27 * 336;
out[0][saveidx] = (float)0.0;
out[1][saveidx] = (float)0.0;
out[2][saveidx] = (float)0.0;
out[3][saveidx] = (float)0.0;
// use scaling of the form
// out = (in - mean) / stdev * 32 + 64;
// rstdev = (1 / stdev) * 32 to reduce the number of operations
if ((framet % (gulpno * gulp)) >= extra) {
for (int ch = 0; ch < factorc; ch++) {
out[0][saveidx] += in[inskip + idx * factorc + ch];
out[1][saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
// scaling
out[0][saveidx] = (out[0][saveidx] * avgfactor - means[0][idx]) * rstdevs[0][idx] + 64.0f;
out[1][saveidx] = (out[1][saveidx] * avgfactor - means[1][idx]) * rstdevs[1][idx] + 64.0f;
out[2][saveidx] = (out[2][saveidx] * avgfactor - means[2][idx]) * rstdevs[2][idx] + 64.0f;
out[3][saveidx] = (out[3][saveidx] * avgfactor - means[3][idx]) * rstdevs[3][idx] + 64.0f;
} else {
for (int ch = 0; ch < factorc; ch++) {
out[0][saveidx] += in[inskip + idx * factorc + ch];
out[1][saveidx] += in[inskip + idx * factorc + ch + jumpin];
out[2][saveidx] += in[inskip + idx * factorc + ch + 2 * jumpin];
out[3][saveidx] += in[inskip + idx * factorc + ch + 3 * jumpin];
}
// scaling
out[0][saveidx] = (out[0][saveidx] * avgfactor - means[0][idx]) * rstdevs[0][idx] + 64.0f;
out[1][saveidx] = (out[1][saveidx] * avgfactor - means[1][idx]) * rstdevs[1][idx] + 64.0f;
out[2][saveidx] = (out[2][saveidx] * avgfactor - means[2][idx]) * rstdevs[2][idx] + 64.0f;
out[3][saveidx] = (out[3][saveidx] * avgfactor - means[3][idx]) * rstdevs[3][idx] + 64.0f;
tmp0 = rintf(fminf(fmaxf(0.0, out[0][saveidx]), 255.0));
out[0][saveidx] = tmp0;
//out[0][saveidx] = fminf(255, out[0][saveidx]);
out[1][saveidx] = fmaxf(0.0, out[0][saveidx]);
out[1][saveidx] = fminf(255, out[0][saveidx]);
out[2][saveidx] = fmaxf(0.0, out[0][saveidx]);
out[2][saveidx] = fminf(255, out[0][saveidx]);
out[3][saveidx] = fmaxf(0.0, out[0][saveidx]);
out[3][saveidx] = fminf(255, out[0][saveidx]);
// save in two places -save in the extra bit
out[0][saveidx + (gulpno * gulp * nchans)] = out[0][saveidx];
out[1][saveidx + (gulpno * gulp * nchans)] = out[1][saveidx];
out[2][saveidx + (gulpno * gulp * nchans)] = out[2][saveidx];
out[3][saveidx + (gulpno * gulp * nchans)] = out[3][saveidx];
}
framet++;
}
}
__global__ void powerscale(cufftComplex *in, float *out, unsigned int jump)
{
int idx1 = blockIdx.x * blockDim.x + threadIdx.x;
//if (idx1 == 0) printf("In the power kernel\n");
// offset introduced, jump to the B polarisation data - can cause some slowing down
int idx2 = idx1 + jump;
// these calculations assume polarisation is recorded in x,y base
// I think the if statement is unnecessary, as the number of threads for this
// kernel is fftpoint * timeavg * nchans, which is exactly the size of the output array
if (idx1 < jump) { // half of the input data
float power1 = (in[idx1].x * in[idx1].x + in[idx1].y * in[idx1].y) * fftfactor;
float power2 = (in[idx2].x * in[idx2].x + in[idx2].y * in[idx2].y) * fftfactor;
out[idx1] = (power1 + power2); // I; what was this doing here? / 2.0;
//printf("Input numbers for %i and %i with jump %i: %f %f %f %f, with power %f\n", idx1, idx2, jump, in[idx1].x, in[idx1].y, in[idx2].x, in[idx2].y, out[idx1]);
out[idx1 + jump] = (power1 - power2); // Q
out[idx1 + 2 * jump] = 2 * fftfactor * (in[idx1].x * in[idx2].x + in[idx1].y * in[idx2].y); // U
out[idx1 + 3 * jump] = 2 * fftfactor * (in[idx1].x * in[idx2].y - in[idx1].y * in[idx2].x); // V
}
}
__global__ void powertime(cufftComplex* __restrict__ in, float* __restrict__ out, unsigned int jump, unsigned int factort)
{
// 1MHz channel ID
int idx1 = blockIdx.x;
// 'small' channel ID
int idx2 = threadIdx.x;
float power1;
float power2;
idx1 = idx1 * YSIZE * 2;
int outidx = 27 * blockIdx.x + threadIdx.x;
out[outidx] = (float)0.0;
out[outidx + jump] = (float)0.0;
out[outidx + 2 * jump] = (float)0.0;
out[outidx + 3 * jump] = (float)0.0;
for (int ii = 0; ii < factort; ii++) {
idx2 = threadIdx.x + ii * 32;
power1 = (in[idx1 + idx2].x * in[idx1 + idx2].x + in[idx1 + idx2].y * in[idx1 + idx2].y) * fftfactor;
power2 = (in[idx1 + 128 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + 128 + idx2].y * in[idx1 + 128 + idx2].y) * fftfactor;
out[outidx] += (power1 + power2);
out[outidx + jump] += (power1 - power2);
out[outidx + 2 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + idx2].y * in[idx1 + 128 + idx2].y));
out[outidx + 3 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].y - in[idx1 + idx2].y * in[idx1 + 128 + idx2].x));
}
printf("%i, %i: %i\n", blockIdx.x, threadIdx.x, out[outidx]);
}
__global__ void powertime2(cufftComplex* __restrict__ in, float* __restrict__ out, unsigned int jump, unsigned int factort, unsigned int acc) {
int idx1, idx2;
int outidx;
int skip1, skip2;
float power1, power2;
float avgfactor= 1.0f / factort;
for (int ac = 0; ac < acc; ac++) {
skip1 = ac * 336 * 128 * 2;
skip2 = ac * 336 * 27;
for (int ii = 0; ii < 7; ii++) {
outidx = skip2 + 7 * 27 * blockIdx.x + ii * 27 + threadIdx.x;
out[outidx] = (float)0.0;
out[outidx + jump] = (float)0.0;
out[outidx + 2 * jump] = (float)0.0;
out[outidx + 3 * jump] = (float)0.0;
idx1 = skip1 + 256 * (blockIdx.x * 7 + ii);
for (int jj = 0; jj < factort; jj++) {
idx2 = threadIdx.x + jj * 32;
power1 = (in[idx1 + idx2].x * in[idx1 + idx2].x + in[idx1 + idx2].y * in[idx1 + idx2].y) * fftfactor;
power2 = (in[idx1 + 128 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + 128 + idx2].y * in[idx1 + 128 + idx2].y) * fftfactor;
out[outidx] += (power1 + power2) * avgfactor;
out[outidx + jump] += (power1 - power2) * avgfactor;
out[outidx + 2 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + idx2].y * in[idx1 + 128 + idx2].y)) * avgfactor;
out[outidx + 3 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].y - in[idx1 + idx2].y * in[idx1 + 128 + idx2].x)) * avgfactor;
}
}
}
// printf("%i, %i: %i\n", blockIdx.x, threadIdx.x, out[outidx]);
}
// initialise the scale factors
// memset is slower than custom kernels and not safe for anything other than int
__global__ void initscalefactors(float **means, float **rstdevs, int stokes) {
// the scaling is (in - mean) * rstdev + 64.0f
// and I want to get the original in back in the first running
// will therefore set the mean to 64.0f and rstdev to 1.0f
// each thread responsible for one channel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (int ii = 0; ii < stokes; ii++) {
means[ii][idx] = 64.0f;
rstdevs[ii][idx] = 1.0f;
}
}
// filterbank data saved in the format t1c1,t1c2,t1c3,...
// need to transpose to t1c1,t2c1,t3c1,... for easy and efficient scaling kernel
__global__ void transpose(float* __restrict__ in, float* __restrict__ out, unsigned int nchans, unsigned int ntimes) {
// very horrible implementation of matrix transpose
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int start = idx * ntimes;
for (int tsamp = 0; tsamp < ntimes; tsamp++) {
out[start + tsamp] = in[idx + tsamp * nchans];
}
}
__global__ void scale_factors(float *in, float **means, float **rstdevs, unsigned int nchans, unsigned int ntimes, int param) {
// calculates mean and standard deviation in every channel
// assumes the data has been transposed
// for now have one thread per frequency channel
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float mean;
float variance;
float ntrec = 1.0f / (float)ntimes;
float ntrec1 = 1.0f / (float)(ntimes - 1.0f);
unsigned int start = idx * ntimes;
mean = 0.0f;
variance = 0.0;
// two-pass solution for now
for (int tsamp = 0; tsamp < ntimes; tsamp++) {
mean += in[start + tsamp] * ntrec;
}
means[param][idx] = mean;
for (int tsamp = 0; tsamp < ntimes; tsamp++) {
variance += (in[start + tsamp] - mean) * (in[start + tsamp] - mean);
}
variance *= ntrec1;
// reciprocal of standard deviation
// multiplied by the desired standard deviation of the scaled data
// reduces the number of operations that have to be done on the GPU
rstdevs[param][idx] = rsqrtf(variance) * 32.0f;
// to avoid inf when there is no data in the channel
if (means[param][idx] == 0)
rstdevs[param][idx] = 0;
}
__global__ void bandpass() {
}
|
a36d7d17dfeee156bb8aef139f9175e3ac39afe8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define GAPX (118)
#define EXTENT (5)
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
hipLaunchKernelGGL(( __kernel_init__), dim3(init_grid),dim3(init_block), 0, 0, d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX);
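// each block works on a tile of FORMA_BLOCKDIM_X columns; the GAPX columns between tiles are left for __kernel___forma_kernel__1__ to fill in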
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the value
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*0];
t2 = input[__iter_3__+M*1];
}
// Rest of the computation
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = b2;
float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = t2;
float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
float __temp_19__ = (__temp_18__ / 118);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_19__;
//printf ("var0[%d][%d] = %.6f\n", __iter_1__, __iter_3__, t3);
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_1__+(M)*(__iter_3__)] = t3;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_32__ = b3;
float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]);
float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
float __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
float __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
float __temp_47__ = t3;
float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
float __temp_49__ = (__temp_48__ / 118);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_49__;
//printf ("var1[%d][%d] = %.6f\n", __iter_1__-1, __iter_3__, t4);
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_1__+(M)*(__iter_3__)] = t4;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_60__ = b4;
float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]);
float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
float __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]);
float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
float __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]);
float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
float __temp_67__ = t4;
float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
float __temp_69__ = (__temp_68__ / 118);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_69__;
//printf ("var2[%d][%d] = %.6f\n", __iter_1__-2, __iter_3__, t5);
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_1__+(M)*(__iter_3__)] = t5;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_80__ = b5;
float __temp_81__ = (__tilevar_5__[__iter_3__+(-1)-__iter_0__]);
float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__);
float __temp_83__ = (__tilevar_5__[__iter_3__-__iter_0__]);
float __temp_84__ = (__temp_82__ + 15 * __temp_83__);
float __temp_85__ = (__tilevar_5__[__iter_3__+1-__iter_0__]);
float __temp_86__ = (__temp_84__ + 12 * __temp_85__);
float __temp_87__ = t5;
float __temp_88__ = (__temp_86__ + 5 * __temp_87__);
float __temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__;
//printf ("var3[%d][%d] = %.6f\n", __iter_1__-3, __iter_3__, __temp_89__);
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX) + (int)FORMA_BLOCKDIM_X;
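// this kernel covers the GAPX-wide gaps (plus an EXTENT halo) skipped by kernel 0, pulling boundary values from the __copy_arr_* buffers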
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if (__iter_3__ >= FORMA_MAX(__iter_0__-2,0) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)] = input[__iter_3__+(M)*(0)];
t2 = input[__iter_3__+(M)*(1)];
}
// Rest of the computation
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__++) {
if(__iter_3__ >= FORMA_MAX(__iter_0__-2,0) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)] = t2;
t2 = input[__iter_3__+(M)*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
float __temp_2__ = b2;
float __temp_5__ = (__tilevar_2__[__iter_3__+(-1)+(EXTENT-__iter_0__)]);
float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
float __temp_9__ = (__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)]);
float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
float __temp_13__ = (__tilevar_2__[__iter_3__+1+(EXTENT-__iter_0__)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = t2;
float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
float __temp_19__ = (__temp_18__ / 118);
b3 = __tilevar_3__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_3__[__iter_3__+(EXTENT-__iter_0__)] = t3;
t3 = __temp_19__;
//printf ("var0[%d][%d] = %.6f\n", __iter_1__-0, __iter_3__, t3);
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-1),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)))) {
b3 = __copy_arr_0__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_3__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_0__[__iter_1__-1+(M)*(__iter_3__)];
t3 = __copy_arr_0__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
float __temp_32__ = b3;
float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)+(EXTENT-__iter_0__)]);
float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
float __temp_39__ = (__tilevar_3__[__iter_3__+(EXTENT-__iter_0__)]);
float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
float __temp_43__ = (__tilevar_3__[__iter_3__+1+(EXTENT-__iter_0__)]);
float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
float __temp_47__ = t3;
float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
float __temp_49__ = (__temp_48__ / 118);
b4 = __tilevar_4__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_4__[__iter_3__+(EXTENT-__iter_0__)] = t4;
t4 = __temp_49__;
//printf ("var1[%d][%d] = %.6f\n", __iter_1__-1, __iter_3__, t4);
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-2),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)))) {
b4 = __copy_arr_1__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_4__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_1__[__iter_1__-1+(M)*(__iter_3__)];
t4 = __copy_arr_1__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_60__ = b4;
float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)+(EXTENT-__iter_0__)]);
float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
float __temp_63__ = (__tilevar_4__[__iter_3__+(EXTENT-__iter_0__)]);
float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
float __temp_65__ = (__tilevar_4__[__iter_3__+1+(EXTENT-__iter_0__)]);
float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
float __temp_67__ = t4;
float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
float __temp_69__ = (__temp_68__ / 118);
b5 = __tilevar_5__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_5__[__iter_3__+(EXTENT-__iter_0__)] = t5;
t5 = __temp_69__;
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-5),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-3),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)))) {
b5 = __copy_arr_2__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_5__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_2__[__iter_1__-1+(M)*(__iter_3__)];
t5 = __copy_arr_2__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if( __iter_3__ >= FORMA_MAX((__iter_0__-4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_80__ = b5;
float __temp_81__ = (__tilevar_5__[__iter_3__+(-1)+(EXTENT-__iter_0__)]);
float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__);
float __temp_83__ = (__tilevar_5__[__iter_3__+(EXTENT-__iter_0__)]);
float __temp_84__ = (__temp_82__ + 15 * __temp_83__);
float __temp_85__ = (__tilevar_5__[__iter_3__+1+(EXTENT-__iter_0__)]);
float __temp_86__ = (__temp_84__ + 12 * __temp_85__);
float __temp_87__ = t5;
float __temp_88__ = (__temp_86__ + 5 * __temp_87__);
float __temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
hipMalloc(&__copy_arr_0__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
hipMalloc(&__copy_arr_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
hipMalloc(&__copy_arr_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__copy_arr_0__);
hipFree(__copy_arr_1__);
hipFree(__copy_arr_2__);
}
/*Host Free End*/
| a36d7d17dfeee156bb8aef139f9175e3ac39afe8.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define GAPX (118)
#define EXTENT (5)
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX);
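// each block works on a tile of FORMA_BLOCKDIM_X columns; the GAPX columns between tiles are left for __kernel___forma_kernel__1__ to fill in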
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the value
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*0];
t2 = input[__iter_3__+M*1];
}
// Rest of the computation
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_2__ = b2;
float __temp_5__ = (__tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__]);
float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
float __temp_13__ = (__tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = t2;
float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
float __temp_19__ = (__temp_18__ / 118);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_19__;
//printf ("var0[%d][%d] = %.6f\n", __iter_1__, __iter_3__, t3);
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_1__+(M)*(__iter_3__)] = t3;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_32__ = b3;
float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)-__iter_0__]);
float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
float __temp_39__ = (__tilevar_3__[__iter_3__-__iter_0__]);
float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
float __temp_43__ = (__tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
float __temp_47__ = t3;
float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
float __temp_49__ = (__temp_48__ / 118);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_49__;
//printf ("var1[%d][%d] = %.6f\n", __iter_1__-1, __iter_3__, t4);
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_1__+(M)*(__iter_3__)] = t4;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_60__ = b4;
float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)-__iter_0__]);
float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
float __temp_63__ = (__tilevar_4__[__iter_3__-__iter_0__]);
float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
float __temp_65__ = (__tilevar_4__[__iter_3__+1-__iter_0__]);
float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
float __temp_67__ = t4;
float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
float __temp_69__ = (__temp_68__ / 118);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_69__;
//printf ("var2[%d][%d] = %.6f\n", __iter_1__-2, __iter_3__, t5);
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_1__+(M)*(__iter_3__)] = t5;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_80__ = b5;
float __temp_81__ = (__tilevar_5__[__iter_3__+(-1)-__iter_0__]);
float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__);
float __temp_83__ = (__tilevar_5__[__iter_3__-__iter_0__]);
float __temp_84__ = (__temp_82__ + 15 * __temp_83__);
float __temp_85__ = (__tilevar_5__[__iter_3__+1-__iter_0__]);
float __temp_86__ = (__temp_84__ + 12 * __temp_85__);
float __temp_87__ = t5;
float __temp_88__ = (__temp_86__ + 5 * __temp_87__);
float __temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__;
//printf ("var3[%d][%d] = %.6f\n", __iter_1__-3, __iter_3__, __temp_89__);
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX) + (int)FORMA_BLOCKDIM_X;
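// this kernel covers the GAPX-wide gaps (plus an EXTENT halo) skipped by kernel 0, pulling boundary values from the __copy_arr_* buffers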
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if (__iter_3__ >= FORMA_MAX(__iter_0__-2,0) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)] = input[__iter_3__+(M)*(0)];
t2 = input[__iter_3__+(M)*(1)];
}
// Rest of the computation
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__++) {
if(__iter_3__ >= FORMA_MAX(__iter_0__-2,0) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)] = t2;
t2 = input[__iter_3__+(M)*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
float __temp_2__ = b2;
float __temp_5__ = (__tilevar_2__[__iter_3__+(-1)+(EXTENT-__iter_0__)]);
float __temp_6__ = (5 * __temp_2__ + 12 * __temp_5__);
float __temp_9__ = (__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)]);
float __temp_10__ = (__temp_6__ + 15 * __temp_9__);
float __temp_13__ = (__tilevar_2__[__iter_3__+1+(EXTENT-__iter_0__)]);
float __temp_14__ = (__temp_10__ + 12 * __temp_13__);
float __temp_17__ = t2;
float __temp_18__ = (__temp_14__ + 5 * __temp_17__);
float __temp_19__ = (__temp_18__ / 118);
b3 = __tilevar_3__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_3__[__iter_3__+(EXTENT-__iter_0__)] = t3;
t3 = __temp_19__;
//printf ("var0[%d][%d] = %.6f\n", __iter_1__-0, __iter_3__, t3);
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-1),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)))) {
b3 = __copy_arr_0__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_3__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_0__[__iter_1__-1+(M)*(__iter_3__)];
t3 = __copy_arr_0__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
float __temp_32__ = b3;
float __temp_35__ = (__tilevar_3__[__iter_3__+(-1)+(EXTENT-__iter_0__)]);
float __temp_36__ = (5 * __temp_32__ + 12 * __temp_35__);
float __temp_39__ = (__tilevar_3__[__iter_3__+(EXTENT-__iter_0__)]);
float __temp_40__ = (__temp_36__ + 15 * __temp_39__);
float __temp_43__ = (__tilevar_3__[__iter_3__+1+(EXTENT-__iter_0__)]);
float __temp_44__ = (__temp_40__ + 12 * __temp_43__);
float __temp_47__ = t3;
float __temp_48__ = (__temp_44__ + 5 * __temp_47__);
float __temp_49__ = (__temp_48__ / 118);
b4 = __tilevar_4__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_4__[__iter_3__+(EXTENT-__iter_0__)] = t4;
t4 = __temp_49__;
//printf ("var1[%d][%d] = %.6f\n", __iter_1__-1, __iter_3__, t4);
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-2),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)))) {
b4 = __copy_arr_1__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_4__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_1__[__iter_1__-1+(M)*(__iter_3__)];
t4 = __copy_arr_1__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_60__ = b4;
float __temp_61__ = (__tilevar_4__[__iter_3__+(-1)+(EXTENT-__iter_0__)]);
float __temp_62__ = (5 * __temp_60__ + 12 * __temp_61__);
float __temp_63__ = (__tilevar_4__[__iter_3__+(EXTENT-__iter_0__)]);
float __temp_64__ = (__temp_62__ + 15 * __temp_63__);
float __temp_65__ = (__tilevar_4__[__iter_3__+1+(EXTENT-__iter_0__)]);
float __temp_66__ = (__temp_64__ + 12 * __temp_65__);
float __temp_67__ = t4;
float __temp_68__ = (__temp_66__ + 5 * __temp_67__);
float __temp_69__ = (__temp_68__ / 118);
b5 = __tilevar_5__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_5__[__iter_3__+(EXTENT-__iter_0__)] = t5;
t5 = __temp_69__;
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-5),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-3),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)))) {
b5 = __copy_arr_2__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_5__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_2__[__iter_1__-1+(M)*(__iter_3__)];
t5 = __copy_arr_2__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if( __iter_3__ >= FORMA_MAX((__iter_0__-4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_80__ = b5;
float __temp_81__ = (__tilevar_5__[__iter_3__+(-1)+(EXTENT-__iter_0__)]);
float __temp_82__ = (5 * __temp_80__ + 12 * __temp_81__);
float __temp_83__ = (__tilevar_5__[__iter_3__+(EXTENT-__iter_0__)]);
float __temp_84__ = (__temp_82__ + 15 * __temp_83__);
float __temp_85__ = (__tilevar_5__[__iter_3__+1+(EXTENT-__iter_0__)]);
float __temp_86__ = (__temp_84__ + 12 * __temp_85__);
float __temp_87__ = t5;
float __temp_88__ = (__temp_86__ + 5 * __temp_87__);
float __temp_89__ = (__temp_88__ / 118);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_89__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void jacobi(float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
cudaMalloc(&__copy_arr_0__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
cudaMalloc(&__copy_arr_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
cudaMalloc(&__copy_arr_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__copy_arr_0__);
cudaFree(__copy_arr_1__);
cudaFree(__copy_arr_2__);
}
/*Host Free End*/
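/* Editor's usage sketch (not part of the generated file): a minimal host-side
   driver for the "jacobi" entry point above. The buffer sizes and the helper
   name run_jacobi_example are illustrative assumptions only; jacobi() itself
   performs the device allocation, copies, kernel launches, and copy-back. */
#ifdef FORMA_JACOBI_USAGE_EXAMPLE
#include <vector>
static void run_jacobi_example(int N, int M) {
  // Row-major N x M single-precision grids; plain host pointers are accepted
  // because jacobi() inspects them with cudaPointerGetAttributes before copying.
  std::vector<float> h_in((size_t)N * M, 1.0f);
  std::vector<float> h_out((size_t)N * M, 0.0f);
  jacobi(h_in.data(), N, M, h_out.data());
}
#endif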
|
268d31da1b80e0f54b2db1193425991c019b34d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cudaConvolution_3D_Float_Valid_Kernel.h>
// ========================================================
// KERNEL
// ========================================================
__global__ void cudaConvolution_3D_Float_Valid_X_Kernel(float* in, int dimx, int dimy, int dimz, float* mask, int size2, float* out)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y,
z = blockIdx.z * blockDim.z + threadIdx.z;
// in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
// out += baseZ * ((dimx - 2 * size2) * dimy) + baseY * (dimx - 2 * size2) + baseX - size2;
if (x >= size2 && x < dimx-size2 && y < dimy && z < dimz)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * in[(long)dimx*dimy* z+(long)dimx*y+(long)x+i];
out[(long)z * ((dimx - 2 * size2) * dimy) + (long)y * (dimx - 2 * size2) + (long)x-size2] = sum;
}
}
__global__ void cudaConvolution_3D_Float_Valid_Y_Kernel(float* in, int dimx, int dimy, int dimz, float* mask, int size2, float* out)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y,
z = blockIdx.z * blockDim.z + threadIdx.z;
if (x < dimx && y >= size2 && y < dimy - size2 && z < dimz)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * in[dimx * dimy * z + dimx * (y+i) + x];
out[z * (dimx * (dimy-2*size2)) + (y-size2) *dimx + x] = sum;
}
}
__global__ void cudaConvolution_3D_Float_Valid_Z_Kernel(float* in, int dimx, int dimy, int dimz, float* mask, int size2, float* out)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y,
z = blockIdx.z * blockDim.z + threadIdx.z;
if (x < dimx && y < dimy && z >= size2 && z < dimz - size2)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * in[dimx * dimy * (z+i) + dimx * y + x];
out[(z-size2) * dimx * dimy + y * dimx + x] = sum;
}
}
void cudaConvolution_3D_Float_Valid_X(float* d_in, int dimx, int dimy, int dimz, float* d_mask, int maskSize, float* d_out)
{
dim3 block(10, 10, 10);
dim3 grid((dimx - 1) / block.x + 1, (dimy - 1) / block.y + 1, (dimz - 1) / block.z + 1);
cudaConvolution_3D_Float_Valid_X_Kernel << <grid, block >> > (d_in, dimx, dimy, dimz, d_mask, maskSize / 2, d_out);
hipDeviceSynchronize();
}
void cudaConvolution_3D_Float_Valid_Y(float* d_in, int dimx, int dimy, int dimz, float* d_mask, int maskSize, float* d_out)
{
dim3 block(10, 10, 10);
dim3 grid((dimx - 1) / block.x + 1, (dimy - 1) / block.y + 1, (dimz - 1) / block.z + 1);
cudaConvolution_3D_Float_Valid_Y_Kernel << <grid, block >> > (d_in, dimx, dimy, dimz, d_mask, maskSize / 2, d_out);
hipDeviceSynchronize();
}
void cudaConvolution_3D_Float_Valid_Z(float* d_in, int dimx, int dimy, int dimz, float* d_mask, int maskSize, float* d_out)
{
dim3 block(10, 10, 10);
dim3 grid((dimx - 1) / block.x + 1, (dimy - 1) / block.y + 1, (dimz - 1) / block.z + 1);
cudaConvolution_3D_Float_Valid_Z_Kernel << <grid, block >> > (d_in, dimx, dimy, dimz, d_mask, maskSize / 2, d_out);
hipDeviceSynchronize();
}
// #define CUDA_MEM_CPY_TO_SYMBOL_FLOAT(_dst, _src, _size) hipMemcpyToSymbol(_dst, _src, _size*sizeof(float));
/*
__global__ void cudaConvolution3DValidXKernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMZ][BLOCKDIMY][BLOCKDIMX * 3];
const int baseX = (blockIdx.x - 1) * blockDim.x + threadIdx.x,
baseY = blockIdx.y * blockDim.y + threadIdx.y,
baseZ = blockIdx.z * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += baseZ * ((dimx - 2 * size2) * dimy) + baseY * (dimx - 2 * size2) + baseX - size2;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX] = (baseX + BLOCKDIMX < dimx) ? in[BLOCKDIMX] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x] = (baseX > 0) ? in[0] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + 2 * BLOCKDIMX] = (dimx - baseX > 2 * BLOCKDIMX) ? in[2 * BLOCKDIMX] : 0.0f;
__syncthreads();
if (baseX + BLOCKDIMX >= size2 && baseX + BLOCKDIMX + size2 < dimx)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX + i];
out[BLOCKDIMX] = sum;
}
}
}
*/ | 268d31da1b80e0f54b2db1193425991c019b34d9.cu |
#include <cuda_runtime.h>
#include <cuda.h>
#include <cudaConvolution_3D_Float_Valid_Kernel.h>
// ========================================================
// KERNEL
// ========================================================
__global__ void cudaConvolution_3D_Float_Valid_X_Kernel(float* in, int dimx, int dimy, int dimz, float* mask, int size2, float* out)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y,
z = blockIdx.z * blockDim.z + threadIdx.z;
// in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
// out += baseZ * ((dimx - 2 * size2) * dimy) + baseY * (dimx - 2 * size2) + baseX - size2;
if (x >= size2 && x < dimx-size2 && y < dimy && z < dimz)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * in[(long)dimx*dimy* z+(long)dimx*y+(long)x+i];
out[(long)z * ((dimx - 2 * size2) * dimy) + (long)y * (dimx - 2 * size2) + (long)x-size2] = sum;
}
}
__global__ void cudaConvolution_3D_Float_Valid_Y_Kernel(float* in, int dimx, int dimy, int dimz, float* mask, int size2, float* out)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y,
z = blockIdx.z * blockDim.z + threadIdx.z;
if (x < dimx && y >= size2 && y < dimy - size2 && z < dimz)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * in[dimx * dimy * z + dimx * (y+i) + x];
out[z * (dimx * (dimy-2*size2)) + (y-size2) *dimx + x] = sum;
}
}
__global__ void cudaConvolution_3D_Float_Valid_Z_Kernel(float* in, int dimx, int dimy, int dimz, float* mask, int size2, float* out)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x,
y = blockIdx.y * blockDim.y + threadIdx.y,
z = blockIdx.z * blockDim.z + threadIdx.z;
if (x < dimx && y < dimy && z >= size2 && z < dimz - size2)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * in[dimx * dimy * (z+i) + dimx * y + x];
out[(z-size2) * dimx * dimy + y * dimx + x] = sum;
}
}
void cudaConvolution_3D_Float_Valid_X(float* d_in, int dimx, int dimy, int dimz, float* d_mask, int maskSize, float* d_out)
{
dim3 block(10, 10, 10);
dim3 grid((dimx - 1) / block.x + 1, (dimy - 1) / block.y + 1, (dimz - 1) / block.z + 1);
cudaConvolution_3D_Float_Valid_X_Kernel << <grid, block >> > (d_in, dimx, dimy, dimz, d_mask, maskSize / 2, d_out);
cudaThreadSynchronize();
}
void cudaConvolution_3D_Float_Valid_Y(float* d_in, int dimx, int dimy, int dimz, float* d_mask, int maskSize, float* d_out)
{
dim3 block(10, 10, 10);
dim3 grid((dimx - 1) / block.x + 1, (dimy - 1) / block.y + 1, (dimz - 1) / block.z + 1);
cudaConvolution_3D_Float_Valid_Y_Kernel << <grid, block >> > (d_in, dimx, dimy, dimz, d_mask, maskSize / 2, d_out);
cudaThreadSynchronize();
}
void cudaConvolution_3D_Float_Valid_Z(float* d_in, int dimx, int dimy, int dimz, float* d_mask, int maskSize, float* d_out)
{
dim3 block(10, 10, 10);
dim3 grid((dimx - 1) / block.x + 1, (dimy - 1) / block.y + 1, (dimz - 1) / block.z + 1);
cudaConvolution_3D_Float_Valid_Z_Kernel << <grid, block >> > (d_in, dimx, dimy, dimz, d_mask, maskSize / 2, d_out);
cudaThreadSynchronize();
}
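/* Editor's usage sketch (not part of the original file): one way to chain the
   three "valid" passes above into a separable 3-D smoothing filter. The 1-2-1
   mask, buffer names, and sizes are illustrative assumptions; each pass shrinks
   the filtered axis by maskSize - 1 samples. */
#ifdef CONV3D_USAGE_EXAMPLE
static void separableBlur3D_example(float* d_src, int dimx, int dimy, int dimz)
{
    const int maskSize = 3, r = maskSize / 2;          // filter radius per axis
    float h_mask[maskSize] = {0.25f, 0.5f, 0.25f};     // normalized binomial mask
    float *d_mask, *d_tmpX, *d_tmpY, *d_dst;
    cudaMalloc(&d_mask, maskSize * sizeof(float));
    cudaMemcpy(d_mask, h_mask, maskSize * sizeof(float), cudaMemcpyHostToDevice);
    // Intermediate buffers track the shrinking "valid" extents.
    cudaMalloc(&d_tmpX, sizeof(float) * (size_t)(dimx - 2*r) * dimy * dimz);
    cudaMalloc(&d_tmpY, sizeof(float) * (size_t)(dimx - 2*r) * (dimy - 2*r) * dimz);
    cudaMalloc(&d_dst,  sizeof(float) * (size_t)(dimx - 2*r) * (dimy - 2*r) * (dimz - 2*r));
    cudaConvolution_3D_Float_Valid_X(d_src,  dimx,         dimy,         dimz, d_mask, maskSize, d_tmpX);
    cudaConvolution_3D_Float_Valid_Y(d_tmpX, dimx - 2*r,   dimy,         dimz, d_mask, maskSize, d_tmpY);
    cudaConvolution_3D_Float_Valid_Z(d_tmpY, dimx - 2*r,   dimy - 2*r,   dimz, d_mask, maskSize, d_dst);
    // Result freed here only to keep the sketch self-contained.
    cudaFree(d_tmpX); cudaFree(d_tmpY); cudaFree(d_dst); cudaFree(d_mask);
}
#endif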
// #define CUDA_MEM_CPY_TO_SYMBOL_FLOAT(_dst, _src, _size) cudaMemcpyToSymbol(_dst, _src, _size*sizeof(float));
/*
__global__ void cudaConvolution3DValidXKernel(float* in, int dimx, int dimy, int dimz, float *mask, int size2, float* out)
{
__shared__ float s_Data[BLOCKDIMZ][BLOCKDIMY][BLOCKDIMX * 3];
const int baseX = (blockIdx.x - 1) * blockDim.x + threadIdx.x,
baseY = blockIdx.y * blockDim.y + threadIdx.y,
baseZ = blockIdx.z * blockDim.z + threadIdx.z;
in += baseZ * (dimx * dimy) + baseY * dimx + baseX;
out += baseZ * ((dimx - 2 * size2) * dimy) + baseY * (dimx - 2 * size2) + baseX - size2;
if (baseX < dimx && baseY < dimy && baseZ < dimz)
{
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX] = (baseX + BLOCKDIMX < dimx) ? in[BLOCKDIMX] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x] = (baseX > 0) ? in[0] : 0.0f;
s_Data[threadIdx.z][threadIdx.y][threadIdx.x + 2 * BLOCKDIMX] = (dimx - baseX > 2 * BLOCKDIMX) ? in[2 * BLOCKDIMX] : 0.0f;
__syncthreads();
if (baseX + BLOCKDIMX >= size2 && baseX + BLOCKDIMX + size2 < dimx)
{
float sum = 0.0f;
#pragma unroll
for (int i = -size2; i <= size2; i++)
sum += mask[size2 - i] * s_Data[threadIdx.z][threadIdx.y][threadIdx.x + BLOCKDIMX + i];
out[BLOCKDIMX] = sum;
}
}
}
*/ |
52ff5232682e02835d27e0c4c5d2fcf4f7936e81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file check_handshaking.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void check_handshaking_gpu(int * strongNeighbor, int * matches, int numNodes) {
// Get Thread ID
const int NUM_THREADS = blockDim.x * gridDim.x;
const int COL = blockIdx.x * blockDim.x + threadIdx.x;
const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
const int FIRST_T_ID = COL + ROW * NUM_THREADS;
for(int curTID = FIRST_T_ID; curTID <= numNodes; curTID += NUM_THREADS) {
if(matches[curTID] == -1) {
if(curTID == strongNeighbor[strongNeighbor[curTID]]) {
matches[curTID] = strongNeighbor[curTID];
}
}
}
}
| 52ff5232682e02835d27e0c4c5d2fcf4f7936e81.cu | /**
* @file check_handshaking.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void check_handshaking_gpu(int * strongNeighbor, int * matches, int numNodes) {
// Get Thread ID
const int NUM_THREADS = blockDim.x * gridDim.x;
const int COL = blockIdx.x * blockDim.x + threadIdx.x;
const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
const int FIRST_T_ID = COL + ROW * NUM_THREADS;
for(int curTID = FIRST_T_ID; curTID <= numNodes; curTID += NUM_THREADS) {
if(matches[curTID] == -1) {
if(curTID == strongNeighbor[strongNeighbor[curTID]]) {
matches[curTID] = strongNeighbor[curTID];
}
}
}
}
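/* Editor's launch sketch (not part of the original assignment file): one
   possible 1-D configuration for check_handshaking_gpu. Because the loop above
   runs while curTID <= numNodes, this sketch assumes the device arrays hold
   numNodes + 1 entries. */
#ifdef CHECK_HANDSHAKING_USAGE_EXAMPLE
static void launch_check_handshaking(int* d_strongNeighbor, int* d_matches, int numNodes)
{
    const int threadsPerBlock = 256;
    const int blocks = (numNodes + threadsPerBlock) / threadsPerBlock;  // >= numNodes + 1 threads
    check_handshaking_gpu<<<blocks, threadsPerBlock>>>(d_strongNeighbor, d_matches, numNodes);
    cudaDeviceSynchronize();
}
#endif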
|
149d2e5741bce21f5b86a84c97fc750fc4b93003.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file ppmc_cuda.cu
* \brief Functions definitions for the ppm kernels, using characteristic tracing.
Written following Stone et al. 2008. */
#ifdef CUDA
#ifdef PPMC
#include<cuda.h>
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"ppmc_cuda.h"
/*! \fn void PPMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
* \brief When passed a stencil of conserved variables, returns the left and right
boundary values for the interface calculated using ppm. */
__global__ void PPMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
{
int n_cells = nx*ny*nz;
int o1, o2, o3;
if (dir == 0 ) {
o1 = 1; o2 = 2; o3 = 3;
}
if (dir == 1 ) {
o1 = 2; o2 = 3; o3 = 1;
}
if (dir == 2 ) {
o1 = 3; o2 = 1; o3 = 2;
}
  // declare primitive variables for each stencil
// these will be placed into registers for each thread
Real d_i, vx_i, vy_i, vz_i, p_i;
Real d_imo, vx_imo, vy_imo, vz_imo, p_imo;
Real d_ipo, vx_ipo, vy_ipo, vz_ipo, p_ipo;
Real d_imt, vx_imt, vy_imt, vz_imt, p_imt;
Real d_ipt, vx_ipt, vy_ipt, vz_ipt, p_ipt;
// declare other variables to be used
Real a;
Real del_d_L, del_vx_L, del_vy_L, del_vz_L, del_p_L;
Real del_d_R, del_vx_R, del_vy_R, del_vz_R, del_p_R;
Real del_d_C, del_vx_C, del_vy_C, del_vz_C, del_p_C;
Real del_d_G, del_vx_G, del_vy_G, del_vz_G, del_p_G;
Real del_a_0_L, del_a_1_L, del_a_2_L, del_a_3_L, del_a_4_L;
Real del_a_0_R, del_a_1_R, del_a_2_R, del_a_3_R, del_a_4_R;
Real del_a_0_C, del_a_1_C, del_a_2_C, del_a_3_C, del_a_4_C;
Real del_a_0_G, del_a_1_G, del_a_2_G, del_a_3_G, del_a_4_G;
Real del_a_0_m, del_a_1_m, del_a_2_m, del_a_3_m, del_a_4_m;
Real lim_slope_a, lim_slope_b;
Real del_d_m_imo, del_vx_m_imo, del_vy_m_imo, del_vz_m_imo, del_p_m_imo;
Real del_d_m_i, del_vx_m_i, del_vy_m_i, del_vz_m_i, del_p_m_i;
Real del_d_m_ipo, del_vx_m_ipo, del_vy_m_ipo, del_vz_m_ipo, del_p_m_ipo;
Real d_L, vx_L, vy_L, vz_L, p_L;
Real d_R, vx_R, vy_R, vz_R, p_R;
#ifdef CTU
Real dtodx = dt/dx;
Real d_6, vx_6, vy_6, vz_6, p_6;
Real lambda_m, lambda_0, lambda_p;
Real lambda_max, lambda_min;
Real A, B, C, D;
Real chi_1, chi_2, chi_3, chi_4, chi_5;
Real sum_1, sum_2, sum_3, sum_4, sum_5;
#endif //CTU
#ifdef DE
Real ge_i, ge_imo, ge_ipo, ge_imt, ge_ipt;
Real del_ge_L, del_ge_R, del_ge_C, del_ge_G;
Real del_ge_m_imo, del_ge_m_i, del_ge_m_ipo;
Real ge_L, ge_R;
#ifdef CTU
Real chi_ge, sum_ge, ge_6;
#endif
#endif
#ifdef SCALAR
Real scalar_i[NSCALARS], scalar_imo[NSCALARS], scalar_ipo[NSCALARS], scalar_imt[NSCALARS], scalar_ipt[NSCALARS];
Real del_scalar_L[NSCALARS], del_scalar_R[NSCALARS], del_scalar_C[NSCALARS], del_scalar_G[NSCALARS];
Real del_scalar_m_imo[NSCALARS], del_scalar_m_i[NSCALARS], del_scalar_m_ipo[NSCALARS];
Real scalar_L[NSCALARS], scalar_R[NSCALARS];
#ifdef CTU
Real chi_scalar[NSCALARS], sum_scalar[NSCALARS], scalar_6[NSCALARS];
#endif
#endif
// get a thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int tid = threadIdx.x + blockId * blockDim.x;
int id;
int zid = tid / (nx*ny);
int yid = (tid - zid*nx*ny) / nx;
int xid = tid - zid*nx*ny - yid*nx;
int xs, xe, ys, ye, zs, ze;
if (dir == 0) {
xs = 2; xe = nx-3;
ys = 0; ye = ny;
zs = 0; ze = nz;
}
if (dir == 1) {
xs = 0; xe = nx;
ys = 2; ye = ny-3;
zs = 0; ze = nz;
}
if (dir == 2) {
xs = 0; xe = nx;
ys = 0; ye = ny;
zs = 2; ze = nz-3;
}
if (xid >= xs && xid < xe && yid >= ys && yid < ye && zid >= zs && zid < ze)
{
// load the 5-cell stencil into registers
// cell i
id = xid + yid*nx + zid*nx*ny;
d_i = dev_conserved[ id];
vx_i = dev_conserved[o1*n_cells + id] / d_i;
vy_i = dev_conserved[o2*n_cells + id] / d_i;
vz_i = dev_conserved[o3*n_cells + id] / d_i;
p_i = (dev_conserved[4*n_cells + id] - 0.5*d_i*(vx_i*vx_i + vy_i*vy_i + vz_i*vz_i)) * (gamma - 1.0);
p_i = fmax(p_i, (Real) TINY_NUMBER);
#ifdef DE
ge_i = dev_conserved[(n_fields-1)*n_cells + id] / d_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_i[i] = dev_conserved[(5+i)*n_cells + id] / d_i;
}
#endif
// cell i-1
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
d_imo = dev_conserved[ id];
vx_imo = dev_conserved[o1*n_cells + id] / d_imo;
vy_imo = dev_conserved[o2*n_cells + id] / d_imo;
vz_imo = dev_conserved[o3*n_cells + id] / d_imo;
p_imo = (dev_conserved[4*n_cells + id] - 0.5*d_imo*(vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo)) * (gamma - 1.0);
p_imo = fmax(p_imo, (Real) TINY_NUMBER);
#ifdef DE
ge_imo = dev_conserved[(n_fields-1)*n_cells + id] / d_imo;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imo[i] = dev_conserved[(5+i)*n_cells + id] / d_imo;
}
#endif
// cell i+1
if (dir == 0) id = xid+1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+1)*nx*ny;
d_ipo = dev_conserved[ id];
vx_ipo = dev_conserved[o1*n_cells + id] / d_ipo;
vy_ipo = dev_conserved[o2*n_cells + id] / d_ipo;
vz_ipo = dev_conserved[o3*n_cells + id] / d_ipo;
p_ipo = (dev_conserved[4*n_cells + id] - 0.5*d_ipo*(vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo)) * (gamma - 1.0);
p_ipo = fmax(p_ipo, (Real) TINY_NUMBER);
#ifdef DE
ge_ipo = dev_conserved[(n_fields-1)*n_cells + id] / d_ipo;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipo[i] = dev_conserved[(5+i)*n_cells + id] / d_ipo;
}
#endif
// cell i-2
if (dir == 0) id = xid-2 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-2)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-2)*nx*ny;
d_imt = dev_conserved[ id];
vx_imt = dev_conserved[o1*n_cells + id] / d_imt;
vy_imt = dev_conserved[o2*n_cells + id] / d_imt;
vz_imt = dev_conserved[o3*n_cells + id] / d_imt;
p_imt = (dev_conserved[4*n_cells + id] - 0.5*d_imt*(vx_imt*vx_imt + vy_imt*vy_imt + vz_imt*vz_imt)) * (gamma - 1.0);
p_imt = fmax(p_imt, (Real) TINY_NUMBER);
#ifdef DE
ge_imt = dev_conserved[(n_fields-1)*n_cells + id] / d_imt;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imt[i] = dev_conserved[(5+i)*n_cells + id] / d_imt;
}
#endif
// cell i+2
if (dir == 0) id = xid+2 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+2)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+2)*nx*ny;
d_ipt = dev_conserved[ id];
vx_ipt = dev_conserved[o1*n_cells + id] / d_ipt;
vy_ipt = dev_conserved[o2*n_cells + id] / d_ipt;
vz_ipt = dev_conserved[o3*n_cells + id] / d_ipt;
p_ipt = (dev_conserved[4*n_cells + id] - 0.5*d_ipt*(vx_ipt*vx_ipt + vy_ipt*vy_ipt + vz_ipt*vz_ipt)) * (gamma - 1.0);
p_ipt = fmax(p_ipt, (Real) TINY_NUMBER);
#ifdef DE
ge_ipt = dev_conserved[(n_fields-1)*n_cells + id] / d_ipt;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipt[i] = dev_conserved[(5+i)*n_cells + id] / d_ipt;
}
#endif
//printf("%d %d %d %f %f %f %f %f\n", xid, yid, zid, d_i, vx_i, vy_i, vz_i, p_i);
// Steps 2 - 5 are repeated for cell i-1, i, and i+1
    // Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell imo
a = sqrt(gamma*p_imo/d_imo);
// left
del_d_L = d_imo - d_imt;
del_vx_L = vx_imo - vx_imt;
del_vy_L = vy_imo - vy_imt;
del_vz_L = vz_imo - vz_imt;
del_p_L = p_imo - p_imt;
// right
del_d_R = d_i - d_imo;
del_vx_R = vx_i - vx_imo;
del_vy_R = vy_i - vy_imo;
del_vz_R = vz_i - vz_imo;
del_p_R = p_i - p_imo;
// centered
del_d_C = 0.5*(d_i - d_imt);
del_vx_C = 0.5*(vx_i - vx_imt);
del_vy_C = 0.5*(vy_i - vy_imt);
del_vz_C = 0.5*(vz_i - vz_imt);
del_p_C = 0.5*(p_i - p_imt);
// Van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_imo - ge_imt;
del_ge_R = ge_i - ge_imo;
del_ge_C = 0.5*(ge_i - ge_imt);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_imo[i] - scalar_imt[i];
del_scalar_R[i] = scalar_i[i] - scalar_imo[i];
del_scalar_C[i] = 0.5*(scalar_i[i] - scalar_imt[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_imo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_imo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_imo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_imo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_imo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_imo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_imo*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_imo*del_vx_G/a + 0.5*del_p_G/(a*a);
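    /* Editor's note (added): the five projections above apply the left
       eigenvectors of the primitive-variable Euler system W = (d, vx, vy, vz, p)
       given in Stone et al. 2008, Appendix A:
         l0 = ( 0, -d/(2a), 0, 0, 1/(2a^2) )   for lambda = vx - a
         l1 = ( 1,       0, 0, 0,   -1/a^2 )   for lambda = vx
         l2 = ( 0,       0, 1, 0,        0 )   for lambda = vx
         l3 = ( 0,       0, 0, 1,        0 )   for lambda = vx
         l4 = ( 0,  d/(2a), 0, 0, 1/(2a^2) )   for lambda = vx + a
       so each del_a_k above is the dot product of l_k with the corresponding
       slope of W; the same pattern repeats for cells i and i+1 below. */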
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_imo = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_imo = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_imo[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_imo[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
    // primitive variables
// Stone Eqn 39
del_d_m_imo = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_imo = -a*del_a_0_m/d_imo + a*del_a_4_m/d_imo;
del_vy_m_imo = del_a_2_m;
del_vz_m_imo = del_a_3_m;
del_p_m_imo = a*a*del_a_0_m + a*a*del_a_4_m;
    // Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell i
a = sqrt(gamma*p_i/d_i);
// left
del_d_L = d_i - d_imo;
del_vx_L = vx_i - vx_imo;
del_vy_L = vy_i - vy_imo;
del_vz_L = vz_i - vz_imo;
del_p_L = p_i - p_imo;
// right
del_d_R = d_ipo - d_i;
del_vx_R = vx_ipo - vx_i;
del_vy_R = vy_ipo - vy_i;
del_vz_R = vz_ipo - vz_i;
del_p_R = p_ipo - p_i;
// centered
del_d_C = 0.5*(d_ipo - d_imo);
del_vx_C = 0.5*(vx_ipo - vx_imo);
del_vy_C = 0.5*(vy_ipo - vy_imo);
del_vz_C = 0.5*(vz_ipo - vz_imo);
del_p_C = 0.5*(p_ipo - p_imo);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_i - ge_imo;
del_ge_R = ge_ipo - ge_i;
del_ge_C = 0.5*(ge_ipo - ge_imo);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_i[i] - scalar_imo[i];
del_scalar_R[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_C[i] = 0.5*(scalar_ipo[i] - scalar_imo[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered, and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_i*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_i*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_i*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_i*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_i*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_i*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_i*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_i*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_i = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_i = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_i[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_i[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
    // primitive variables
// Stone Eqn 39
del_d_m_i = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_i = -a*del_a_0_m/d_i + a*del_a_4_m/d_i;
del_vy_m_i = del_a_2_m;
del_vz_m_i = del_a_3_m;
del_p_m_i = a*a*del_a_0_m + a*a*del_a_4_m;
    // Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell ipo
a = sqrt(gamma*p_ipo/d_ipo);
// left
del_d_L = d_ipo - d_i;
del_vx_L = vx_ipo - vx_i;
del_vy_L = vy_ipo - vy_i;
del_vz_L = vz_ipo - vz_i;
del_p_L = p_ipo - p_i;
// right
del_d_R = d_ipt - d_ipo;
del_vx_R = vx_ipt - vx_ipo;
del_vy_R = vy_ipt - vy_ipo;
del_vz_R = vz_ipt - vz_ipo;
del_p_R = p_ipt - p_ipo;
// centered
del_d_C = 0.5*(d_ipt - d_i);
del_vx_C = 0.5*(vx_ipt- vx_i);
del_vy_C = 0.5*(vy_ipt - vy_i);
del_vz_C = 0.5*(vz_ipt - vz_i);
del_p_C = 0.5*(p_ipt - p_i);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_ipo - ge_i;
del_ge_R = ge_ipt - ge_ipo;
del_ge_C = 0.5*(ge_ipt- ge_i);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_R[i] = scalar_ipt[i] - scalar_ipo[i];
del_scalar_C[i] = 0.5*(scalar_ipt[i]- scalar_i[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered, and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_ipo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_ipo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_ipo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_ipo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_ipo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_ipo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_ipo*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_ipo*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_ipo = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_ipo = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_ipo[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_ipo[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
    // primitive variables
// Stone Eqn 39
del_d_m_ipo = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_ipo = -a*del_a_0_m / d_ipo + a* del_a_4_m / d_ipo;
del_vy_m_ipo = del_a_2_m;
del_vz_m_ipo = del_a_3_m;
del_p_m_ipo = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 6 - Use parabolic interpolation to compute values at the left and right of each cell center
// Here, the subscripts L and R refer to the left and right side of the ith cell center
// Stone Eqn 46
d_L = 0.5*(d_i + d_imo) - (del_d_m_i - del_d_m_imo) / 6.0;
vx_L = 0.5*(vx_i + vx_imo) - (del_vx_m_i - del_vx_m_imo) / 6.0;
vy_L = 0.5*(vy_i + vy_imo) - (del_vy_m_i - del_vy_m_imo) / 6.0;
vz_L = 0.5*(vz_i + vz_imo) - (del_vz_m_i - del_vz_m_imo) / 6.0;
p_L = 0.5*(p_i + p_imo) - (del_p_m_i - del_p_m_imo) / 6.0;
d_R = 0.5*(d_ipo + d_i) - (del_d_m_ipo - del_d_m_i) / 6.0;
vx_R = 0.5*(vx_ipo + vx_i) - (del_vx_m_ipo - del_vx_m_i) / 6.0;
vy_R = 0.5*(vy_ipo + vy_i) - (del_vy_m_ipo - del_vy_m_i) / 6.0;
vz_R = 0.5*(vz_ipo + vz_i) - (del_vz_m_ipo - del_vz_m_i) / 6.0;
p_R = 0.5*(p_ipo + p_i) - (del_p_m_ipo - del_p_m_i) / 6.0;
#ifdef DE
ge_L = 0.5*(ge_i + ge_imo) - (del_ge_m_i - del_ge_m_imo) / 6.0;
ge_R = 0.5*(ge_ipo + ge_i) - (del_ge_m_ipo - del_ge_m_i) / 6.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L[i] = 0.5*(scalar_i[i] + scalar_imo[i]) - (del_scalar_m_i[i] - del_scalar_m_imo[i]) / 6.0;
scalar_R[i] = 0.5*(scalar_ipo[i] + scalar_i[i]) - (del_scalar_m_ipo[i] - del_scalar_m_i[i]) / 6.0;
}
#endif
// Step 7 - Apply further monotonicity constraints to ensure the values on the left and right side
// of cell center lie between neighboring cell-centered values
// Stone Eqns 47 - 53
if ((d_R - d_i) *(d_i - d_L) <= 0) d_L = d_R = d_i;
if ((vx_R - vx_i)*(vx_i - vx_L) <= 0) vx_L = vx_R = vx_i;
if ((vy_R - vy_i)*(vy_i - vy_L) <= 0) vy_L = vy_R = vy_i;
if ((vz_R - vz_i)*(vz_i - vz_L) <= 0) vz_L = vz_R = vz_i;
if ((p_R - p_i) *(p_i - p_L) <= 0) p_L = p_R = p_i;
if ( 6.0*(d_R - d_L) *(d_i - 0.5*(d_L + d_R)) > (d_R - d_L) *(d_R - d_L)) d_L = 3.0*d_i - 2.0*d_R;
if ( 6.0*(vx_R - vx_L)*(vx_i - 0.5*(vx_L + vx_R)) > (vx_R - vx_L)*(vx_R - vx_L)) vx_L = 3.0*vx_i - 2.0*vx_R;
if ( 6.0*(vy_R - vy_L)*(vy_i - 0.5*(vy_L + vy_R)) > (vy_R - vy_L)*(vy_R - vy_L)) vy_L = 3.0*vy_i - 2.0*vy_R;
if ( 6.0*(vz_R - vz_L)*(vz_i - 0.5*(vz_L + vz_R)) > (vz_R - vz_L)*(vz_R - vz_L)) vz_L = 3.0*vz_i - 2.0*vz_R;
if ( 6.0*(p_R - p_L) *(p_i - 0.5*(p_L + p_R)) > (p_R - p_L) *(p_R - p_L)) p_L = 3.0*p_i - 2.0*p_R;
if ( 6.0*(d_R - d_L) *(d_i - 0.5*(d_L + d_R)) < -(d_R - d_L) *(d_R - d_L)) d_R = 3.0*d_i - 2.0*d_L;
if ( 6.0*(vx_R - vx_L)*(vx_i - 0.5*(vx_L + vx_R)) < -(vx_R - vx_L)*(vx_R - vx_L)) vx_R = 3.0*vx_i - 2.0*vx_L;
if ( 6.0*(vy_R - vy_L)*(vy_i - 0.5*(vy_L + vy_R)) < -(vy_R - vy_L)*(vy_R - vy_L)) vy_R = 3.0*vy_i - 2.0*vy_L;
if ( 6.0*(vz_R - vz_L)*(vz_i - 0.5*(vz_L + vz_R)) < -(vz_R - vz_L)*(vz_R - vz_L)) vz_R = 3.0*vz_i - 2.0*vz_L;
if ( 6.0*(p_R - p_L) *(p_i - 0.5*(p_L + p_R)) < -(p_R - p_L) *(p_R - p_L)) p_R = 3.0*p_i - 2.0*p_L;
d_L = fmax( fmin(d_i, d_imo), d_L );
d_L = fmin( fmax(d_i, d_imo), d_L );
d_R = fmax( fmin(d_i, d_ipo), d_R );
d_R = fmin( fmax(d_i, d_ipo), d_R );
vx_L = fmax( fmin(vx_i, vx_imo), vx_L );
vx_L = fmin( fmax(vx_i, vx_imo), vx_L );
vx_R = fmax( fmin(vx_i, vx_ipo), vx_R );
vx_R = fmin( fmax(vx_i, vx_ipo), vx_R );
vy_L = fmax( fmin(vy_i, vy_imo), vy_L );
vy_L = fmin( fmax(vy_i, vy_imo), vy_L );
vy_R = fmax( fmin(vy_i, vy_ipo), vy_R );
vy_R = fmin( fmax(vy_i, vy_ipo), vy_R );
vz_L = fmax( fmin(vz_i, vz_imo), vz_L );
vz_L = fmin( fmax(vz_i, vz_imo), vz_L );
vz_R = fmax( fmin(vz_i, vz_ipo), vz_R );
vz_R = fmin( fmax(vz_i, vz_ipo), vz_R );
p_L = fmax( fmin(p_i, p_imo), p_L );
p_L = fmin( fmax(p_i, p_imo), p_L );
p_R = fmax( fmin(p_i, p_ipo), p_R );
p_R = fmin( fmax(p_i, p_ipo), p_R );
#ifdef DE
if ((ge_R - ge_i) *(ge_i - ge_L) <= 0) ge_L = ge_R = ge_i;
if ( 6.0*(ge_R - ge_L) *(ge_i - 0.5*(ge_L + ge_R)) > (ge_R - ge_L) *(ge_R - ge_L)) ge_L = 3.0*ge_i - 2.0*ge_R;
if ( 6.0*(ge_R - ge_L) *(ge_i - 0.5*(ge_L + ge_R)) < -(ge_R - ge_L) *(ge_R - ge_L)) ge_R = 3.0*ge_i - 2.0*ge_L;
ge_L = fmax( fmin(ge_i, ge_imo), ge_L );
ge_L = fmin( fmax(ge_i, ge_imo), ge_L );
ge_R = fmax( fmin(ge_i, ge_ipo), ge_R );
ge_R = fmin( fmax(ge_i, ge_ipo), ge_R );
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if ((scalar_R[i] - scalar_i[i]) *(scalar_i[i] - scalar_L[i]) <= 0) scalar_L[i] = scalar_R[i] = scalar_i[i];
if ( 6.0*(scalar_R[i] - scalar_L[i]) *(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])) > (scalar_R[i] - scalar_L[i]) *(scalar_R[i] - scalar_L[i])) scalar_L[i] = 3.0*scalar_i[i] - 2.0*scalar_R[i];
if ( 6.0*(scalar_R[i] - scalar_L[i]) *(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])) < -(scalar_R[i] - scalar_L[i]) *(scalar_R[i] - scalar_L[i])) scalar_R[i] = 3.0*scalar_i[i] - 2.0*scalar_L[i];
scalar_L[i] = fmax( fmin(scalar_i[i], scalar_imo[i]), scalar_L[i] );
scalar_L[i] = fmin( fmax(scalar_i[i], scalar_imo[i]), scalar_L[i] );
scalar_R[i] = fmax( fmin(scalar_i[i], scalar_ipo[i]), scalar_R[i] );
scalar_R[i] = fmin( fmax(scalar_i[i], scalar_ipo[i]), scalar_R[i] );
}
#endif
#ifdef CTU
// Step 8 - Compute the coefficients for the monotonized parabolic interpolation function
// Stone Eqn 54
del_d_m_i = d_R - d_L;
del_vx_m_i = vx_R - vx_L;
del_vy_m_i = vy_R - vy_L;
del_vz_m_i = vz_R - vz_L;
del_p_m_i = p_R - p_L;
d_6 = 6.0*(d_i - 0.5*(d_L + d_R));
vx_6 = 6.0*(vx_i - 0.5*(vx_L + vx_R));
vy_6 = 6.0*(vy_i - 0.5*(vy_L + vy_R));
vz_6 = 6.0*(vz_i - 0.5*(vz_L + vz_R));
p_6 = 6.0*(p_i - 0.5*(p_L + p_R));
#ifdef DE
del_ge_m_i = ge_R - ge_L;
ge_6 = 6.0*(ge_i - 0.5*(ge_L + ge_R));
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_m_i[i] = scalar_R[i] - scalar_L[i];
scalar_6[i] = 6.0*(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i]));
}
#endif
// Compute the eigenvalues of the linearized equations in the
    // primitive variables using the cell-centered primitive variables
// recalculate the adiabatic sound speed in cell i
a = sqrt(gamma*p_i/d_i);
lambda_m = vx_i-a;
lambda_0 = vx_i;
lambda_p = vx_i+a;
// Step 9 - Compute the left and right interface values using monotonized parabolic interpolation
// Stone Eqns 55 & 56
// largest eigenvalue
lambda_max = fmax(lambda_p, (Real) 0);
// smallest eigenvalue
lambda_min = fmin(lambda_m, (Real) 0);
// left interface value, i+1/2
d_R = d_R - lambda_max * (0.5*dtodx)*(del_d_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*d_6);
vx_R = vx_R - lambda_max * (0.5*dtodx)*(del_vx_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vx_6);
vy_R = vy_R - lambda_max * (0.5*dtodx)*(del_vy_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vy_6);
vz_R = vz_R - lambda_max * (0.5*dtodx)*(del_vz_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vz_6);
p_R = p_R - lambda_max * (0.5*dtodx)*(del_p_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*p_6);
// right interface value, i-1/2
d_L = d_L - lambda_min * (0.5*dtodx)*(del_d_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*d_6);
vx_L = vx_L - lambda_min * (0.5*dtodx)*(del_vx_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vx_6);
vy_L = vy_L - lambda_min * (0.5*dtodx)*(del_vy_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vy_6);
vz_L = vz_L - lambda_min * (0.5*dtodx)*(del_vz_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vz_6);
p_L = p_L - lambda_min * (0.5*dtodx)*(del_p_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*p_6);
#ifdef DE
ge_R = ge_R - lambda_max * (0.5*dtodx)*(del_ge_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*ge_6);
ge_L = ge_L - lambda_min * (0.5*dtodx)*(del_ge_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*ge_6);
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R[i] = scalar_R[i] - lambda_max * (0.5*dtodx)*(del_scalar_m_i[i] - (1.0 - (2.0/3.0)*lambda_max*dtodx)*scalar_6[i]);
scalar_L[i] = scalar_L[i] - lambda_min * (0.5*dtodx)*(del_scalar_m_i[i] + (1.0 + (2.0/3.0)*lambda_min*dtodx)*scalar_6[i]);
}
#endif
// Step 10 - Perform the characteristic tracing
// Stone Eqns 57 - 60
// left-hand interface value, i+1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_m);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_m*lambda_m);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
sum_1 += -0.5*(d_i*chi_2/a - chi_5/(a*a));
sum_2 += 0.5*(chi_2 - chi_5/(a*d_i));
sum_5 += -0.5*(d_i*chi_2*a - chi_5);
}
if (lambda_0 >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_0);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_0*lambda_0);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
#ifdef DE
chi_ge = A*(del_ge_m_i - ge_6) + B*ge_6;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
chi_scalar[i] = A*(del_scalar_m_i[i] - scalar_6[i]) + B*scalar_6[i];
}
#endif
sum_1 += chi_1 - chi_5/(a*a);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif
}
if (lambda_p >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_p);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_p*lambda_p);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
sum_1 += 0.5*(d_i*chi_2/a + chi_5/(a*a));
sum_2 += 0.5*(chi_2 + chi_5/(a*d_i));
sum_5 += 0.5*(d_i*chi_2*a + chi_5);
}
// add the corrections to the initial guesses for the interface values
d_R += sum_1;
vx_R += sum_2;
vy_R += sum_3;
vz_R += sum_4;
p_R += sum_5;
#ifdef DE
ge_R += sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R[i] += sum_scalar[i];
}
#endif
// right-hand interface value, i-1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_m);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_m*lambda_m);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
sum_1 += -0.5*(d_i*chi_2/a - chi_5/(a*a));
sum_2 += 0.5*(chi_2 - chi_5/(a*d_i));
sum_5 += -0.5*(d_i*chi_2*a - chi_5);
}
if (lambda_0 <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_0);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_0*lambda_0);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
#ifdef DE
chi_ge = C*(del_ge_m_i + ge_6) + D*ge_6;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
chi_scalar[i] = C*(del_scalar_m_i[i] + scalar_6[i]) + D*scalar_6[i];
}
#endif
sum_1 += chi_1 - chi_5/(a*a);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif
}
if (lambda_p <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_p);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_p*lambda_p);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
sum_1 += 0.5*(d_i*chi_2/a + chi_5/(a*a));
sum_2 += 0.5*(chi_2 + chi_5/(a*d_i));
sum_5 += 0.5*(d_i*chi_2*a + chi_5);
}
// add the corrections
d_L += sum_1;
vx_L += sum_2;
vy_L += sum_3;
vz_L += sum_4;
p_L += sum_5;
#ifdef DE
ge_L += sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L[i] += sum_scalar[i];
}
#endif
#endif //CTU
// enforce minimum values
d_L = fmax(d_L, (Real) TINY_NUMBER);
d_R = fmax(d_R, (Real) TINY_NUMBER);
p_L = fmax(p_L, (Real) TINY_NUMBER);
p_R = fmax(p_R, (Real) TINY_NUMBER);
// Step 11 - Send final values back from kernel
// bounds_R refers to the right side of the i-1/2 interface
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
dev_bounds_R[ id] = d_L;
dev_bounds_R[o1*n_cells + id] = d_L*vx_L;
dev_bounds_R[o2*n_cells + id] = d_L*vy_L;
dev_bounds_R[o3*n_cells + id] = d_L*vz_L;
dev_bounds_R[4*n_cells + id] = p_L/(gamma-1.0) + 0.5*d_L*(vx_L*vx_L + vy_L*vy_L + vz_L*vz_L);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_R[(5+i)*n_cells + id] = d_L*scalar_L[i];
}
#endif
#ifdef DE
dev_bounds_R[(n_fields-1)*n_cells + id] = d_L*ge_L;
#endif
// bounds_L refers to the left side of the i+1/2 interface
id = xid + yid*nx + zid*nx*ny;
dev_bounds_L[ id] = d_R;
dev_bounds_L[o1*n_cells + id] = d_R*vx_R;
dev_bounds_L[o2*n_cells + id] = d_R*vy_R;
dev_bounds_L[o3*n_cells + id] = d_R*vz_R;
dev_bounds_L[4*n_cells + id] = p_R/(gamma-1.0) + 0.5*d_R*(vx_R*vx_R + vy_R*vy_R + vz_R*vz_R);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_L[(5+i)*n_cells + id] = d_R*scalar_R[i];
}
#endif
#ifdef DE
dev_bounds_L[(n_fields-1)*n_cells + id] = d_R*ge_R;
#endif
}
}
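  /* Editor's sketch (not part of the original file): the slope limiting that is
     repeated for every characteristic field above (Stone 2008, Eqn 38) can be
     read as a single helper; limit_slope is a hypothetical name used only for
     illustration and is compiled out by default. */
  #ifdef PPMC_LIMITER_SKETCH
  __device__ Real limit_slope(Real del_L, Real del_R, Real del_C, Real del_G)
  {
    // Zero the slope at extrema; otherwise keep the sign of the centered
    // difference and the tightest of the one-sided and smooth estimates.
    if (del_L * del_R <= 0.0) return 0.0;
    Real lim_a = fmin(fabs(del_L), fabs(del_R));
    Real lim_b = fmin(fabs(del_C), fabs(del_G));
    return sgn_CUDA(del_C) * fmin((Real) 2.0*lim_a, lim_b);
  }
  #endif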
#endif //PPMC
#endif //CUDA
| 149d2e5741bce21f5b86a84c97fc750fc4b93003.cu | /*! \file ppmc_cuda.cu
* \brief Functions definitions for the ppm kernels, using characteristic tracing.
Written following Stone et al. 2008. */
#ifdef CUDA
#ifdef PPMC
#include<cuda.h>
#include<math.h>
#include"global.h"
#include"global_cuda.h"
#include"ppmc_cuda.h"
/*! \fn void PPMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
* \brief When passed a stencil of conserved variables, returns the left and right
boundary values for the interface calculated using ppm. */
__global__ void PPMC_cuda(Real *dev_conserved, Real *dev_bounds_L, Real *dev_bounds_R, int nx, int ny, int nz, int n_ghost, Real dx, Real dt, Real gamma, int dir, int n_fields)
{
int n_cells = nx*ny*nz;
int o1, o2, o3;
if (dir == 0 ) {
o1 = 1; o2 = 2; o3 = 3;
}
if (dir == 1 ) {
o1 = 2; o2 = 3; o3 = 1;
}
if (dir == 2 ) {
o1 = 3; o2 = 1; o3 = 2;
}
  // declare primitive variables for each stencil
// these will be placed into registers for each thread
Real d_i, vx_i, vy_i, vz_i, p_i;
Real d_imo, vx_imo, vy_imo, vz_imo, p_imo;
Real d_ipo, vx_ipo, vy_ipo, vz_ipo, p_ipo;
Real d_imt, vx_imt, vy_imt, vz_imt, p_imt;
Real d_ipt, vx_ipt, vy_ipt, vz_ipt, p_ipt;
// declare other variables to be used
Real a;
Real del_d_L, del_vx_L, del_vy_L, del_vz_L, del_p_L;
Real del_d_R, del_vx_R, del_vy_R, del_vz_R, del_p_R;
Real del_d_C, del_vx_C, del_vy_C, del_vz_C, del_p_C;
Real del_d_G, del_vx_G, del_vy_G, del_vz_G, del_p_G;
Real del_a_0_L, del_a_1_L, del_a_2_L, del_a_3_L, del_a_4_L;
Real del_a_0_R, del_a_1_R, del_a_2_R, del_a_3_R, del_a_4_R;
Real del_a_0_C, del_a_1_C, del_a_2_C, del_a_3_C, del_a_4_C;
Real del_a_0_G, del_a_1_G, del_a_2_G, del_a_3_G, del_a_4_G;
Real del_a_0_m, del_a_1_m, del_a_2_m, del_a_3_m, del_a_4_m;
Real lim_slope_a, lim_slope_b;
Real del_d_m_imo, del_vx_m_imo, del_vy_m_imo, del_vz_m_imo, del_p_m_imo;
Real del_d_m_i, del_vx_m_i, del_vy_m_i, del_vz_m_i, del_p_m_i;
Real del_d_m_ipo, del_vx_m_ipo, del_vy_m_ipo, del_vz_m_ipo, del_p_m_ipo;
Real d_L, vx_L, vy_L, vz_L, p_L;
Real d_R, vx_R, vy_R, vz_R, p_R;
#ifdef CTU
Real dtodx = dt/dx;
Real d_6, vx_6, vy_6, vz_6, p_6;
Real lambda_m, lambda_0, lambda_p;
Real lambda_max, lambda_min;
Real A, B, C, D;
Real chi_1, chi_2, chi_3, chi_4, chi_5;
Real sum_1, sum_2, sum_3, sum_4, sum_5;
#endif //CTU
#ifdef DE
Real ge_i, ge_imo, ge_ipo, ge_imt, ge_ipt;
Real del_ge_L, del_ge_R, del_ge_C, del_ge_G;
Real del_ge_m_imo, del_ge_m_i, del_ge_m_ipo;
Real ge_L, ge_R;
#ifdef CTU
Real chi_ge, sum_ge, ge_6;
#endif
#endif
#ifdef SCALAR
Real scalar_i[NSCALARS], scalar_imo[NSCALARS], scalar_ipo[NSCALARS], scalar_imt[NSCALARS], scalar_ipt[NSCALARS];
Real del_scalar_L[NSCALARS], del_scalar_R[NSCALARS], del_scalar_C[NSCALARS], del_scalar_G[NSCALARS];
Real del_scalar_m_imo[NSCALARS], del_scalar_m_i[NSCALARS], del_scalar_m_ipo[NSCALARS];
Real scalar_L[NSCALARS], scalar_R[NSCALARS];
#ifdef CTU
Real chi_scalar[NSCALARS], sum_scalar[NSCALARS], scalar_6[NSCALARS];
#endif
#endif
// get a thread ID
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int tid = threadIdx.x + blockId * blockDim.x;
int id;
int zid = tid / (nx*ny);
int yid = (tid - zid*nx*ny) / nx;
int xid = tid - zid*nx*ny - yid*nx;
int xs, xe, ys, ye, zs, ze;
if (dir == 0) {
xs = 2; xe = nx-3;
ys = 0; ye = ny;
zs = 0; ze = nz;
}
if (dir == 1) {
xs = 0; xe = nx;
ys = 2; ye = ny-3;
zs = 0; ze = nz;
}
if (dir == 2) {
xs = 0; xe = nx;
ys = 0; ye = ny;
zs = 2; ze = nz-3;
}
if (xid >= xs && xid < xe && yid >= ys && yid < ye && zid >= zs && zid < ze)
{
// load the 5-cell stencil into registers
// cell i
id = xid + yid*nx + zid*nx*ny;
d_i = dev_conserved[ id];
vx_i = dev_conserved[o1*n_cells + id] / d_i;
vy_i = dev_conserved[o2*n_cells + id] / d_i;
vz_i = dev_conserved[o3*n_cells + id] / d_i;
p_i = (dev_conserved[4*n_cells + id] - 0.5*d_i*(vx_i*vx_i + vy_i*vy_i + vz_i*vz_i)) * (gamma - 1.0);
p_i = fmax(p_i, (Real) TINY_NUMBER);
#ifdef DE
ge_i = dev_conserved[(n_fields-1)*n_cells + id] / d_i;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_i[i] = dev_conserved[(5+i)*n_cells + id] / d_i;
}
#endif
// cell i-1
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
d_imo = dev_conserved[ id];
vx_imo = dev_conserved[o1*n_cells + id] / d_imo;
vy_imo = dev_conserved[o2*n_cells + id] / d_imo;
vz_imo = dev_conserved[o3*n_cells + id] / d_imo;
p_imo = (dev_conserved[4*n_cells + id] - 0.5*d_imo*(vx_imo*vx_imo + vy_imo*vy_imo + vz_imo*vz_imo)) * (gamma - 1.0);
p_imo = fmax(p_imo, (Real) TINY_NUMBER);
#ifdef DE
ge_imo = dev_conserved[(n_fields-1)*n_cells + id] / d_imo;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imo[i] = dev_conserved[(5+i)*n_cells + id] / d_imo;
}
#endif
// cell i+1
if (dir == 0) id = xid+1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+1)*nx*ny;
d_ipo = dev_conserved[ id];
vx_ipo = dev_conserved[o1*n_cells + id] / d_ipo;
vy_ipo = dev_conserved[o2*n_cells + id] / d_ipo;
vz_ipo = dev_conserved[o3*n_cells + id] / d_ipo;
p_ipo = (dev_conserved[4*n_cells + id] - 0.5*d_ipo*(vx_ipo*vx_ipo + vy_ipo*vy_ipo + vz_ipo*vz_ipo)) * (gamma - 1.0);
p_ipo = fmax(p_ipo, (Real) TINY_NUMBER);
#ifdef DE
ge_ipo = dev_conserved[(n_fields-1)*n_cells + id] / d_ipo;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipo[i] = dev_conserved[(5+i)*n_cells + id] / d_ipo;
}
#endif
// cell i-2
if (dir == 0) id = xid-2 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-2)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-2)*nx*ny;
d_imt = dev_conserved[ id];
vx_imt = dev_conserved[o1*n_cells + id] / d_imt;
vy_imt = dev_conserved[o2*n_cells + id] / d_imt;
vz_imt = dev_conserved[o3*n_cells + id] / d_imt;
p_imt = (dev_conserved[4*n_cells + id] - 0.5*d_imt*(vx_imt*vx_imt + vy_imt*vy_imt + vz_imt*vz_imt)) * (gamma - 1.0);
p_imt = fmax(p_imt, (Real) TINY_NUMBER);
#ifdef DE
ge_imt = dev_conserved[(n_fields-1)*n_cells + id] / d_imt;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_imt[i] = dev_conserved[(5+i)*n_cells + id] / d_imt;
}
#endif
// cell i+2
if (dir == 0) id = xid+2 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid+2)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid+2)*nx*ny;
d_ipt = dev_conserved[ id];
vx_ipt = dev_conserved[o1*n_cells + id] / d_ipt;
vy_ipt = dev_conserved[o2*n_cells + id] / d_ipt;
vz_ipt = dev_conserved[o3*n_cells + id] / d_ipt;
p_ipt = (dev_conserved[4*n_cells + id] - 0.5*d_ipt*(vx_ipt*vx_ipt + vy_ipt*vy_ipt + vz_ipt*vz_ipt)) * (gamma - 1.0);
p_ipt = fmax(p_ipt, (Real) TINY_NUMBER);
#ifdef DE
ge_ipt = dev_conserved[(n_fields-1)*n_cells + id] / d_ipt;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_ipt[i] = dev_conserved[(5+i)*n_cells + id] / d_ipt;
}
#endif
//printf("%d %d %d %f %f %f %f %f\n", xid, yid, zid, d_i, vx_i, vy_i, vz_i, p_i);
// Steps 2 - 5 are repeated for cell i-1, i, and i+1
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell imo
a = sqrt(gamma*p_imo/d_imo);
// left
del_d_L = d_imo - d_imt;
del_vx_L = vx_imo - vx_imt;
del_vy_L = vy_imo - vy_imt;
del_vz_L = vz_imo - vz_imt;
del_p_L = p_imo - p_imt;
// right
del_d_R = d_i - d_imo;
del_vx_R = vx_i - vx_imo;
del_vy_R = vy_i - vy_imo;
del_vz_R = vz_i - vz_imo;
del_p_R = p_i - p_imo;
// centered
del_d_C = 0.5*(d_i - d_imt);
del_vx_C = 0.5*(vx_i - vx_imt);
del_vy_C = 0.5*(vy_i - vy_imt);
del_vz_C = 0.5*(vz_i - vz_imt);
del_p_C = 0.5*(p_i - p_imt);
// Van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_imo - ge_imt;
del_ge_R = ge_i - ge_imo;
del_ge_C = 0.5*(ge_i - ge_imt);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_imo[i] - scalar_imt[i];
del_scalar_R[i] = scalar_i[i] - scalar_imo[i];
del_scalar_C[i] = 0.5*(scalar_i[i] - scalar_imt[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_imo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_imo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_imo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_imo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_imo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_imo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_imo*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_imo*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_imo = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_imo = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_imo[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_imo[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
// primitive variables
// Stone Eqn 39
del_d_m_imo = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_imo = -a*del_a_0_m/d_imo + a*del_a_4_m/d_imo;
del_vy_m_imo = del_a_2_m;
del_vz_m_imo = del_a_3_m;
del_p_m_imo = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell i
a = sqrt(gamma*p_i/d_i);
// left
del_d_L = d_i - d_imo;
del_vx_L = vx_i - vx_imo;
del_vy_L = vy_i - vy_imo;
del_vz_L = vz_i - vz_imo;
del_p_L = p_i - p_imo;
// right
del_d_R = d_ipo - d_i;
del_vx_R = vx_ipo - vx_i;
del_vy_R = vy_ipo - vy_i;
del_vz_R = vz_ipo - vz_i;
del_p_R = p_ipo - p_i;
// centered
del_d_C = 0.5*(d_ipo - d_imo);
del_vx_C = 0.5*(vx_ipo - vx_imo);
del_vy_C = 0.5*(vy_ipo - vy_imo);
del_vz_C = 0.5*(vz_ipo - vz_imo);
del_p_C = 0.5*(p_ipo - p_imo);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_i - ge_imo;
del_ge_R = ge_ipo - ge_i;
del_ge_C = 0.5*(ge_ipo - ge_imo);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_i[i] - scalar_imo[i];
del_scalar_R[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_C[i] = 0.5*(scalar_ipo[i] - scalar_imo[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered, and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_i*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_i*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_i*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_i*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_i*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_i*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_i*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_i*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_i = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_i = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_i[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_i[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
// primitive variables
// Stone Eqn 39
del_d_m_i = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_i = -a*del_a_0_m/d_i + a*del_a_4_m/d_i;
del_vy_m_i = del_a_2_m;
del_vz_m_i = del_a_3_m;
del_p_m_i = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 2 - Compute the left, right, centered, and van Leer differences of the primitive variables
// Note that here L and R refer to locations relative to the cell center
// Stone Eqn 36
// calculate the adiabatic sound speed in cell ipo
a = sqrt(gamma*p_ipo/d_ipo);
// left
del_d_L = d_ipo - d_i;
del_vx_L = vx_ipo - vx_i;
del_vy_L = vy_ipo - vy_i;
del_vz_L = vz_ipo - vz_i;
del_p_L = p_ipo - p_i;
// right
del_d_R = d_ipt - d_ipo;
del_vx_R = vx_ipt - vx_ipo;
del_vy_R = vy_ipt - vy_ipo;
del_vz_R = vz_ipt - vz_ipo;
del_p_R = p_ipt - p_ipo;
// centered
del_d_C = 0.5*(d_ipt - d_i);
del_vx_C = 0.5*(vx_ipt- vx_i);
del_vy_C = 0.5*(vy_ipt - vy_i);
del_vz_C = 0.5*(vz_ipt - vz_i);
del_p_C = 0.5*(p_ipt - p_i);
// van Leer
if (del_d_L*del_d_R > 0.0) { del_d_G = 2.0*del_d_L*del_d_R / (del_d_L+del_d_R); }
else { del_d_G = 0.0; }
if (del_vx_L*del_vx_R > 0.0) { del_vx_G = 2.0*del_vx_L*del_vx_R / (del_vx_L+del_vx_R); }
else { del_vx_G = 0.0; }
if (del_vy_L*del_vy_R > 0.0) { del_vy_G = 2.0*del_vy_L*del_vy_R / (del_vy_L+del_vy_R); }
else { del_vy_G = 0.0; }
if (del_vz_L*del_vz_R > 0.0) { del_vz_G = 2.0*del_vz_L*del_vz_R / (del_vz_L+del_vz_R); }
else { del_vz_G = 0.0; }
if (del_p_L*del_p_R > 0.0) { del_p_G = 2.0*del_p_L*del_p_R / (del_p_L+del_p_R); }
else { del_p_G = 0.0; }
#ifdef DE
del_ge_L = ge_ipo - ge_i;
del_ge_R = ge_ipt - ge_ipo;
del_ge_C = 0.5*(ge_ipt- ge_i);
if (del_ge_L*del_ge_R > 0.0) { del_ge_G = 2.0*del_ge_L*del_ge_R / (del_ge_L+del_ge_R); }
else { del_ge_G = 0.0; }
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_L[i] = scalar_ipo[i] - scalar_i[i];
del_scalar_R[i] = scalar_ipt[i] - scalar_ipo[i];
del_scalar_C[i] = 0.5*(scalar_ipt[i]- scalar_i[i]);
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) { del_scalar_G[i] = 2.0*del_scalar_L[i]*del_scalar_R[i] / (del_scalar_L[i]+del_scalar_R[i]); }
else { del_scalar_G[i] = 0.0; }
}
#endif
// Step 3 - Project the left, right, centered, and van Leer differences onto the characteristic variables
// Stone Eqn 37 (del_a are differences in characteristic variables, see Stone for notation)
// Use the eigenvectors given in Stone 2008, Appendix A
del_a_0_L = -0.5*d_ipo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_1_L = del_d_L - del_p_L/(a*a);
del_a_2_L = del_vy_L;
del_a_3_L = del_vz_L;
del_a_4_L = 0.5*d_ipo*del_vx_L/a + 0.5*del_p_L/(a*a);
del_a_0_R = -0.5*d_ipo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_1_R = del_d_R - del_p_R/(a*a);
del_a_2_R = del_vy_R;
del_a_3_R = del_vz_R;
del_a_4_R = 0.5*d_ipo*del_vx_R/a + 0.5*del_p_R/(a*a);
del_a_0_C = -0.5*d_ipo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_1_C = del_d_C - del_p_C/(a*a);
del_a_2_C = del_vy_C;
del_a_3_C = del_vz_C;
del_a_4_C = 0.5*d_ipo*del_vx_C/a + 0.5*del_p_C/(a*a);
del_a_0_G = -0.5*d_ipo*del_vx_G/a + 0.5*del_p_G/(a*a);
del_a_1_G = del_d_G - del_p_G/(a*a);
del_a_2_G = del_vy_G;
del_a_3_G = del_vz_G;
del_a_4_G = 0.5*d_ipo*del_vx_G/a + 0.5*del_p_G/(a*a);
// Step 4 - Apply monotonicity constraints to the differences in the characteristic variables
// Stone Eqn 38
del_a_0_m = del_a_1_m = del_a_2_m = del_a_3_m = del_a_4_m = 0.0;
if (del_a_0_L*del_a_0_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_0_L), fabs(del_a_0_R));
lim_slope_b = fmin(fabs(del_a_0_C), fabs(del_a_0_G));
del_a_0_m = sgn_CUDA(del_a_0_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_1_L*del_a_1_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_1_L), fabs(del_a_1_R));
lim_slope_b = fmin(fabs(del_a_1_C), fabs(del_a_1_G));
del_a_1_m = sgn_CUDA(del_a_1_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_2_L*del_a_2_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_2_L), fabs(del_a_2_R));
lim_slope_b = fmin(fabs(del_a_2_C), fabs(del_a_2_G));
del_a_2_m = sgn_CUDA(del_a_2_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_3_L*del_a_3_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_3_L), fabs(del_a_3_R));
lim_slope_b = fmin(fabs(del_a_3_C), fabs(del_a_3_G));
del_a_3_m = sgn_CUDA(del_a_3_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
if (del_a_4_L*del_a_4_R > 0.0) {
lim_slope_a = fmin(fabs(del_a_4_L), fabs(del_a_4_R));
lim_slope_b = fmin(fabs(del_a_4_C), fabs(del_a_4_G));
del_a_4_m = sgn_CUDA(del_a_4_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
#ifdef DE
if (del_ge_L*del_ge_R > 0.0) {
lim_slope_a = fmin(fabs(del_ge_L), fabs(del_ge_R));
lim_slope_b = fmin(fabs(del_ge_C), fabs(del_ge_G));
del_ge_m_ipo = sgn_CUDA(del_ge_C) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_ge_m_ipo = 0.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if (del_scalar_L[i]*del_scalar_R[i] > 0.0) {
lim_slope_a = fmin(fabs(del_scalar_L[i]), fabs(del_scalar_R[i]));
lim_slope_b = fmin(fabs(del_scalar_C[i]), fabs(del_scalar_G[i]));
del_scalar_m_ipo[i] = sgn_CUDA(del_scalar_C[i]) * fmin((Real) 2.0*lim_slope_a, lim_slope_b);
}
else del_scalar_m_ipo[i] = 0.0;
}
#endif
// Step 5 - Project the monotonized difference in the characteristic variables back onto the
// primitive variables
// Stone Eqn 39
del_d_m_ipo = del_a_0_m + del_a_1_m + del_a_4_m;
del_vx_m_ipo = -a*del_a_0_m / d_ipo + a* del_a_4_m / d_ipo;
del_vy_m_ipo = del_a_2_m;
del_vz_m_ipo = del_a_3_m;
del_p_m_ipo = a*a*del_a_0_m + a*a*del_a_4_m;
// Step 6 - Use parabolic interpolation to compute values at the left and right of each cell center
// Here, the subscripts L and R refer to the left and right side of the ith cell center
// Stone Eqn 46
d_L = 0.5*(d_i + d_imo) - (del_d_m_i - del_d_m_imo) / 6.0;
vx_L = 0.5*(vx_i + vx_imo) - (del_vx_m_i - del_vx_m_imo) / 6.0;
vy_L = 0.5*(vy_i + vy_imo) - (del_vy_m_i - del_vy_m_imo) / 6.0;
vz_L = 0.5*(vz_i + vz_imo) - (del_vz_m_i - del_vz_m_imo) / 6.0;
p_L = 0.5*(p_i + p_imo) - (del_p_m_i - del_p_m_imo) / 6.0;
d_R = 0.5*(d_ipo + d_i) - (del_d_m_ipo - del_d_m_i) / 6.0;
vx_R = 0.5*(vx_ipo + vx_i) - (del_vx_m_ipo - del_vx_m_i) / 6.0;
vy_R = 0.5*(vy_ipo + vy_i) - (del_vy_m_ipo - del_vy_m_i) / 6.0;
vz_R = 0.5*(vz_ipo + vz_i) - (del_vz_m_ipo - del_vz_m_i) / 6.0;
p_R = 0.5*(p_ipo + p_i) - (del_p_m_ipo - del_p_m_i) / 6.0;
#ifdef DE
ge_L = 0.5*(ge_i + ge_imo) - (del_ge_m_i - del_ge_m_imo) / 6.0;
ge_R = 0.5*(ge_ipo + ge_i) - (del_ge_m_ipo - del_ge_m_i) / 6.0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L[i] = 0.5*(scalar_i[i] + scalar_imo[i]) - (del_scalar_m_i[i] - del_scalar_m_imo[i]) / 6.0;
scalar_R[i] = 0.5*(scalar_ipo[i] + scalar_i[i]) - (del_scalar_m_ipo[i] - del_scalar_m_i[i]) / 6.0;
}
#endif
// Step 7 - Apply further monotonicity constraints to ensure the values on the left and right side
// of cell center lie between neighboring cell-centered values
// Stone Eqns 47 - 53
if ((d_R - d_i) *(d_i - d_L) <= 0) d_L = d_R = d_i;
if ((vx_R - vx_i)*(vx_i - vx_L) <= 0) vx_L = vx_R = vx_i;
if ((vy_R - vy_i)*(vy_i - vy_L) <= 0) vy_L = vy_R = vy_i;
if ((vz_R - vz_i)*(vz_i - vz_L) <= 0) vz_L = vz_R = vz_i;
if ((p_R - p_i) *(p_i - p_L) <= 0) p_L = p_R = p_i;
if ( 6.0*(d_R - d_L) *(d_i - 0.5*(d_L + d_R)) > (d_R - d_L) *(d_R - d_L)) d_L = 3.0*d_i - 2.0*d_R;
if ( 6.0*(vx_R - vx_L)*(vx_i - 0.5*(vx_L + vx_R)) > (vx_R - vx_L)*(vx_R - vx_L)) vx_L = 3.0*vx_i - 2.0*vx_R;
if ( 6.0*(vy_R - vy_L)*(vy_i - 0.5*(vy_L + vy_R)) > (vy_R - vy_L)*(vy_R - vy_L)) vy_L = 3.0*vy_i - 2.0*vy_R;
if ( 6.0*(vz_R - vz_L)*(vz_i - 0.5*(vz_L + vz_R)) > (vz_R - vz_L)*(vz_R - vz_L)) vz_L = 3.0*vz_i - 2.0*vz_R;
if ( 6.0*(p_R - p_L) *(p_i - 0.5*(p_L + p_R)) > (p_R - p_L) *(p_R - p_L)) p_L = 3.0*p_i - 2.0*p_R;
if ( 6.0*(d_R - d_L) *(d_i - 0.5*(d_L + d_R)) < -(d_R - d_L) *(d_R - d_L)) d_R = 3.0*d_i - 2.0*d_L;
if ( 6.0*(vx_R - vx_L)*(vx_i - 0.5*(vx_L + vx_R)) < -(vx_R - vx_L)*(vx_R - vx_L)) vx_R = 3.0*vx_i - 2.0*vx_L;
if ( 6.0*(vy_R - vy_L)*(vy_i - 0.5*(vy_L + vy_R)) < -(vy_R - vy_L)*(vy_R - vy_L)) vy_R = 3.0*vy_i - 2.0*vy_L;
if ( 6.0*(vz_R - vz_L)*(vz_i - 0.5*(vz_L + vz_R)) < -(vz_R - vz_L)*(vz_R - vz_L)) vz_R = 3.0*vz_i - 2.0*vz_L;
if ( 6.0*(p_R - p_L) *(p_i - 0.5*(p_L + p_R)) < -(p_R - p_L) *(p_R - p_L)) p_R = 3.0*p_i - 2.0*p_L;
d_L = fmax( fmin(d_i, d_imo), d_L );
d_L = fmin( fmax(d_i, d_imo), d_L );
d_R = fmax( fmin(d_i, d_ipo), d_R );
d_R = fmin( fmax(d_i, d_ipo), d_R );
vx_L = fmax( fmin(vx_i, vx_imo), vx_L );
vx_L = fmin( fmax(vx_i, vx_imo), vx_L );
vx_R = fmax( fmin(vx_i, vx_ipo), vx_R );
vx_R = fmin( fmax(vx_i, vx_ipo), vx_R );
vy_L = fmax( fmin(vy_i, vy_imo), vy_L );
vy_L = fmin( fmax(vy_i, vy_imo), vy_L );
vy_R = fmax( fmin(vy_i, vy_ipo), vy_R );
vy_R = fmin( fmax(vy_i, vy_ipo), vy_R );
vz_L = fmax( fmin(vz_i, vz_imo), vz_L );
vz_L = fmin( fmax(vz_i, vz_imo), vz_L );
vz_R = fmax( fmin(vz_i, vz_ipo), vz_R );
vz_R = fmin( fmax(vz_i, vz_ipo), vz_R );
p_L = fmax( fmin(p_i, p_imo), p_L );
p_L = fmin( fmax(p_i, p_imo), p_L );
p_R = fmax( fmin(p_i, p_ipo), p_R );
p_R = fmin( fmax(p_i, p_ipo), p_R );
#ifdef DE
if ((ge_R - ge_i) *(ge_i - ge_L) <= 0) ge_L = ge_R = ge_i;
if ( 6.0*(ge_R - ge_L) *(ge_i - 0.5*(ge_L + ge_R)) > (ge_R - ge_L) *(ge_R - ge_L)) ge_L = 3.0*ge_i - 2.0*ge_R;
if ( 6.0*(ge_R - ge_L) *(ge_i - 0.5*(ge_L + ge_R)) < -(ge_R - ge_L) *(ge_R - ge_L)) ge_R = 3.0*ge_i - 2.0*ge_L;
ge_L = fmax( fmin(ge_i, ge_imo), ge_L );
ge_L = fmin( fmax(ge_i, ge_imo), ge_L );
ge_R = fmax( fmin(ge_i, ge_ipo), ge_R );
ge_R = fmin( fmax(ge_i, ge_ipo), ge_R );
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
if ((scalar_R[i] - scalar_i[i]) *(scalar_i[i] - scalar_L[i]) <= 0) scalar_L[i] = scalar_R[i] = scalar_i[i];
if ( 6.0*(scalar_R[i] - scalar_L[i]) *(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])) > (scalar_R[i] - scalar_L[i]) *(scalar_R[i] - scalar_L[i])) scalar_L[i] = 3.0*scalar_i[i] - 2.0*scalar_R[i];
if ( 6.0*(scalar_R[i] - scalar_L[i]) *(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i])) < -(scalar_R[i] - scalar_L[i]) *(scalar_R[i] - scalar_L[i])) scalar_R[i] = 3.0*scalar_i[i] - 2.0*scalar_L[i];
scalar_L[i] = fmax( fmin(scalar_i[i], scalar_imo[i]), scalar_L[i] );
scalar_L[i] = fmin( fmax(scalar_i[i], scalar_imo[i]), scalar_L[i] );
scalar_R[i] = fmax( fmin(scalar_i[i], scalar_ipo[i]), scalar_R[i] );
scalar_R[i] = fmin( fmax(scalar_i[i], scalar_ipo[i]), scalar_R[i] );
}
#endif
#ifdef CTU
// Step 8 - Compute the coefficients for the monotonized parabolic interpolation function
// Stone Eqn 54
del_d_m_i = d_R - d_L;
del_vx_m_i = vx_R - vx_L;
del_vy_m_i = vy_R - vy_L;
del_vz_m_i = vz_R - vz_L;
del_p_m_i = p_R - p_L;
d_6 = 6.0*(d_i - 0.5*(d_L + d_R));
vx_6 = 6.0*(vx_i - 0.5*(vx_L + vx_R));
vy_6 = 6.0*(vy_i - 0.5*(vy_L + vy_R));
vz_6 = 6.0*(vz_i - 0.5*(vz_L + vz_R));
p_6 = 6.0*(p_i - 0.5*(p_L + p_R));
#ifdef DE
del_ge_m_i = ge_R - ge_L;
ge_6 = 6.0*(ge_i - 0.5*(ge_L + ge_R));
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
del_scalar_m_i[i] = scalar_R[i] - scalar_L[i];
scalar_6[i] = 6.0*(scalar_i[i] - 0.5*(scalar_L[i] + scalar_R[i]));
}
#endif
// Compute the eigenvalues of the linearized equations in the
// primitive variables using the cell-centered primitive variables
// recalculate the adiabatic sound speed in cell i
a = sqrt(gamma*p_i/d_i);
lambda_m = vx_i-a;
lambda_0 = vx_i;
lambda_p = vx_i+a;
// Step 9 - Compute the left and right interface values using monotonized parabolic interpolation
// Stone Eqns 55 & 56
// largest eigenvalue
lambda_max = fmax(lambda_p, (Real) 0);
// smallest eigenvalue
lambda_min = fmin(lambda_m, (Real) 0);
// left interface value, i+1/2
d_R = d_R - lambda_max * (0.5*dtodx)*(del_d_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*d_6);
vx_R = vx_R - lambda_max * (0.5*dtodx)*(del_vx_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vx_6);
vy_R = vy_R - lambda_max * (0.5*dtodx)*(del_vy_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vy_6);
vz_R = vz_R - lambda_max * (0.5*dtodx)*(del_vz_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*vz_6);
p_R = p_R - lambda_max * (0.5*dtodx)*(del_p_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*p_6);
// right interface value, i-1/2
d_L = d_L - lambda_min * (0.5*dtodx)*(del_d_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*d_6);
vx_L = vx_L - lambda_min * (0.5*dtodx)*(del_vx_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vx_6);
vy_L = vy_L - lambda_min * (0.5*dtodx)*(del_vy_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vy_6);
vz_L = vz_L - lambda_min * (0.5*dtodx)*(del_vz_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*vz_6);
p_L = p_L - lambda_min * (0.5*dtodx)*(del_p_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*p_6);
#ifdef DE
ge_R = ge_R - lambda_max * (0.5*dtodx)*(del_ge_m_i - (1.0 - (2.0/3.0)*lambda_max*dtodx)*ge_6);
ge_L = ge_L - lambda_min * (0.5*dtodx)*(del_ge_m_i + (1.0 + (2.0/3.0)*lambda_min*dtodx)*ge_6);
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R[i] = scalar_R[i] - lambda_max * (0.5*dtodx)*(del_scalar_m_i[i] - (1.0 - (2.0/3.0)*lambda_max*dtodx)*scalar_6[i]);
scalar_L[i] = scalar_L[i] - lambda_min * (0.5*dtodx)*(del_scalar_m_i[i] + (1.0 + (2.0/3.0)*lambda_min*dtodx)*scalar_6[i]);
}
#endif
// Step 10 - Perform the characteristic tracing
// Stone Eqns 57 - 60
// left-hand interface value, i+1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_m);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_m*lambda_m);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
sum_1 += -0.5*(d_i*chi_2/a - chi_5/(a*a));
sum_2 += 0.5*(chi_2 - chi_5/(a*d_i));
sum_5 += -0.5*(d_i*chi_2*a - chi_5);
}
if (lambda_0 >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_0);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_0*lambda_0);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
#ifdef DE
chi_ge = A*(del_ge_m_i - ge_6) + B*ge_6;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
chi_scalar[i] = A*(del_scalar_m_i[i] - scalar_6[i]) + B*scalar_6[i];
}
#endif
sum_1 += chi_1 - chi_5/(a*a);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif
}
if (lambda_p >= 0)
{
A = (0.5*dtodx) * (lambda_p - lambda_p);
B = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_p*lambda_p - lambda_p*lambda_p);
chi_1 = A*(del_d_m_i - d_6) + B*d_6;
chi_2 = A*(del_vx_m_i - vx_6) + B*vx_6;
chi_3 = A*(del_vy_m_i - vy_6) + B*vy_6;
chi_4 = A*(del_vz_m_i - vz_6) + B*vz_6;
chi_5 = A*(del_p_m_i - p_6) + B*p_6;
sum_1 += 0.5*(d_i*chi_2/a + chi_5/(a*a));
sum_2 += 0.5*(chi_2 + chi_5/(a*d_i));
sum_5 += 0.5*(d_i*chi_2*a + chi_5);
}
// add the corrections to the initial guesses for the interface values
d_R += sum_1;
vx_R += sum_2;
vy_R += sum_3;
vz_R += sum_4;
p_R += sum_5;
#ifdef DE
ge_R += sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_R[i] += sum_scalar[i];
}
#endif
// right-hand interface value, i-1/2
sum_1 = 0;
sum_2 = 0;
sum_3 = 0;
sum_4 = 0;
sum_5 = 0;
#ifdef DE
sum_ge = 0;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] = 0;
}
#endif
if (lambda_m <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_m);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_m*lambda_m);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
sum_1 += -0.5*(d_i*chi_2/a - chi_5/(a*a));
sum_2 += 0.5*(chi_2 - chi_5/(a*d_i));
sum_5 += -0.5*(d_i*chi_2*a - chi_5);
}
if (lambda_0 <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_0);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_0*lambda_0);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
#ifdef DE
chi_ge = C*(del_ge_m_i + ge_6) + D*ge_6;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
chi_scalar[i] = C*(del_scalar_m_i[i] + scalar_6[i]) + D*scalar_6[i];
}
#endif
sum_1 += chi_1 - chi_5/(a*a);
sum_3 += chi_3;
sum_4 += chi_4;
#ifdef DE
sum_ge += chi_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
sum_scalar[i] += chi_scalar[i];
}
#endif
}
if (lambda_p <= 0)
{
C = (0.5*dtodx) * (lambda_m - lambda_p);
D = (1.0/3.0)*(dtodx)*(dtodx)*(lambda_m*lambda_m - lambda_p*lambda_p);
chi_1 = C*(del_d_m_i + d_6) + D*d_6;
chi_2 = C*(del_vx_m_i + vx_6) + D*vx_6;
chi_3 = C*(del_vy_m_i + vy_6) + D*vy_6;
chi_4 = C*(del_vz_m_i + vz_6) + D*vz_6;
chi_5 = C*(del_p_m_i + p_6) + D*p_6;
sum_1 += 0.5*(d_i*chi_2/a + chi_5/(a*a));
sum_2 += 0.5*(chi_2 + chi_5/(a*d_i));
sum_5 += 0.5*(d_i*chi_2*a + chi_5);
}
// add the corrections
d_L += sum_1;
vx_L += sum_2;
vy_L += sum_3;
vz_L += sum_4;
p_L += sum_5;
#ifdef DE
ge_L += sum_ge;
#endif
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
scalar_L[i] += sum_scalar[i];
}
#endif
#endif //CTU
// enforce minimum values
d_L = fmax(d_L, (Real) TINY_NUMBER);
d_R = fmax(d_R, (Real) TINY_NUMBER);
p_L = fmax(p_L, (Real) TINY_NUMBER);
p_R = fmax(p_R, (Real) TINY_NUMBER);
// Step 11 - Send final values back from kernel
// bounds_R refers to the right side of the i-1/2 interface
if (dir == 0) id = xid-1 + yid*nx + zid*nx*ny;
if (dir == 1) id = xid + (yid-1)*nx + zid*nx*ny;
if (dir == 2) id = xid + yid*nx + (zid-1)*nx*ny;
dev_bounds_R[ id] = d_L;
dev_bounds_R[o1*n_cells + id] = d_L*vx_L;
dev_bounds_R[o2*n_cells + id] = d_L*vy_L;
dev_bounds_R[o3*n_cells + id] = d_L*vz_L;
dev_bounds_R[4*n_cells + id] = p_L/(gamma-1.0) + 0.5*d_L*(vx_L*vx_L + vy_L*vy_L + vz_L*vz_L);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_R[(5+i)*n_cells + id] = d_L*scalar_L[i];
}
#endif
#ifdef DE
dev_bounds_R[(n_fields-1)*n_cells + id] = d_L*ge_L;
#endif
// bounds_L refers to the left side of the i+1/2 interface
id = xid + yid*nx + zid*nx*ny;
dev_bounds_L[ id] = d_R;
dev_bounds_L[o1*n_cells + id] = d_R*vx_R;
dev_bounds_L[o2*n_cells + id] = d_R*vy_R;
dev_bounds_L[o3*n_cells + id] = d_R*vz_R;
dev_bounds_L[4*n_cells + id] = p_R/(gamma-1.0) + 0.5*d_R*(vx_R*vx_R + vy_R*vy_R + vz_R*vz_R);
#ifdef SCALAR
for (int i=0; i<NSCALARS; i++) {
dev_bounds_L[(5+i)*n_cells + id] = d_R*scalar_R[i];
}
#endif
#ifdef DE
dev_bounds_L[(n_fields-1)*n_cells + id] = d_R*ge_R;
#endif
}
}
#endif //PPMC
#endif //CUDA
|
728c5c8264193af1212d50d388fc13ed49c78508.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n)
{
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if (idx >= n*n)
{
// n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if (idx >= n * n)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if (row < n && col < n)
{
d_result[row * n + col] = tmp;
}
} | 728c5c8264193af1212d50d388fc13ed49c78508.cu | #include "includes.h"
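// Illustrative usage sketch (assumes d_a, d_b and d_result are caller-allocated
// device buffers of n*n ints); the grid is rounded up so the whole matrix is
// covered and gridDim.x matches the tile count used by the loop in the kernel:
//
//   unsigned int grid_dim = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
//   dim3 dimGrid(grid_dim, grid_dim);
//   dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//   gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_result, n);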
__global__ void gpu_square_matrix_mult(int *d_a, int *d_b, int *d_result, int n)
{
__shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
int tmp = 0;
int idx;
for (int sub = 0; sub < gridDim.x; ++sub)
{
idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
if (idx >= n*n)
{
// n may not be divisible by BLOCK_SIZE
tile_a[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
}
idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
if (idx >= n * n)
{
tile_b[threadIdx.y][threadIdx.x] = 0;
}
else
{
tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
}
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
}
__syncthreads();
}
if (row < n && col < n)
{
d_result[row * n + col] = tmp;
}
} |
2bfbb7a035bdbe84c0afc54b5fdccbb6f4d87094.hip | // !!! This is a file automatically generated by hipify!!!
// This file is distributed under the MIT license.
// See the LICENSE file for details.
#include <cstddef>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <memory>
#include <ostream>
#include <random>
#include <string>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <Support/CmdLine.h>
#include <Support/CmdLineUtil.h>
#include <visionaray/cuda/managed_vector.h>
#include <visionaray/math/aabb.h>
#include <visionaray/math/sphere.h>
#include <visionaray/bvh.h>
#include <visionaray/gpu_buffer_rt.h>
#include <visionaray/pinhole_camera.h>
#include <visionaray/result_record.h>
#include <visionaray/scheduler.h>
#include <visionaray/traverse.h>
#include <common/timer.h>
using namespace support;
using namespace visionaray;
using cmdline_options = std::vector<std::shared_ptr<cl::OptionBase>>;
namespace visionaray
{
namespace cuda
{
//-------------------------------------------------------------------------------------------------
// Typedef so we can just write cuda::managed_bvh
//
template <typename P>
using managed_bvh = index_bvh_t<managed_vector<P>, managed_vector<bvh_node>, managed_vector<unsigned>>;
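// Note: because the node, primitive and index storage above is cuda::managed_vector,
// the resulting BVH lives in CUDA unified (managed) memory; it can therefore be built
// on the host and traversed from device code without explicit copies, which is what
// the unified-memory branch of main() below relies on.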
} // cuda
} // visionaray
template <typename Cont>
static void create_random_spheres(Cont& spheres, aabb bbox, float min_radius, float max_radius)
{
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_real_distribution<float> dist_x(bbox.min.x, bbox.max.x);
std::uniform_real_distribution<float> dist_y(bbox.min.y, bbox.max.y);
std::uniform_real_distribution<float> dist_z(bbox.min.z, bbox.max.z);
std::uniform_real_distribution<float> dist_r(min_radius, max_radius);
// Spheres
for (size_t i = 0; i < spheres.size(); ++i)
{
spheres[i] = typename Cont::value_type(vec3(dist_x(mt), dist_y(mt), dist_z(mt)), dist_r(mt));
spheres[i].prim_id = static_cast<int>(i);
spheres[i].geom_id = static_cast<int>(i);
}
}
//-------------------------------------------------------------------------------------------------
// Most simple Visionaray ray tracing kernel
//
template <typename It>
struct raytracing_kernel
{
__host__ __device__
result_record<float> operator()(ray r)
{
auto hr = closest_hit(r, first, last);
result_record<float> result;
result.hit = hr.hit;
result.color = hr.hit ? vec4(1.0f) : vec4(0.0f);
result.depth = hr.t;
return result;
}
It first;
It last;
};
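// As used in this example, sched.frame() invokes this functor once per primary ray
// (one ray per pixel of the render target), and closest_hit() traverses the BVH
// references in [first, last) to find the nearest sphere intersection.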
//-------------------------------------------------------------------------------------------------
// Main function
//
int main(int argc, char** argv)
{
// Application state ----------------------------------
size_t num_spheres = 300000;
aabb bbox({ -1.0f, -1.0f, -1.0f}, { 1.0f, 1.0f, 1.0f });
float min_radius = 0.001f;
float max_radius = 0.002f;
int width = 512;
int height = 512;
bool explicit_copy_mode = false;
// Setup ----------------------------------------------
std::cout << std::fixed;
std::cout << std::setprecision(4);
// Read command line options
cl::CmdLine cmd;
cmdline_options options;
options.emplace_back( cl::makeOption<size_t&>(
cl::Parser<>(),
"num_spheres",
cl::Desc("Number of random spheres to traverse"),
cl::ArgRequired,
cl::init(num_spheres)
) );
options.emplace_back( cl::makeOption<bool&>(
cl::Parser<>(),
"explicit_copy",
cl::Desc("Use explicit memory transfers instead of unified memory"),
cl::ArgDisallowed,
cl::init(explicit_copy_mode)
) );
options.emplace_back( cl::makeOption<int&>(
cl::Parser<>(),
"width",
cl::Desc("Image width"),
cl::ArgRequired,
cl::init(width)
) );
options.emplace_back( cl::makeOption<int&>(
cl::Parser<>(),
"height",
cl::Desc("Image height"),
cl::ArgRequired,
cl::init(height)
) );
for (auto& opt : options)
{
cmd.add(*opt);
}
auto args = std::vector<std::string>(argv + 1, argv + argc);
cl::expandWildcards(args);
cl::expandResponseFiles(args, cl::TokenizeUnix());
try
{
cmd.parse(args);
}
catch (...)
{
std::cout << cmd.help(argv[0]) << '\n';
exit(EXIT_FAILURE);
}
// Don't measure runtime API initialization overhead
hipDeviceSynchronize();
std::cout << "\n*** CUDA unified memory example ***\n\n";
if (!explicit_copy_mode)
{
std::cout << "Using memory mode: CUDA unified memory\n\n";
// Create data in unified memory ------------------
cuda::timer t;
cuda::managed_vector<basic_sphere<float>> spheres(num_spheres);
std::cout << "Creating " << num_spheres << " random spheres...\n";
create_random_spheres(spheres, bbox, min_radius, max_radius);
hipDeviceSynchronize();
std::cout << "Time elapsed: " << t.elapsed() << "s\n\n";
// Create BVH -------------------------------------
std::cout << "Creating BVH...\n";
t.reset();
binned_sah_builder builder;
auto bvh = builder.build(cuda::managed_bvh<basic_sphere<float>>{}, spheres.data(), spheres.size(), true /* spatial splits */);
hipDeviceSynchronize();
std::cout << "Time elapsed: " << t.elapsed() << "s\n\n";
// Prepare for ray tracing ------------------------
using bvh_ref_t = typename cuda::managed_bvh<basic_sphere<float>>::bvh_ref;
cuda::managed_vector<bvh_ref_t> bvh_refs(1);
bvh_refs[0] = bvh.ref();
pinhole_camera cam;
cam.set_viewport(0, 0, width, height);
cam.perspective(45.0f * constants::degrees_to_radians<float>(), 1.0f, 0.001f, 1000.0f);
cam.view_all(bbox);
gpu_buffer_rt<PF_RGBA8, PF_UNSPECIFIED> rendertarget;
rendertarget.resize(width, height);
auto sparams = make_sched_params(cam, rendertarget);
raytracing_kernel<bvh_ref_t const*> kern = { bvh_refs.data(), bvh_refs.data() + 1 };
cuda_sched<ray> sched;
// Ray tracing on the GPU -------------------------
std::cout << "Calculating primary visibility with " << width << " x " << height << " rays...\n";
t.reset();
sched.frame(kern, sparams);
hipDeviceSynchronize();
std::cout << "Time eplased: " << t.elapsed() << "s\n\n";
}
else
{
std::cout << "Using memory mode: explicit memory transfers\n\n";
// Create data in host memory ---------------------
timer t;
thrust::host_vector<basic_sphere<float>> spheres(num_spheres);
std::cout << "Creating " << num_spheres << " random spheres...\n";
create_random_spheres(spheres, bbox, min_radius, max_radius);
std::cout << "Time elapsed: " << t.elapsed() << "s\n\n";
// Create BVH -------------------------------------
std::cout << "Creating BVH...\n";
t.reset();
binned_sah_builder builder;
auto h_bvh = builder.build(index_bvh<basic_sphere<float>>{}, spheres.data(), spheres.size(), true /* spatial splits */);
std::cout << "Time elapsed: " << t.elapsed() << "s\n\n";
// Upload data to GPU -----------------------------
cuda_index_bvh<basic_sphere<float>> d_bvh(h_bvh);
// Prepare for ray tracing ------------------------
using bvh_ref_t = typename cuda_index_bvh<basic_sphere<float>>::bvh_ref;
thrust::device_vector<bvh_ref_t> bvh_refs;
bvh_refs.push_back(d_bvh.ref());
pinhole_camera cam;
cam.set_viewport(0, 0, width, height);
cam.perspective(45.0f * constants::degrees_to_radians<float>(), 1.0f, 0.001f, 1000.0f);
cam.view_all(bbox);
gpu_buffer_rt<PF_RGBA8, PF_UNSPECIFIED> rendertarget;
rendertarget.resize(width, height);
auto sparams = make_sched_params(cam, rendertarget);
raytracing_kernel<bvh_ref_t const*> kern = {
thrust::raw_pointer_cast(bvh_refs.data()),
thrust::raw_pointer_cast(bvh_refs.data()) + 1
};
cuda_sched<ray> sched;
// Ray tracing on the GPU -------------------------
std::cout << "Calculating primary visibility with " << width << " x " << height << " rays...\n";
cuda::timer ct;
sched.frame(kern, sparams);
std::cout << "Time eplased: " << ct.elapsed() << "s\n\n";
}
}
| 2bfbb7a035bdbe84c0afc54b5fdccbb6f4d87094.cu | // This file is distributed under the MIT license.
// See the LICENSE file for details.
#include <cstddef>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <memory>
#include <ostream>
#include <random>
#include <string>
#include <vector>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <Support/CmdLine.h>
#include <Support/CmdLineUtil.h>
#include <visionaray/cuda/managed_vector.h>
#include <visionaray/math/aabb.h>
#include <visionaray/math/sphere.h>
#include <visionaray/bvh.h>
#include <visionaray/gpu_buffer_rt.h>
#include <visionaray/pinhole_camera.h>
#include <visionaray/result_record.h>
#include <visionaray/scheduler.h>
#include <visionaray/traverse.h>
#include <common/timer.h>
using namespace support;
using namespace visionaray;
using cmdline_options = std::vector<std::shared_ptr<cl::OptionBase>>;
namespace visionaray
{
namespace cuda
{
//-------------------------------------------------------------------------------------------------
// Typedef so we can just write cuda::managed_bvh
//
template <typename P>
using managed_bvh = index_bvh_t<managed_vector<P>, managed_vector<bvh_node>, managed_vector<unsigned>>;
} // cuda
} // visionaray
template <typename Cont>
static void create_random_spheres(Cont& spheres, aabb bbox, float min_radius, float max_radius)
{
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_real_distribution<float> dist_x(bbox.min.x, bbox.max.x);
std::uniform_real_distribution<float> dist_y(bbox.min.y, bbox.max.y);
std::uniform_real_distribution<float> dist_z(bbox.min.z, bbox.max.z);
std::uniform_real_distribution<float> dist_r(min_radius, max_radius);
// Spheres
for (size_t i = 0; i < spheres.size(); ++i)
{
spheres[i] = typename Cont::value_type(vec3(dist_x(mt), dist_y(mt), dist_z(mt)), dist_r(mt));
spheres[i].prim_id = static_cast<int>(i);
spheres[i].geom_id = static_cast<int>(i);
}
}
//-------------------------------------------------------------------------------------------------
// Most simple Visionaray ray tracing kernel
//
template <typename It>
struct raytracing_kernel
{
__host__ __device__
result_record<float> operator()(ray r)
{
auto hr = closest_hit(r, first, last);
result_record<float> result;
result.hit = hr.hit;
result.color = hr.hit ? vec4(1.0f) : vec4(0.0f);
result.depth = hr.t;
return result;
}
It first;
It last;
};
//-------------------------------------------------------------------------------------------------
// Main function
//
int main(int argc, char** argv)
{
// Application state ----------------------------------
size_t num_spheres = 300000;
aabb bbox({ -1.0f, -1.0f, -1.0f}, { 1.0f, 1.0f, 1.0f });
float min_radius = 0.001f;
float max_radius = 0.002f;
int width = 512;
int height = 512;
bool explicit_copy_mode = false;
// Setup ----------------------------------------------
std::cout << std::fixed;
std::cout << std::setprecision(4);
// Read command line options
cl::CmdLine cmd;
cmdline_options options;
options.emplace_back( cl::makeOption<size_t&>(
cl::Parser<>(),
"num_spheres",
cl::Desc("Number of random spheres to traverse"),
cl::ArgRequired,
cl::init(num_spheres)
) );
options.emplace_back( cl::makeOption<bool&>(
cl::Parser<>(),
"explicit_copy",
cl::Desc("Use explicit memory transfers instead of unified memory"),
cl::ArgDisallowed,
cl::init(explicit_copy_mode)
) );
options.emplace_back( cl::makeOption<int&>(
cl::Parser<>(),
"width",
cl::Desc("Image width"),
cl::ArgRequired,
cl::init(width)
) );
options.emplace_back( cl::makeOption<int&>(
cl::Parser<>(),
"height",
cl::Desc("Image height"),
cl::ArgRequired,
cl::init(height)
) );
for (auto& opt : options)
{
cmd.add(*opt);
}
auto args = std::vector<std::string>(argv + 1, argv + argc);
cl::expandWildcards(args);
cl::expandResponseFiles(args, cl::TokenizeUnix());
try
{
cmd.parse(args);
}
catch (...)
{
std::cout << cmd.help(argv[0]) << '\n';
exit(EXIT_FAILURE);
}
// Don't measure runtime API initialization overhead
cudaDeviceSynchronize();
std::cout << "\n*** CUDA unified memory example ***\n\n";
if (!explicit_copy_mode)
{
std::cout << "Using memory mode: CUDA unified memory\n\n";
// Create data in unified memory ------------------
cuda::timer t;
cuda::managed_vector<basic_sphere<float>> spheres(num_spheres);
std::cout << "Creating " << num_spheres << " random spheres...\n";
create_random_spheres(spheres, bbox, min_radius, max_radius);
cudaDeviceSynchronize();
std::cout << "Time elapsed: " << t.elapsed() << "s\n\n";
// Create BVH -------------------------------------
std::cout << "Creating BVH...\n";
t.reset();
binned_sah_builder builder;
auto bvh = builder.build(cuda::managed_bvh<basic_sphere<float>>{}, spheres.data(), spheres.size(), true /* spatial splits */);
cudaDeviceSynchronize();
std::cout << "Time elapsed: " << t.elapsed() << "s\n\n";
// Prepare for ray tracing ------------------------
using bvh_ref_t = typename cuda::managed_bvh<basic_sphere<float>>::bvh_ref;
cuda::managed_vector<bvh_ref_t> bvh_refs(1);
bvh_refs[0] = bvh.ref();
pinhole_camera cam;
cam.set_viewport(0, 0, width, height);
cam.perspective(45.0f * constants::degrees_to_radians<float>(), 1.0f, 0.001f, 1000.0f);
cam.view_all(bbox);
gpu_buffer_rt<PF_RGBA8, PF_UNSPECIFIED> rendertarget;
rendertarget.resize(width, height);
auto sparams = make_sched_params(cam, rendertarget);
raytracing_kernel<bvh_ref_t const*> kern = { bvh_refs.data(), bvh_refs.data() + 1 };
cuda_sched<ray> sched;
// Ray tracing on the GPU -------------------------
std::cout << "Calculating primary visibility with " << width << " x " << height << " rays...\n";
t.reset();
sched.frame(kern, sparams);
cudaDeviceSynchronize();
std::cout << "Time eplased: " << t.elapsed() << "s\n\n";
}
else
{
std::cout << "Using memory mode: explicit memory transfers\n\n";
// Create data in host memory ---------------------
timer t;
thrust::host_vector<basic_sphere<float>> spheres(num_spheres);
std::cout << "Creating " << num_spheres << " random spheres...\n";
create_random_spheres(spheres, bbox, min_radius, max_radius);
std::cout << "Time elapsed: " << t.elapsed() << "s\n\n";
// Create BVH -------------------------------------
std::cout << "Creating BVH...\n";
t.reset();
binned_sah_builder builder;
auto h_bvh = builder.build(index_bvh<basic_sphere<float>>{}, spheres.data(), spheres.size(), true /* spatial splits */);
std::cout << "Time elapsed: " << t.elapsed() << "s\n\n";
// Upload data to GPU -----------------------------
cuda_index_bvh<basic_sphere<float>> d_bvh(h_bvh);
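// Constructing cuda_index_bvh from the host-side index_bvh is where the explicit
// host-to-device transfers of the BVH nodes, primitives and indices happen in this
// branch, in contrast to the managed-memory path above.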
// Prepare for ray tracing ------------------------
using bvh_ref_t = typename cuda_index_bvh<basic_sphere<float>>::bvh_ref;
thrust::device_vector<bvh_ref_t> bvh_refs;
bvh_refs.push_back(d_bvh.ref());
pinhole_camera cam;
cam.set_viewport(0, 0, width, height);
cam.perspective(45.0f * constants::degrees_to_radians<float>(), 1.0f, 0.001f, 1000.0f);
cam.view_all(bbox);
gpu_buffer_rt<PF_RGBA8, PF_UNSPECIFIED> rendertarget;
rendertarget.resize(width, height);
auto sparams = make_sched_params(cam, rendertarget);
raytracing_kernel<bvh_ref_t const*> kern = {
thrust::raw_pointer_cast(bvh_refs.data()),
thrust::raw_pointer_cast(bvh_refs.data()) + 1
};
cuda_sched<ray> sched;
// Ray tracing on the GPU -------------------------
std::cout << "Calculating primary visibility with " << width << " x " << height << " rays...\n";
cuda::timer ct;
sched.frame(kern, sparams);
std::cout << "Time eplased: " << ct.elapsed() << "s\n\n";
}
}
|
0da7d193913b4aa74b98dce90ceeffe2e4c7a158.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/tabulate.h>
#include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/error_utils.h"
#include "join/joining.h"
#include "dataframe/cudf_table.cuh"
#include "hash/hash_functions.cuh"
#include "utilities/int_fastdiv.h"
#include "utilities/nvtx/nvtx_utils.h"
constexpr int BLOCK_SIZE = 256;
constexpr int ROWS_PER_THREAD = 1;
/* --------------------------------------------------------------------------*/
/**
* @Synopsis This function determines if a number is a power of 2.
*
* @Param number The number to check.
*
* @Returns True if the number is a power of 2.
*/
/* ----------------------------------------------------------------------------*/
template <typename T>
bool is_power_two( T number )
{
return (0 == (number & (number - 1)));
}
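// Worked example of the test above: 8 is 0b1000 and 8 & 7 == 0, so it passes;
// 6 is 0b0110 and 6 & 5 == 0b0100 != 0, so it fails. Note that number == 0 also
// passes this test, so callers are expected to supply a positive count.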
/* --------------------------------------------------------------------------*/
/**
* @Synopsis This functor is used to compute the hash value for the rows
* of a gdf_table
*/
/* ----------------------------------------------------------------------------*/
template <template <typename> class hash_function,
typename size_type>
struct row_hasher
{
row_hasher(gdf_table<size_type> const & table_to_hash)
: the_table{table_to_hash}
{}
__device__
hash_value_type operator()(size_type row_index) const
{
return the_table.template hash_row<hash_function>(row_index);
}
gdf_table<size_type> const & the_table;
};
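// This functor is consumed by thrust::tabulate in gdf_hash below: tabulate passes
// each row index i in [0, num_rows) to operator() and stores the returned hash at
// position i of the output sequence.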
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes the hash value of each row in the input set of columns.
*
* @Param num_cols The number of columns in the input set
* @Param input The list of columns whose rows will be hashed
* @Param hash The hash function to use
* @Param output The hash value of each row of the input
*
* @Returns
*/
/* ----------------------------------------------------------------------------*/
gdf_error gdf_hash(int num_cols, gdf_column **input, gdf_hash_func hash, gdf_column *output)
{
// Ensure inputs aren't null
if((0 == num_cols)
|| (nullptr == input)
|| (nullptr == output))
{
return GDF_DATASET_EMPTY;
}
// check that the output dtype is int32
// TODO: do we need to support int64 as well?
if (output->dtype != GDF_INT32)
{
return GDF_UNSUPPORTED_DTYPE;
}
// Return immediately for empty input/output
if(nullptr != input[0]) {
if(0 == input[0]->size){
return GDF_SUCCESS;
}
}
if(0 == output->size) {
return GDF_SUCCESS;
}
else if(nullptr == output->data) {
return GDF_DATASET_EMPTY;
}
using size_type = int64_t;
// Wrap input columns in gdf_table
std::unique_ptr< gdf_table<size_type> > input_table{new gdf_table<size_type>(num_cols, input)};
const size_type num_rows = input_table->get_column_length();
// Wrap output buffer in Thrust device_ptr
hash_value_type * p_output = static_cast<hash_value_type*>(output->data);
thrust::device_ptr<hash_value_type> row_hash_values = thrust::device_pointer_cast(p_output);
// Compute the hash value for each row depending on the specified hash function
switch(hash)
{
case GDF_HASH_MURMUR3:
{
thrust::tabulate(rmm::exec_policy()->on(0),
row_hash_values,
row_hash_values + num_rows,
row_hasher<MurmurHash3_32,size_type>(*input_table));
break;
}
case GDF_HASH_IDENTITY:
{
thrust::tabulate(rmm::exec_policy()->on(0),
row_hash_values,
row_hash_values + num_rows,
row_hasher<IdentityHash,size_type>(*input_table));
break;
}
default:
return GDF_INVALID_HASH_FUNCTION;
}
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses the FAST modulo operation implemented in int_fastdiv from here:
* https://github.com/milakov/int_fastdiv
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct fast_modulo_partitioner
{
fast_modulo_partitioner(int num_partitions) : fast_divisor{num_partitions}{}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
// Using int_fastdiv casts 'hash_value' to an int, which can
// result in negative modulos, requiring taking the absolute value
// Because of the casting it can also return results that are not
// the same as using the normal % operator
output_type partition_number = std::abs(hash_value % fast_divisor);
return partition_number;
}
const int_fastdiv fast_divisor;
};
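// Example of the caveat noted above (values illustrative): a 32-bit hash such as
// 0x90000000 becomes negative once truncated to int by int_fastdiv, so the raw
// modulo can be negative and std::abs is required to keep the partition number in
// [0, num_partitions).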
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct modulo_partitioner
{
modulo_partitioner(size_type num_partitions) : divisor{num_partitions}{}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
return hash_value % divisor;
}
const size_type divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses bitshifts. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently via
* a single bitwise AND as:
* n & (d - 1)
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct bitwise_partitioner
{
bitwise_partitioner(size_type num_partitions) : divisor{(num_partitions - 1)}
{
assert( is_power_two(num_partitions) );
}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
return hash_value & (divisor);
}
const size_type divisor;
};
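// Worked example of the identity above: for num_partitions = 8 the stored divisor is
// 7 (0b0111), and a hash value of 29 (0b11101) maps to 29 & 7 = 5, which matches
// 29 % 8.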
/* --------------------------------------------------------------------------*/
/**
* @brief Computes which partition each row of a gdf_table will belong to based
on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the global
size of each partition across all thread blocks.
*
* @Param[in] the_table The table whose rows will be partitioned
* @Param[in] num_rows The number of rows in the table
* @Param[in] num_partitions The number of partitions to divide the rows into
 * @Param[in] the_partitioner The functor that maps a row's hash value to a partition number
* @Param[out] row_partition_numbers Array that holds which partition each row belongs to
* @Param[out] block_partition_sizes Array that holds the size of each partition for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
* @Param[out] global_partition_sizes The number of rows in each partition.
*/
/* ----------------------------------------------------------------------------*/
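// Illustrative layout (example values): with gridDim.x == 2 blocks and 3 partitions,
// block_partition_sizes is ordered
//   { b0p0, b1p0, b0p1, b1p1, b0p2, b1p2 },
// i.e. the count for partition p from block b is written at index p * gridDim.x + b.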
template <template <typename> class hash_function,
typename partitioner_type,
typename size_type>
__global__
void compute_row_partition_numbers(gdf_table<size_type> const & the_table,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type * row_partition_numbers,
size_type * block_partition_sizes,
size_type * global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while(partition_number < num_partitions)
{
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while( row_number < num_rows)
{
// See here why template disambiguator is required:
// https://stackoverflow.com/questions/4077110/template-disambiguator
const hash_value_type row_hash_value = the_table.template hash_row<hash_function>(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while(partition_number < num_partitions)
{
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Given an array of partition numbers, computes the final output location
for each element in the output such that all rows with the same partition are
contiguous in memory.
*
* @Param row_partition_numbers The array that records the partition number for each row
* @Param num_rows The number of rows
 * @Param num_partitions The number of partitions
* @Param[out] block_partition_offsets Array that holds the offset of each partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1 partition(num_partitions -1) offset, ...} }
*/
/* ----------------------------------------------------------------------------*/
template <typename size_type>
__global__
void compute_row_output_locations(size_type * row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type * block_partition_offsets)
{
// Shared array that holds the offset of this blocks partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
// Initialize array of this blocks offsets from global array
size_type partition_number= threadIdx.x;
while(partition_number < num_partitions)
{
shared_partition_offsets[partition_number] = block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
  // Get each row's partition number, and get its output location by
  // incrementing the block's offset counter for that partition number
// and store the row's output location in-place
while( row_number < num_rows )
{
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the corresponding
// partition offset for this block
const size_type row_output_location = atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Partitions an input gdf_table into a specified number of partitions.
* A hash value is computed for each row in a sub-set of the columns of the
* input table. Each hash value is placed in a bin from [0, number of partitions).
* A copy of the input table is created where the rows are rearranged such that
* rows with hash values in the same bin are contiguous.
*
* @Param[in] input_table The table to partition
* @Param[in] table_to_hash Sub-table of the input table with only the columns
* that will be hashed
* @Param[in] num_partitions The number of partitions that table will be rearranged into
* @Param[out] partition_offsets Preallocated array the size of the number of
* partitions. Where partition_offsets[i] indicates the starting position
* of partition 'i'
* @Param[out] partitioned_output Preallocated gdf_columns to hold the rearrangement
* of the input columns into the desired number of partitions
* @tparam hash_function The hash function that will be used to hash the rows
*/
/* ----------------------------------------------------------------------------*/
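// In outline, the function below proceeds in four steps:
// (1) hash every row and histogram per-block / global partition sizes,
// (2) exclusive-scan those counts into per-block and global offsets,
// (3) convert each row's partition number into its output row index,
// (4) scatter the input rows into the partitioned output.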
template < template <typename> class hash_function,
typename size_type>
gdf_error hash_partition_gdf_table(gdf_table<size_type> const & input_table,
gdf_table<size_type> const & table_to_hash,
const size_type num_partitions,
size_type * partition_offsets,
gdf_table<size_type> & partitioned_output)
{
const size_type num_rows = table_to_hash.get_column_length();
constexpr int rows_per_block = BLOCK_SIZE * ROWS_PER_THREAD;
const size_type grid_size = (num_rows + rows_per_block - 1) / rows_per_block;
// Allocate array to hold which partition each row belongs to
size_type * row_partition_numbers{nullptr};
RMM_TRY( RMM_ALLOC((void**)&row_partition_numbers, num_rows * sizeof(hash_value_type), 0) ); // TODO: non-default stream?
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
size_type * block_partition_sizes{nullptr};
RMM_TRY(RMM_ALLOC((void**)&block_partition_sizes, (grid_size * num_partitions) * sizeof(size_type), 0) );
// Holds the total number of rows in each partition
size_type * global_partition_sizes{nullptr};
RMM_TRY( RMM_ALLOC((void**)&global_partition_sizes, num_partitions * sizeof(size_type), 0) );
CUDA_TRY( hipMemsetAsync(global_partition_sizes, 0, num_partitions * sizeof(size_type)) );
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if( true == is_power_two(num_partitions) )
{
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = bitwise_partitioner<hash_value_type, size_type, size_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers<hash_function>)
, dim3(grid_size), dim3(BLOCK_SIZE), num_partitions * sizeof(size_type), 0, table_to_hash,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers,
block_partition_sizes,
global_partition_sizes);
}
else
{
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = modulo_partitioner<hash_value_type, size_type, size_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers<hash_function>)
, dim3(grid_size), dim3(BLOCK_SIZE), num_partitions * sizeof(size_type), 0, table_to_hash,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers,
block_partition_sizes,
global_partition_sizes);
}
CUDA_CHECK_LAST();
// Compute exclusive scan of all blocks' partition sizes in-place to determine
// the starting point for each blocks portion of each partition in the output
size_type * scanned_block_partition_sizes{block_partition_sizes};
thrust::exclusive_scan(rmm::exec_policy()->on(0),
block_partition_sizes,
block_partition_sizes + (grid_size * num_partitions),
scanned_block_partition_sizes);
CUDA_CHECK_LAST();
// Compute exclusive scan of size of each partition to determine offset location
// of each partition in final output. This can be done independently on a separate stream
hipStream_t s1{};
hipStreamCreate(&s1);
size_type * scanned_global_partition_sizes{global_partition_sizes};
thrust::exclusive_scan(rmm::exec_policy(s1)->on(s1),
global_partition_sizes,
global_partition_sizes + num_partitions,
scanned_global_partition_sizes);
CUDA_CHECK_LAST();
  // Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
CUDA_TRY(hipMemcpyAsync(partition_offsets,
scanned_global_partition_sizes,
num_partitions * sizeof(size_type),
hipMemcpyDeviceToHost,
s1));
  // Compute the output location for each row in-place based on its
// partition number such that each partition will be contiguous in memory
size_type * row_output_locations{row_partition_numbers};
hipLaunchKernelGGL(( compute_row_output_locations)
, dim3(grid_size), dim3(BLOCK_SIZE), num_partitions * sizeof(size_type), 0, row_output_locations,
num_rows,
num_partitions,
scanned_block_partition_sizes);
CUDA_CHECK_LAST();
// Creates the partitioned output table by scattering the rows of
  // the input table to rows of the output table based on each row's
// output location
gdf_error gdf_error_code = input_table.scatter(partitioned_output,
row_output_locations);
if(GDF_SUCCESS != gdf_error_code){
return gdf_error_code;
}
CUDA_CHECK_LAST();
RMM_TRY(RMM_FREE(row_partition_numbers, 0));
RMM_TRY(RMM_FREE(block_partition_sizes, 0));
hipStreamSynchronize(s1);
hipStreamDestroy(s1);
RMM_TRY(RMM_FREE(global_partition_sizes, 0));
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the hash values of the specified rows in the input columns and
* bins the hash values into the desired number of partitions. Rearranges the input
* columns such that rows with hash values in the same bin are contiguous.
*
 * @Param[in] num_input_cols The number of columns in the input set of columns
* @Param[in] input[] The input set of columns
* @Param[in] columns_to_hash[] Indices of the columns in the input set to hash
* @Param[in] num_cols_to_hash The number of columns to hash
* @Param[in] num_partitions The number of partitions to rearrange the input rows into
* @Param[out] partitioned_output Preallocated gdf_columns to hold the rearrangement
* of the input columns into the desired number of partitions
* @Param[out] partition_offsets Preallocated array the size of the number of
* partitions. Where partition_offsets[i] indicates the starting position
* of partition 'i'
* @Param[in] hash The hash function to use
*
* @Returns If the operation was successful, returns GDF_SUCCESS
*/
/* ----------------------------------------------------------------------------*/
gdf_error gdf_hash_partition(int num_input_cols,
gdf_column * input[],
int columns_to_hash[],
int num_cols_to_hash,
int num_partitions,
gdf_column * partitioned_output[],
int partition_offsets[],
gdf_hash_func hash)
{
// Use int until gdf API is updated to use something other than int
// for ordinal variables
using size_type = int;
// Ensure all the inputs are non-zero and not null
if((0 == num_input_cols)
|| (0 == num_cols_to_hash)
|| (0 == num_partitions)
|| (nullptr == input)
|| (nullptr == partitioned_output)
|| (nullptr == columns_to_hash)
|| (nullptr == partition_offsets))
{
return GDF_INVALID_API_CALL;
}
const gdf_size_type num_rows{input[0]->size};
// If the input is empty, return immediately
if(0 == num_rows)
{
return GDF_SUCCESS;
}
// TODO Check if the num_rows is > MAX_ROWS (MAX_INT)
// check that the columns data are not null, have matching types,
// and the same number of rows
for (size_type i = 0; i < num_input_cols; i++) {
if( (nullptr == input[i]->data)
|| (nullptr == partitioned_output[i]->data))
return GDF_DATASET_EMPTY;
if(input[i]->dtype != partitioned_output[i]->dtype)
return GDF_PARTITION_DTYPE_MISMATCH;
if((num_rows != input[i]->size)
|| (num_rows != partitioned_output[i]->size))
return GDF_COLUMN_SIZE_MISMATCH;
}
PUSH_RANGE("LIBGDF_HASH_PARTITION", PARTITION_COLOR);
// Wrap input and output columns in gdf_table
std::unique_ptr< const gdf_table<size_type> > input_table{new gdf_table<size_type>(num_input_cols, input)};
std::unique_ptr< gdf_table<size_type> > output_table{new gdf_table<size_type>(num_input_cols, partitioned_output)};
// Create vector of pointers to columns that will be hashed
std::vector<gdf_column *> gdf_columns_to_hash(num_cols_to_hash);
for(size_type i = 0; i < num_cols_to_hash; ++i)
{
gdf_columns_to_hash[i] = input[columns_to_hash[i]];
}
// Create a separate table of the columns to be hashed
std::unique_ptr< const gdf_table<size_type> > table_to_hash {new gdf_table<size_type>(num_cols_to_hash,
gdf_columns_to_hash.data())};
gdf_error gdf_status{GDF_SUCCESS};
switch(hash)
{
case GDF_HASH_MURMUR3:
{
gdf_status = hash_partition_gdf_table<MurmurHash3_32>(*input_table,
*table_to_hash,
num_partitions,
partition_offsets,
*output_table);
break;
}
case GDF_HASH_IDENTITY:
{
gdf_status = hash_partition_gdf_table<IdentityHash>(*input_table,
*table_to_hash,
num_partitions,
partition_offsets,
*output_table);
break;
}
default:
gdf_status = GDF_INVALID_HASH_FUNCTION;
}
POP_RANGE();
return gdf_status;
}
| 0da7d193913b4aa74b98dce90ceeffe2e4c7a158.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/tabulate.h>
#include "cudf.h"
#include "rmm/rmm.h"
#include "utilities/error_utils.h"
#include "join/joining.h"
#include "dataframe/cudf_table.cuh"
#include "hash/hash_functions.cuh"
#include "utilities/int_fastdiv.h"
#include "utilities/nvtx/nvtx_utils.h"
constexpr int BLOCK_SIZE = 256;
constexpr int ROWS_PER_THREAD = 1;
/* --------------------------------------------------------------------------*/
/**
* @Synopsis This function determines if a number is a power of 2.
*
* @Param number The number to check.
*
* @Returns True if the number is a power of 2.
*/
/* ----------------------------------------------------------------------------*/
template <typename T>
bool is_power_two( T number )
{
return (0 == (number & (number - 1)));
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis This functor is used to compute the hash value for the rows
* of a gdf_table
*/
/* ----------------------------------------------------------------------------*/
template <template <typename> class hash_function,
typename size_type>
struct row_hasher
{
row_hasher(gdf_table<size_type> const & table_to_hash)
: the_table{table_to_hash}
{}
__device__
hash_value_type operator()(size_type row_index) const
{
return the_table.template hash_row<hash_function>(row_index);
}
gdf_table<size_type> const & the_table;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes the hash value of each row in the input set of columns.
*
* @Param num_cols The number of columns in the input set
* @Param input The list of columns whose rows will be hashed
* @Param hash The hash function to use
* @Param output The hash value of each row of the input
*
* @Returns
*/
/* ----------------------------------------------------------------------------*/
gdf_error gdf_hash(int num_cols, gdf_column **input, gdf_hash_func hash, gdf_column *output)
{
// Ensure inputs aren't null
if((0 == num_cols)
|| (nullptr == input)
|| (nullptr == output))
{
return GDF_DATASET_EMPTY;
}
// check that the output dtype is int32
// TODO: do we need to support int64 as well?
if (output->dtype != GDF_INT32)
{
return GDF_UNSUPPORTED_DTYPE;
}
// Return immediately for empty input/output
if(nullptr != input[0]) {
if(0 == input[0]->size){
return GDF_SUCCESS;
}
}
if(0 == output->size) {
return GDF_SUCCESS;
}
else if(nullptr == output->data) {
return GDF_DATASET_EMPTY;
}
using size_type = int64_t;
// Wrap input columns in gdf_table
std::unique_ptr< gdf_table<size_type> > input_table{new gdf_table<size_type>(num_cols, input)};
const size_type num_rows = input_table->get_column_length();
// Wrap output buffer in Thrust device_ptr
hash_value_type * p_output = static_cast<hash_value_type*>(output->data);
thrust::device_ptr<hash_value_type> row_hash_values = thrust::device_pointer_cast(p_output);
// Compute the hash value for each row depending on the specified hash function
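  // Note: thrust::tabulate writes hasher(i) into position i, so each output
  // element receives the hash of the corresponding row.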
switch(hash)
{
case GDF_HASH_MURMUR3:
{
thrust::tabulate(rmm::exec_policy()->on(0),
row_hash_values,
row_hash_values + num_rows,
row_hasher<MurmurHash3_32,size_type>(*input_table));
break;
}
case GDF_HASH_IDENTITY:
{
thrust::tabulate(rmm::exec_policy()->on(0),
row_hash_values,
row_hash_values + num_rows,
row_hasher<IdentityHash,size_type>(*input_table));
break;
}
default:
return GDF_INVALID_HASH_FUNCTION;
}
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses the FAST modulo operation implemented in int_fastdiv from here:
* https://github.com/milakov/int_fastdiv
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct fast_modulo_partitioner
{
fast_modulo_partitioner(int num_partitions) : fast_divisor{num_partitions}{}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
// Using int_fastdiv casts 'hash_value' to an int, which can
// result in negative modulos, requiring taking the absolute value
// Because of the casting it can also return results that are not
// the same as using the normal % operator
output_type partition_number = std::abs(hash_value % fast_divisor);
return partition_number;
}
const int_fastdiv fast_divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct modulo_partitioner
{
modulo_partitioner(size_type num_partitions) : divisor{num_partitions}{}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
return hash_value % divisor;
}
const size_type divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
 * that uses a bitwise mask. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently via
* a single bitwise AND as:
* n & (d - 1)
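 * For example, with d = 8 the mask is 7 (0b0111): 26 % 8 == 26 & 7 == 2.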
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct bitwise_partitioner
{
bitwise_partitioner(size_type num_partitions) : divisor{(num_partitions - 1)}
{
assert( is_power_two(num_partitions) );
}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
return hash_value & (divisor);
}
const size_type divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @brief Computes which partition each row of a gdf_table will belong to based
on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the global
size of each partition across all thread blocks.
*
* @Param[in] the_table The table whose rows will be partitioned
* @Param[in] num_rows The number of rows in the table
* @Param[in] num_partitions The number of partitions to divide the rows into
 * @Param[in] the_partitioner The functor that maps a row's hash value to a partition number
* @Param[out] row_partition_numbers Array that holds which partition each row belongs to
* @Param[out] block_partition_sizes Array that holds the size of each partition for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
* @Param[out] global_partition_sizes The number of rows in each partition.
*/
/* ----------------------------------------------------------------------------*/
template <template <typename> class hash_function,
typename partitioner_type,
typename size_type>
__global__
void compute_row_partition_numbers(gdf_table<size_type> const & the_table,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type * row_partition_numbers,
size_type * block_partition_sizes,
size_type * global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while(partition_number < num_partitions)
{
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while( row_number < num_rows)
{
// See here why template disambiguator is required:
// https://stackoverflow.com/questions/4077110/template-disambiguator
const hash_value_type row_hash_value = the_table.template hash_row<hash_function>(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while(partition_number < num_partitions)
{
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Given an array of partition numbers, computes the final output location
for each element in the output such that all rows with the same partition are
contiguous in memory.
*
* @Param row_partition_numbers The array that records the partition number for each row
* @Param num_rows The number of rows
 * @Param num_partitions The number of partitions
* @Param[out] block_partition_offsets Array that holds the offset of each partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1 partition(num_partitions -1) offset, ...} }
*/
/* ----------------------------------------------------------------------------*/
template <typename size_type>
__global__
void compute_row_output_locations(size_type * row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type * block_partition_offsets)
{
// Shared array that holds the offset of this blocks partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
// Initialize array of this blocks offsets from global array
size_type partition_number= threadIdx.x;
while(partition_number < num_partitions)
{
shared_partition_offsets[partition_number] = block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
  // Get each row's partition number, and get its output location by
  // incrementing the block's offset counter for that partition number
// and store the row's output location in-place
while( row_number < num_rows )
{
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the corresponding
// partition offset for this block
const size_type row_output_location = atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Partitions an input gdf_table into a specified number of partitions.
* A hash value is computed for each row in a sub-set of the columns of the
* input table. Each hash value is placed in a bin from [0, number of partitions).
* A copy of the input table is created where the rows are rearranged such that
* rows with hash values in the same bin are contiguous.
*
* @Param[in] input_table The table to partition
* @Param[in] table_to_hash Sub-table of the input table with only the columns
* that will be hashed
* @Param[in] num_partitions The number of partitions that table will be rearranged into
* @Param[out] partition_offsets Preallocated array the size of the number of
* partitions. Where partition_offsets[i] indicates the starting position
* of partition 'i'
* @Param[out] partitioned_output Preallocated gdf_columns to hold the rearrangement
* of the input columns into the desired number of partitions
* @tparam hash_function The hash function that will be used to hash the rows
*/
/* ----------------------------------------------------------------------------*/
template < template <typename> class hash_function,
typename size_type>
gdf_error hash_partition_gdf_table(gdf_table<size_type> const & input_table,
gdf_table<size_type> const & table_to_hash,
const size_type num_partitions,
size_type * partition_offsets,
gdf_table<size_type> & partitioned_output)
{
const size_type num_rows = table_to_hash.get_column_length();
constexpr int rows_per_block = BLOCK_SIZE * ROWS_PER_THREAD;
const size_type grid_size = (num_rows + rows_per_block - 1) / rows_per_block;
// Allocate array to hold which partition each row belongs to
size_type * row_partition_numbers{nullptr};
RMM_TRY( RMM_ALLOC((void**)&row_partition_numbers, num_rows * sizeof(hash_value_type), 0) ); // TODO: non-default stream?
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
size_type * block_partition_sizes{nullptr};
RMM_TRY(RMM_ALLOC((void**)&block_partition_sizes, (grid_size * num_partitions) * sizeof(size_type), 0) );
// Holds the total number of rows in each partition
size_type * global_partition_sizes{nullptr};
RMM_TRY( RMM_ALLOC((void**)&global_partition_sizes, num_partitions * sizeof(size_type), 0) );
CUDA_TRY( cudaMemsetAsync(global_partition_sizes, 0, num_partitions * sizeof(size_type)) );
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if( true == is_power_two(num_partitions) )
{
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = bitwise_partitioner<hash_value_type, size_type, size_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
compute_row_partition_numbers<hash_function>
<<<grid_size, BLOCK_SIZE, num_partitions * sizeof(size_type)>>>(table_to_hash,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers,
block_partition_sizes,
global_partition_sizes);
}
else
{
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = modulo_partitioner<hash_value_type, size_type, size_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
compute_row_partition_numbers<hash_function>
<<<grid_size, BLOCK_SIZE, num_partitions * sizeof(size_type)>>>(table_to_hash,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers,
block_partition_sizes,
global_partition_sizes);
}
CUDA_CHECK_LAST();
// Compute exclusive scan of all blocks' partition sizes in-place to determine
// the starting point for each blocks portion of each partition in the output
size_type * scanned_block_partition_sizes{block_partition_sizes};
thrust::exclusive_scan(rmm::exec_policy()->on(0),
block_partition_sizes,
block_partition_sizes + (grid_size * num_partitions),
scanned_block_partition_sizes);
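  // Illustrative example (values chosen for illustration): with 2 blocks and
  // 2 partitions, block sizes { 3, 1, 2, 4 } scan to { 0, 3, 4, 6 }, i.e. each
  // block learns where its portion of every partition starts in the output.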
CUDA_CHECK_LAST();
// Compute exclusive scan of size of each partition to determine offset location
// of each partition in final output. This can be done independently on a separate stream
cudaStream_t s1{};
cudaStreamCreate(&s1);
size_type * scanned_global_partition_sizes{global_partition_sizes};
thrust::exclusive_scan(rmm::exec_policy(s1)->on(s1),
global_partition_sizes,
global_partition_sizes + num_partitions,
scanned_global_partition_sizes);
CUDA_CHECK_LAST();
  // Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
CUDA_TRY(cudaMemcpyAsync(partition_offsets,
scanned_global_partition_sizes,
num_partitions * sizeof(size_type),
cudaMemcpyDeviceToHost,
s1));
  // Compute the output location for each row in-place based on its
// partition number such that each partition will be contiguous in memory
size_type * row_output_locations{row_partition_numbers};
compute_row_output_locations
<<<grid_size, BLOCK_SIZE, num_partitions * sizeof(size_type)>>>(row_output_locations,
num_rows,
num_partitions,
scanned_block_partition_sizes);
CUDA_CHECK_LAST();
// Creates the partitioned output table by scattering the rows of
  // the input table to rows of the output table based on each row's
// output location
gdf_error gdf_error_code = input_table.scatter(partitioned_output,
row_output_locations);
if(GDF_SUCCESS != gdf_error_code){
return gdf_error_code;
}
CUDA_CHECK_LAST();
RMM_TRY(RMM_FREE(row_partition_numbers, 0));
RMM_TRY(RMM_FREE(block_partition_sizes, 0));
cudaStreamSynchronize(s1);
cudaStreamDestroy(s1);
RMM_TRY(RMM_FREE(global_partition_sizes, 0));
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the hash values of the specified rows in the input columns and
* bins the hash values into the desired number of partitions. Rearranges the input
* columns such that rows with hash values in the same bin are contiguous.
*
 * @Param[in] num_input_cols The number of columns in the input set of columns
* @Param[in] input[] The input set of columns
* @Param[in] columns_to_hash[] Indices of the columns in the input set to hash
* @Param[in] num_cols_to_hash The number of columns to hash
* @Param[in] num_partitions The number of partitions to rearrange the input rows into
* @Param[out] partitioned_output Preallocated gdf_columns to hold the rearrangement
* of the input columns into the desired number of partitions
* @Param[out] partition_offsets Preallocated array the size of the number of
* partitions. Where partition_offsets[i] indicates the starting position
* of partition 'i'
* @Param[in] hash The hash function to use
*
* @Returns If the operation was successful, returns GDF_SUCCESS
*/
/* ----------------------------------------------------------------------------*/
gdf_error gdf_hash_partition(int num_input_cols,
gdf_column * input[],
int columns_to_hash[],
int num_cols_to_hash,
int num_partitions,
gdf_column * partitioned_output[],
int partition_offsets[],
gdf_hash_func hash)
{
// Use int until gdf API is updated to use something other than int
// for ordinal variables
using size_type = int;
// Ensure all the inputs are non-zero and not null
if((0 == num_input_cols)
|| (0 == num_cols_to_hash)
|| (0 == num_partitions)
|| (nullptr == input)
|| (nullptr == partitioned_output)
|| (nullptr == columns_to_hash)
|| (nullptr == partition_offsets))
{
return GDF_INVALID_API_CALL;
}
const gdf_size_type num_rows{input[0]->size};
// If the input is empty, return immediately
if(0 == num_rows)
{
return GDF_SUCCESS;
}
// TODO Check if the num_rows is > MAX_ROWS (MAX_INT)
// check that the columns data are not null, have matching types,
// and the same number of rows
for (size_type i = 0; i < num_input_cols; i++) {
if( (nullptr == input[i]->data)
|| (nullptr == partitioned_output[i]->data))
return GDF_DATASET_EMPTY;
if(input[i]->dtype != partitioned_output[i]->dtype)
return GDF_PARTITION_DTYPE_MISMATCH;
if((num_rows != input[i]->size)
|| (num_rows != partitioned_output[i]->size))
return GDF_COLUMN_SIZE_MISMATCH;
}
PUSH_RANGE("LIBGDF_HASH_PARTITION", PARTITION_COLOR);
// Wrap input and output columns in gdf_table
std::unique_ptr< const gdf_table<size_type> > input_table{new gdf_table<size_type>(num_input_cols, input)};
std::unique_ptr< gdf_table<size_type> > output_table{new gdf_table<size_type>(num_input_cols, partitioned_output)};
// Create vector of pointers to columns that will be hashed
std::vector<gdf_column *> gdf_columns_to_hash(num_cols_to_hash);
for(size_type i = 0; i < num_cols_to_hash; ++i)
{
gdf_columns_to_hash[i] = input[columns_to_hash[i]];
}
// Create a separate table of the columns to be hashed
std::unique_ptr< const gdf_table<size_type> > table_to_hash {new gdf_table<size_type>(num_cols_to_hash,
gdf_columns_to_hash.data())};
gdf_error gdf_status{GDF_SUCCESS};
switch(hash)
{
case GDF_HASH_MURMUR3:
{
gdf_status = hash_partition_gdf_table<MurmurHash3_32>(*input_table,
*table_to_hash,
num_partitions,
partition_offsets,
*output_table);
break;
}
case GDF_HASH_IDENTITY:
{
gdf_status = hash_partition_gdf_table<IdentityHash>(*input_table,
*table_to_hash,
num_partitions,
partition_offsets,
*output_table);
break;
}
default:
gdf_status = GDF_INVALID_HASH_FUNCTION;
}
POP_RANGE();
return gdf_status;
}
|
388a32ed76c5d39566221aa86e0fd62b86dfd27d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pairwise_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SimilarityProcess(const int nthreads, Dtype* similarity, Dtype label_dim) {
CUDA_KERNEL_LOOP(index, nthreads) {
if((similarity[index] < 0) || (similarity[index] >= label_dim)){
//unknown label
similarity[index] = Dtype(-1.0);
}
else if(similarity[index] > 0){
//similar label
similarity[index] = Dtype(1.0);
}
}
}
template <typename Dtype>
__global__ void ContinousSimilarityProcess(const int nthreads, const Dtype* similarity, const Dtype* similarity1, Dtype* similarity2, Dtype* sim, const int outer_num) {
CUDA_KERNEL_LOOP(index, nthreads) {
int data_id1 = index / outer_num;
int data_id2 = index % outer_num;
sim[index] = similarity[index] * similarity[index] / (similarity1[outer_num*data_id1+data_id1] * similarity2[outer_num*data_id2+data_id2]);
if(sim[index] == 0){
sim[index] = 0.25;
}
}
}
template <typename Dtype>
__global__ void RemoveZero(const int nthreads, Dtype* similarity1, Dtype* similarity2) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(similarity1[index] == 0){
similarity1[index] = 1.0;
}
if(similarity2[index] == 0){
similarity2[index] = 1.0;
}
}
}
template <typename Dtype>
__global__ void PairwiseLossForwardGPU(const int nthreads, const int num, const Dtype* similarity,
const Dtype* exp_product, const Dtype* product, const Dtype threshold, Dtype* count, Dtype* loss_data, const Dtype class_num) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(similarity[index] >= 0){
count[index] = Dtype(1.0);
if((threshold >= 0) && (product[index] >= threshold)){
loss_data[index] = product[index] * (1 - (similarity[index] > 0));
}
else{
loss_data[index] = log(1 + exp_product[index]) - (similarity[index] > 0) * product[index];
}
if(similarity[index] > 0){
loss_data[index] = loss_data[index] * class_num;
count[index] *= class_num;
}
}
else{
count[index] = Dtype(0.0);
loss_data[index] = Dtype(0.0);
}
}
}
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Dtype* similarity = loss_.mutable_gpu_data();
Dtype* dot_product = product_.mutable_gpu_data();
Dtype* exp_product = product_.mutable_gpu_diff();
Dtype* loss_data = loss_.mutable_gpu_diff();
Dtype* count = count_.mutable_gpu_data();
Dtype* similarity1 = own_similarity_.mutable_gpu_data();
Dtype* similarity2 = own_similarity_.mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_data1 = bottom[2]->gpu_data();
Dtype* label = bottom[1]->mutable_gpu_data();
Dtype* label1 = bottom[3]->mutable_gpu_data();
int nthreads = outer_num_ * outer_num_;
Dtype loss, count_num;
caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_,
Dtype(1.0), label, label1, Dtype(0.0), similarity);
if (continous_similarity_){
caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_,
Dtype(1.0), label, label, Dtype(0.0), similarity1);
caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_,
Dtype(1.0), label1, label1, Dtype(0.0), similarity2);
hipLaunchKernelGGL(( RemoveZero<Dtype>), dim3(CAFFE_GET_BLOCKS(own_similarity_.count())),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, own_similarity_.count(), similarity1, similarity2);
/*caffe_gpu_memcpy(nthreads*sizeof(Dtype), similarity, own_similarity_.mutable_cpu_data());
for(int i = 0; i < outer_num_; i++){
LOG(INFO) << own_similarity_.cpu_data()[i];
}
caffe_gpu_memcpy(nthreads*sizeof(Dtype), similarity1, own_similarity_.mutable_cpu_data());
for(int i = 0; i < outer_num_; i++){
LOG(INFO) << own_similarity_.cpu_data()[i*outer_num_+i];
}
caffe_gpu_memcpy(nthreads*sizeof(Dtype), similarity2, own_similarity_.mutable_cpu_data());
for(int i = 0; i < outer_num_; i++){
LOG(INFO) << own_similarity_.cpu_data()[i*outer_num_+i];
}*/
hipLaunchKernelGGL(( ContinousSimilarityProcess<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, similarity, similarity1, similarity2, loss_data, outer_num_);
caffe_gpu_memcpy(nthreads*sizeof(Dtype), loss_data, similarity1);
/*caffe_gpu_memcpy(nthreads*sizeof(Dtype), similarity1, own_similarity_.mutable_cpu_data());
for(int i = 0; i < outer_num_; i++){
LOG(INFO) << own_similarity_.cpu_data()[i];
}*/
}
hipLaunchKernelGGL(( SimilarityProcess<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, similarity, label_dim_);
caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, inner_num_,
Dtype(1.0), bottom_data, bottom_data1, Dtype(0.0), dot_product);
caffe_gpu_scal(outer_num_ * outer_num_, sigmoid_param_, dot_product);
caffe_gpu_exp(outer_num_ * outer_num_, dot_product, exp_product);
hipLaunchKernelGGL(( PairwiseLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, similarity, exp_product,
dot_product, l_threshold_, count, loss_data, class_num_);
caffe_gpu_asum(nthreads, loss_data, &loss);
caffe_gpu_asum(nthreads, count, &count_num);
loss /= (count_num > 0 ? count_num : Dtype(1));
LOG(INFO) << "L loss:" << loss;
loss = loss * (l_lambda_ > 0);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void PairwiseLossBackwardGPU(const int nthreads, const int num,
const Dtype* similarity, const Dtype* exp_product, Dtype* count, Dtype* diff, const Dtype class_num) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(similarity[index] >= 0){
diff[index] = 2 * (
1 / (1 + 1 / exp_product[index]) -
(similarity[index] > 0)
);
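      // Note: 1 / (1 + 1 / e^x) equals sigmoid(x), so diff is
      // 2 * (sigmoid(x) - s_ij), i.e. (twice) the derivative of
      // log(1 + e^x) - s_ij * x with respect to the scaled dot product x.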
count[index] = Dtype(1.0);
if(similarity[index] > 0){
diff[index] = diff[index] * class_num;
count[index] *= class_num;
}
}
else{
diff[index] = Dtype(0.0);
count[index] = Dtype(0.0);
}
}
}
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* diff = count_.mutable_gpu_diff();
Dtype* count = count_.mutable_gpu_data();
const Dtype* similarity = loss_.gpu_data();
const Dtype* exp_product = product_.gpu_diff();
const Dtype* similarity1 = own_similarity_.gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_diff1 = bottom[2]->mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_data1 = bottom[2]->gpu_data();
int nthreads = outer_num_ * outer_num_;
//calculate diff
hipLaunchKernelGGL(( PairwiseLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, outer_num_, similarity,
exp_product, count, diff, class_num_);
if(continous_similarity_){
caffe_gpu_mul(nthreads, diff, similarity1, diff);
caffe_gpu_scal(nthreads, Dtype(4), diff);
}
//copy to bottom_diff
Dtype count_num;
caffe_gpu_asum(nthreads, count, &count_num);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_,
l_lambda_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data1,
Dtype(0.0), bottom_diff);
caffe_gpu_gemm(CblasTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_,
l_lambda_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data,
Dtype(0.0), bottom_diff1);
caffe_gpu_scal(outer_num_, sigmoid_param_, bottom_diff);
caffe_gpu_scal(outer_num_, sigmoid_param_, bottom_diff1);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer);
} // namespace caffe
| 388a32ed76c5d39566221aa86e0fd62b86dfd27d.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/pairwise_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SimilarityProcess(const int nthreads, Dtype* similarity, Dtype label_dim) {
CUDA_KERNEL_LOOP(index, nthreads) {
if((similarity[index] < 0) || (similarity[index] >= label_dim)){
//unknown label
similarity[index] = Dtype(-1.0);
}
else if(similarity[index] > 0){
//similar label
similarity[index] = Dtype(1.0);
}
}
}
template <typename Dtype>
__global__ void ContinousSimilarityProcess(const int nthreads, const Dtype* similarity, const Dtype* similarity1, Dtype* similarity2, Dtype* sim, const int outer_num) {
CUDA_KERNEL_LOOP(index, nthreads) {
int data_id1 = index / outer_num;
int data_id2 = index % outer_num;
sim[index] = similarity[index] * similarity[index] / (similarity1[outer_num*data_id1+data_id1] * similarity2[outer_num*data_id2+data_id2]);
if(sim[index] == 0){
sim[index] = 0.25;
}
}
}
template <typename Dtype>
__global__ void RemoveZero(const int nthreads, Dtype* similarity1, Dtype* similarity2) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(similarity1[index] == 0){
similarity1[index] = 1.0;
}
if(similarity2[index] == 0){
similarity2[index] = 1.0;
}
}
}
template <typename Dtype>
__global__ void PairwiseLossForwardGPU(const int nthreads, const int num, const Dtype* similarity,
const Dtype* exp_product, const Dtype* product, const Dtype threshold, Dtype* count, Dtype* loss_data, const Dtype class_num) {
CUDA_KERNEL_LOOP(index, nthreads) {
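    // Note: with x the (scaled) dot product and s = 1 for similar pairs, the loss
    // below is log(1 + e^x) - s * x; the threshold branch applies the large-x
    // approximation log(1 + e^x) ~= x, giving x * (1 - s), presumably to avoid
    // overflow in exp(x).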
if(similarity[index] >= 0){
count[index] = Dtype(1.0);
if((threshold >= 0) && (product[index] >= threshold)){
loss_data[index] = product[index] * (1 - (similarity[index] > 0));
}
else{
loss_data[index] = log(1 + exp_product[index]) - (similarity[index] > 0) * product[index];
}
if(similarity[index] > 0){
loss_data[index] = loss_data[index] * class_num;
count[index] *= class_num;
}
}
else{
count[index] = Dtype(0.0);
loss_data[index] = Dtype(0.0);
}
}
}
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
Dtype* similarity = loss_.mutable_gpu_data();
Dtype* dot_product = product_.mutable_gpu_data();
Dtype* exp_product = product_.mutable_gpu_diff();
Dtype* loss_data = loss_.mutable_gpu_diff();
Dtype* count = count_.mutable_gpu_data();
Dtype* similarity1 = own_similarity_.mutable_gpu_data();
Dtype* similarity2 = own_similarity_.mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_data1 = bottom[2]->gpu_data();
Dtype* label = bottom[1]->mutable_gpu_data();
Dtype* label1 = bottom[3]->mutable_gpu_data();
int nthreads = outer_num_ * outer_num_;
Dtype loss, count_num;
caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_,
Dtype(1.0), label, label1, Dtype(0.0), similarity);
if (continous_similarity_){
caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_,
Dtype(1.0), label, label, Dtype(0.0), similarity1);
caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, label_dim_,
Dtype(1.0), label1, label1, Dtype(0.0), similarity2);
RemoveZero<Dtype><<<CAFFE_GET_BLOCKS(own_similarity_.count()),
CAFFE_CUDA_NUM_THREADS>>>(own_similarity_.count(), similarity1, similarity2);
/*caffe_gpu_memcpy(nthreads*sizeof(Dtype), similarity, own_similarity_.mutable_cpu_data());
for(int i = 0; i < outer_num_; i++){
LOG(INFO) << own_similarity_.cpu_data()[i];
}
caffe_gpu_memcpy(nthreads*sizeof(Dtype), similarity1, own_similarity_.mutable_cpu_data());
for(int i = 0; i < outer_num_; i++){
LOG(INFO) << own_similarity_.cpu_data()[i*outer_num_+i];
}
caffe_gpu_memcpy(nthreads*sizeof(Dtype), similarity2, own_similarity_.mutable_cpu_data());
for(int i = 0; i < outer_num_; i++){
LOG(INFO) << own_similarity_.cpu_data()[i*outer_num_+i];
}*/
ContinousSimilarityProcess<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, similarity, similarity1, similarity2, loss_data, outer_num_);
caffe_gpu_memcpy(nthreads*sizeof(Dtype), loss_data, similarity1);
/*caffe_gpu_memcpy(nthreads*sizeof(Dtype), similarity1, own_similarity_.mutable_cpu_data());
for(int i = 0; i < outer_num_; i++){
LOG(INFO) << own_similarity_.cpu_data()[i];
}*/
}
SimilarityProcess<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, similarity, label_dim_);
caffe_gpu_gemm(CblasNoTrans, CblasTrans, outer_num_, outer_num_, inner_num_,
Dtype(1.0), bottom_data, bottom_data1, Dtype(0.0), dot_product);
caffe_gpu_scal(outer_num_ * outer_num_, sigmoid_param_, dot_product);
caffe_gpu_exp(outer_num_ * outer_num_, dot_product, exp_product);
PairwiseLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, similarity, exp_product,
dot_product, l_threshold_, count, loss_data, class_num_);
caffe_gpu_asum(nthreads, loss_data, &loss);
caffe_gpu_asum(nthreads, count, &count_num);
loss /= (count_num > 0 ? count_num : Dtype(1));
LOG(INFO) << "L loss:" << loss;
loss = loss * (l_lambda_ > 0);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void PairwiseLossBackwardGPU(const int nthreads, const int num,
const Dtype* similarity, const Dtype* exp_product, Dtype* count, Dtype* diff, const Dtype class_num) {
CUDA_KERNEL_LOOP(index, nthreads) {
if(similarity[index] >= 0){
diff[index] = 2 * (
1 / (1 + 1 / exp_product[index]) -
(similarity[index] > 0)
);
count[index] = Dtype(1.0);
if(similarity[index] > 0){
diff[index] = diff[index] * class_num;
count[index] *= class_num;
}
}
else{
diff[index] = Dtype(0.0);
count[index] = Dtype(0.0);
}
}
}
template <typename Dtype>
void PairwiseLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* diff = count_.mutable_gpu_diff();
Dtype* count = count_.mutable_gpu_data();
const Dtype* similarity = loss_.gpu_data();
const Dtype* exp_product = product_.gpu_diff();
const Dtype* similarity1 = own_similarity_.gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_diff1 = bottom[2]->mutable_gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_data1 = bottom[2]->gpu_data();
int nthreads = outer_num_ * outer_num_;
//calculate diff
PairwiseLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, outer_num_, similarity,
exp_product, count, diff, class_num_);
if(continous_similarity_){
caffe_gpu_mul(nthreads, diff, similarity1, diff);
caffe_gpu_scal(nthreads, Dtype(4), diff);
}
//copy to bottom_diff
Dtype count_num;
caffe_gpu_asum(nthreads, count, &count_num);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_,
l_lambda_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data1,
Dtype(0.0), bottom_diff);
caffe_gpu_gemm(CblasTrans, CblasNoTrans, outer_num_, inner_num_, outer_num_,
l_lambda_ / (count_num > 0 ? count_num : Dtype(1)), diff, bottom_data,
Dtype(0.0), bottom_diff1);
caffe_gpu_scal(outer_num_, sigmoid_param_, bottom_diff);
caffe_gpu_scal(outer_num_, sigmoid_param_, bottom_diff1);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(PairwiseLossLayer);
} // namespace caffe
|
0dac337952de19baf4b9a9c864e7ee5263c80320.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <time.h>
#define TAM 5
void llenarVector(int *A) {
//srand(time(NULL));
for(int i=0; i<TAM; i++) {
A[i]=rand();
}
}
__global__ void sumaVectores(int *A, int *B, int *C) {
int i = threadIdx.x+blockDim.x * blockIdx.x;
if(i<TAM)
C[i] = A[i]+B[i];
}
void printVector(int *A) {
printf("(");
for(int i=0; i<TAM; i++) {
printf("%d ", A[i]);
if(i!=TAM-1) {
printf(", ");
}
}
printf(")\n");
}
int main(){
int size = TAM*sizeof(int);
int *A = (int *) malloc(size);
int *B = (int *) malloc(size);
int *C = (int *) malloc(size);
int *d_A, *d_B, *d_C;
hipError_t err = hipMalloc((void**)&d_A,size);
if (err != hipSuccess) {
printf("Error %s", hipGetErrorString( err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void**)&d_B,size);
if (err != hipSuccess) {
printf("Error %s", hipGetErrorString( err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void**)&d_C,size);
if (err != hipSuccess) {
printf("Error %s", hipGetErrorString( err));
exit(EXIT_FAILURE);
}
llenarVector(A);
// printVector(A);
llenarVector(B);
// printVector(B);
hipMemcpy(d_A,A,size,hipMemcpyHostToDevice);
hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
  // Round the block count up so every element gets a thread even when TAM < 64
  hipLaunchKernelGGL(( sumaVectores), dim3((TAM + 63) / 64),dim3(64), 0, 0, d_A,d_B,d_C);
hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost );
printVector(C);
//printf( "c[0] = %d\n",0,C[0] );
//printf( "c[%d] = %d\n",TAM-1, C[TAM-1] );
err = hipFree(d_A);
if (err != hipSuccess) {
printf("Error %s", hipGetErrorString( err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess) {
printf("Error %s", hipGetErrorString( err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess) {
printf("Error %s", hipGetErrorString( err));
exit(EXIT_FAILURE);
}
free(A);
free(B);
free(C);
return 0;
}
| 0dac337952de19baf4b9a9c864e7ee5263c80320.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#define TAM 5
void llenarVector(int *A) {
//srand(time(NULL));
for(int i=0; i<TAM; i++) {
A[i]=rand();
}
}
__global__ void sumaVectores(int *A, int *B, int *C) {
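  // Each thread adds a single element; the bounds check below protects the
  // threads of the last block whose global index falls past TAM.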
int i = threadIdx.x+blockDim.x * blockIdx.x;
if(i<TAM)
C[i] = A[i]+B[i];
}
void printVector(int *A) {
printf("(");
for(int i=0; i<TAM; i++) {
printf("%d ", A[i]);
if(i!=TAM-1) {
printf(", ");
}
}
printf(")\n");
}
int main(){
int size = TAM*sizeof(int);
int *A = (int *) malloc(size);
int *B = (int *) malloc(size);
int *C = (int *) malloc(size);
int *d_A, *d_B, *d_C;
cudaError_t err = cudaMalloc((void**)&d_A,size);
if (err != cudaSuccess) {
printf("Error %s", cudaGetErrorString( err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**)&d_B,size);
if (err != cudaSuccess) {
printf("Error %s", cudaGetErrorString( err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void**)&d_C,size);
if (err != cudaSuccess) {
printf("Error %s", cudaGetErrorString( err));
exit(EXIT_FAILURE);
}
llenarVector(A);
// printVector(A);
llenarVector(B);
// printVector(B);
cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
  // Round the block count up so every element gets a thread even when TAM < 64
  sumaVectores<<<(TAM + 63) / 64,64>>>(d_A,d_B,d_C);
cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost );
printVector(C);
//printf( "c[0] = %d\n",0,C[0] );
//printf( "c[%d] = %d\n",TAM-1, C[TAM-1] );
err = cudaFree(d_A);
if (err != cudaSuccess) {
printf("Error %s", cudaGetErrorString( err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess) {
printf("Error %s", cudaGetErrorString( err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess) {
printf("Error %s", cudaGetErrorString( err));
exit(EXIT_FAILURE);
}
free(A);
free(B);
free(C);
return 0;
}
|
fbfda406833a2679e3394f72955b522ef5b8fad6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricAdaptiveMaxPooling.cu"
#else
#include <THHUNN/common.h>
// 5d tensor B x D x T x H x W
void THNN_(VolumetricAdaptiveMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int osizeT,
int osizeW,
int osizeH)
{
THCUNN_assertSameGPU(state, 3, input, output, indices);
THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
"4D or 5D (batch mode) tensor expected for input, but got: %s");
THCIndex_t *indices_data;
scalar_t *output_data;
scalar_t *input_data;
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t istrideD, istrideT, istrideH, istrideW;
int64_t totalZ;
if (input->dim() == 4) {
sizeD = input->size(0);
isizeT = input->size(1);
isizeH = input->size(2);
isizeW = input->size(3);
istrideD = input->stride(0);
istrideT = input->stride(1);
istrideH = input->stride(2);
istrideW = input->stride(3);
THCTensor_(resize4d)(state, output, sizeD, osizeT, osizeH, osizeW);
THCIndexTensor_(resize4d)(state, indices, sizeD, osizeT, osizeH, osizeW);
totalZ = sizeD * osizeT;
} else {
input = THCTensor_(newContiguous)(state, input);
int64_t sizeB = input->size(0);
sizeD = input->size(1);
isizeT = input->size(2);
isizeH = input->size(3);
isizeW = input->size(4);
istrideD = input->stride(1);
istrideT = input->stride(2);
istrideH = input->stride(3);
istrideW = input->stride(4);
THCTensor_(resize5d)(state, output, sizeB, sizeD, osizeT, osizeH, osizeW);
THCIndexTensor_(resize5d)(state, indices, sizeB, sizeD, osizeT, osizeH, osizeW);
totalZ = sizeB * sizeD * osizeT;
}
input_data = THCTensor_(data)(state, input);
output_data = THCTensor_(data)(state, output);
indices_data = THCIndexTensor_(data)(state, indices);
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = max((int)(16L / totalZ), 1);
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
hipLaunchKernelGGL(( cunn_VolumetricAdaptiveMaxPooling_updateOutput_kernel)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
input_data, output_data, indices_data, isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ
);
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(hipGetLastError());
}
if (input->dim() == 5) {
// clean
THCTensor_(free)(state, input);
}
}
void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices)
{
THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCIndex_t *indices_data;
scalar_t *gradInput_data;
scalar_t *gradOutput_data;
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t osizeT, osizeH, osizeW;
int64_t totalZ;
if (input->dim() == 4) {
sizeD = input->size(0);
isizeT = input->size(1);
isizeH = input->size(2);
isizeW = input->size(3);
osizeT = gradOutput->size(1);
osizeH = gradOutput->size(2);
osizeW = gradOutput->size(3);
} else {
sizeD = input->size(1);
isizeT = input->size(2);
isizeH = input->size(3);
isizeW = input->size(4);
osizeT = gradOutput->size(2);
osizeH = gradOutput->size(3);
osizeW = gradOutput->size(4);
}
bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0);
if (input->dim() == 4) {
totalZ = sizeD * osizeT;
} else {
int sizeB = input->size(0);
totalZ = sizeB * sizeD * osizeT;
}
indices_data = THCIndexTensor_(data)(state, indices);
gradInput_data = THCTensor_(data)(state, gradInput);
gradOutput_data = THCTensor_(data)(state, gradOutput);
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = max((int)(16L / totalZ), 1);
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
if (atomic)
{
hipLaunchKernelGGL(( cunn_atomic_VolumetricAdaptiveMaxPooling_updateGradInput_kernel)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
gradInput_data, gradOutput_data, indices_data,
isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ
);
} else {
hipLaunchKernelGGL(( cunn_VolumetricAdaptiveMaxPooling_updateGradInput_kernel)
, dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
gradInput_data, gradOutput_data, indices_data,
isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ
);
}
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(hipGetLastError());
}
// clean
THCTensor_(free)(state, gradOutput);
}
#endif
| fbfda406833a2679e3394f72955b522ef5b8fad6.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/VolumetricAdaptiveMaxPooling.cu"
#else
#include <THCUNN/common.h>
// 5d tensor B x D x T x H x W
void THNN_(VolumetricAdaptiveMaxPooling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCIndexTensor *indices,
int osizeT,
int osizeW,
int osizeH)
{
THCUNN_assertSameGPU(state, 3, input, output, indices);
THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
"4D or 5D (batch mode) tensor expected for input, but got: %s");
THCIndex_t *indices_data;
scalar_t *output_data;
scalar_t *input_data;
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t istrideD, istrideT, istrideH, istrideW;
int64_t totalZ;
if (input->dim() == 4) {
sizeD = input->size(0);
isizeT = input->size(1);
isizeH = input->size(2);
isizeW = input->size(3);
istrideD = input->stride(0);
istrideT = input->stride(1);
istrideH = input->stride(2);
istrideW = input->stride(3);
THCTensor_(resize4d)(state, output, sizeD, osizeT, osizeH, osizeW);
THCIndexTensor_(resize4d)(state, indices, sizeD, osizeT, osizeH, osizeW);
totalZ = sizeD * osizeT;
} else {
input = THCTensor_(newContiguous)(state, input);
int64_t sizeB = input->size(0);
sizeD = input->size(1);
isizeT = input->size(2);
isizeH = input->size(3);
isizeW = input->size(4);
istrideD = input->stride(1);
istrideT = input->stride(2);
istrideH = input->stride(3);
istrideW = input->stride(4);
THCTensor_(resize5d)(state, output, sizeB, sizeD, osizeT, osizeH, osizeW);
THCIndexTensor_(resize5d)(state, indices, sizeB, sizeD, osizeT, osizeH, osizeW);
totalZ = sizeB * sizeD * osizeT;
}
input_data = THCTensor_(data)(state, input);
output_data = THCTensor_(data)(state, output);
indices_data = THCIndexTensor_(data)(state, indices);
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = max((int)(16L / totalZ), 1);
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
cunn_VolumetricAdaptiveMaxPooling_updateOutput_kernel
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
input_data, output_data, indices_data, isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW, istrideD, istrideT, istrideH, istrideW, offsetZ
);
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(cudaGetLastError());
}
if (input->dim() == 5) {
// clean
THCTensor_(free)(state, input);
}
}
void THNN_(VolumetricAdaptiveMaxPooling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCIndexTensor *indices)
{
THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput);
gradOutput = THCTensor_(newContiguous)(state, gradOutput);
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
THCIndex_t *indices_data;
scalar_t *gradInput_data;
scalar_t *gradOutput_data;
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t osizeT, osizeH, osizeW;
int64_t totalZ;
if (input->dim() == 4) {
sizeD = input->size(0);
isizeT = input->size(1);
isizeH = input->size(2);
isizeW = input->size(3);
osizeT = gradOutput->size(1);
osizeH = gradOutput->size(2);
osizeW = gradOutput->size(3);
} else {
sizeD = input->size(1);
isizeT = input->size(2);
isizeH = input->size(3);
isizeW = input->size(4);
osizeT = gradOutput->size(2);
osizeH = gradOutput->size(3);
osizeW = gradOutput->size(4);
}
bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0);
if (input->dim() == 4) {
totalZ = sizeD * osizeT;
} else {
int sizeB = input->size(0);
totalZ = sizeB * sizeD * osizeT;
}
indices_data = THCIndexTensor_(data)(state, indices);
gradInput_data = THCTensor_(data)(state, gradInput);
gradOutput_data = THCTensor_(data)(state, gradOutput);
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = max((int)(16L / totalZ), 1);
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
if (atomic)
{
cunn_atomic_VolumetricAdaptiveMaxPooling_updateGradInput_kernel
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
gradInput_data, gradOutput_data, indices_data,
isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ
);
} else {
cunn_VolumetricAdaptiveMaxPooling_updateGradInput_kernel
<<<blocks, threads, 0, THCState_getCurrentStream(state)>>>(
gradInput_data, gradOutput_data, indices_data,
isizeT, isizeH, isizeW, osizeT, osizeH, osizeW, offsetZ
);
}
totalZ -= 65535;
offsetZ += 65535;
THCudaCheck(cudaGetLastError());
}
// clean
THCTensor_(free)(state, gradOutput);
}
#endif
|
fe14a5559403ceb032c21b2dd518bd757353eb64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void kernel0(int *data)
{
size_t t_id = threadIdx.x;
if (1 <= t_id)
data[t_id] += data[t_id - 1];
if (2 <= t_id)
data[t_id] += data[t_id - 2];
if (4 <= t_id)
data[t_id] += data[t_id - 4];
if (8 <= t_id)
data[t_id] += data[t_id - 8];
if (16 <= t_id)
data[t_id] += data[t_id - 16];
}
int main()
{
int data[32];
int result[32];
int *data_d;
hipMalloc(&data_d, sizeof(data));
for (int i = 0; i < 32; i++)
data[i] = i;
dim3 gridDim(1);
dim3 blockDim(32);
hipMemcpy(data_d, data, sizeof(data), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel0), dim3(gridDim), dim3(blockDim), 0, 0, data_d);
hipMemcpy(result, data_d, sizeof(data), hipMemcpyDeviceToHost);
printf("kernel0 : ");
for (int i = 0; i < 32; i++)
printf("%4i ", result[i]);
printf("(device, global memory)\n");
}
| fe14a5559403ceb032c21b2dd518bd757353eb64.cu | #include <stdio.h>
__global__ void kernel0(int *data)
{
size_t t_id = threadIdx.x;
if (1 <= t_id)
data[t_id] += data[t_id - 1];
if (2 <= t_id)
data[t_id] += data[t_id - 2];
if (4 <= t_id)
data[t_id] += data[t_id - 4];
if (8 <= t_id)
data[t_id] += data[t_id - 8];
if (16 <= t_id)
data[t_id] += data[t_id - 16];
}
int main()
{
int data[32];
int result[32];
int *data_d;
cudaMalloc(&data_d, sizeof(data));
for (int i = 0; i < 32; i++)
data[i] = i;
dim3 gridDim(1);
dim3 blockDim(32);
cudaMemcpy(data_d, data, sizeof(data), cudaMemcpyHostToDevice);
kernel0<<<gridDim, blockDim>>>(data_d);
cudaMemcpy(result, data_d, sizeof(data), cudaMemcpyDeviceToHost);
printf("kernel0 : ");
for (int i = 0; i < 32; i++)
printf("%4i ", result[i]);
printf("(device, global memory)\n");
}
|
bcd3c2dafa088e5fec3f210977246c24b0ba19a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS 256
__global__ void hamming_matcher_unroll(
unsigned* out_idx,
unsigned* out_dist,
const unsigned max_dist,
const unsigned feat_len)
{
unsigned nquery = 6;
unsigned ntrain = 6;
unsigned f = blockDim.x * blockIdx.x + threadIdx.x;
unsigned tid = threadIdx.x;
__shared__ unsigned s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
s_dist[tid] = max_dist;
s_idx[tid] = 0xffffffff;
bool valid_feat = (f < ntrain);
for (unsigned j = 0; j < nquery; j++) {
s_dist[tid] = max_dist;
// Load one query feature that will be tested against all training
// features in current block
if (tid < feat_len && f < ntrain) {
out_dist[tid] = tid * nquery + j;
}
__syncthreads();
unsigned dist = 0;
if (tid < 32) {
if (s_dist[tid + 64] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 128];
s_idx[tid] = s_idx[tid + 128];
}
}
__syncthreads();
if (tid < 16) {
if (s_dist[tid + 32] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 64];
s_idx[tid] = s_idx[tid + 64];
}
}
__syncthreads();
if (tid < 8) {
if (s_dist[tid + 16] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 32];
s_idx[tid] = s_idx[tid + 32];
}
if (s_dist[tid + 4] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 4];
s_idx[tid] = s_idx[tid + 4];
}
if (s_dist[tid + 2] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 2];
s_idx[tid] = s_idx[tid + 2];
}
if (s_dist[tid + 1] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 1];
s_idx[tid] = s_idx[tid + 1];
}
}
__syncthreads();
// Store best match in training features from block to the current
// query feature
if (f < ntrain) {
out_dist[j * gridDim.x + blockIdx.x] = s_dist[0];
out_idx[j * gridDim.x + blockIdx.x] = s_idx[0];
}
__syncthreads();
}
} | bcd3c2dafa088e5fec3f210977246c24b0ba19a8.cu | #define THREADS 256
__global__ void hamming_matcher_unroll(
unsigned* out_idx,
unsigned* out_dist,
const unsigned max_dist,
const unsigned feat_len)
{
unsigned nquery = 6;
unsigned ntrain = 6;
unsigned f = blockDim.x * blockIdx.x + threadIdx.x;
unsigned tid = threadIdx.x;
__shared__ unsigned s_dist[THREADS];
__shared__ unsigned s_idx[THREADS];
s_dist[tid] = max_dist;
s_idx[tid] = 0xffffffff;
bool valid_feat = (f < ntrain);
for (unsigned j = 0; j < nquery; j++) {
s_dist[tid] = max_dist;
// Load one query feature that will be tested against all training
// features in current block
if (tid < feat_len && f < ntrain) {
out_dist[tid] = tid * nquery + j;
}
__syncthreads();
unsigned dist = 0;
if (tid < 32) {
if (s_dist[tid + 64] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 128];
s_idx[tid] = s_idx[tid + 128];
}
}
__syncthreads();
if (tid < 16) {
if (s_dist[tid + 32] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 64];
s_idx[tid] = s_idx[tid + 64];
}
}
__syncthreads();
if (tid < 8) {
if (s_dist[tid + 16] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 32];
s_idx[tid] = s_idx[tid + 32];
}
if (s_dist[tid + 4] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 4];
s_idx[tid] = s_idx[tid + 4];
}
if (s_dist[tid + 2] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 2];
s_idx[tid] = s_idx[tid + 2];
}
if (s_dist[tid + 1] < s_dist[tid]) {
s_dist[tid] = s_dist[tid + 1];
s_idx[tid] = s_idx[tid + 1];
}
}
__syncthreads();
// Store best match in training features from block to the current
// query feature
if (f < ntrain) {
out_dist[j * gridDim.x + blockIdx.x] = s_dist[0];
out_idx[j * gridDim.x + blockIdx.x] = s_idx[0];
}
__syncthreads();
}
} |
ebb9f4fb7fa6398af6f7e03e1790160ed1a4c0d7.hip | // !!! This is a file automatically generated by hipify!!!
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//****************************************************************************
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime_api.h>
//anchura del filtro y declaracion de la matriz que usaremos como filtro en memoria constante, por defecto Laplace 5x5
__constant__ const int filterWidthConstant = 5;
__constant__ float filterMatrixConstant[filterWidthConstant * filterWidthConstant];
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
//comentar "const float* const filter," en la cabecera si se quiere usar memoria de constantes
__global__
void box_filter(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols,
/*const float* const filter, */const int filterWidth)
{
//acceso al identificardor del thread en el conjunto global de threads
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//convertimos el identificador 2D del thread a uno en 1D para escribir en la memoria global
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//comprobamos que el indice obtenido no se sale de los limites de la imagen
if ( thread_2D_pos.x >= numCols || thread_2D_pos.y>= numRows )
return;
//recorremos la matriz del filtro
float outputPixel = 0.0f;
for (int i = 0; i < filterWidth; i++)
{
for (int j = 0; j < filterWidth; j++)
{
int row = (int)(thread_2D_pos.y + (i - filterWidth / 2));
//comprobamos que seguimos dentro de la imagen en las filas
if (row < 0)
row = 0;
if (row > numRows - 1)
row = numRows - 1;
int column = (int)(thread_2D_pos.x + (j - filterWidth / 2));
//comprobamos que seguimos dentro de la imagen en las columnas
if (column < 0)
column = 0;
if (column > numCols - 1)
column = numCols - 1;
//Con memoria de constantes
outputPixel += (float)filterMatrixConstant[i * filterWidth + j] * (float)(inputChannel[row * numCols + column]);
//sin memoria de constantes
//outputPixel += (float)filter[i * filterWidth + j] * (float)(inputChannel[row * numCols + column]);
}
}
//comprobamos que el color resultado no sea erroneo RGB --> 0-255
if (outputPixel < 0.0f)
outputPixel = 0.0f;
if (outputPixel > 255.0f)
outputPixel = 255.0f;
outputChannel[thread_1D_pos] = outputPixel;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel,
unsigned char* const greenChannel,unsigned char* const blueChannel)
{
//acceso al identificardor del thread en el conjunto global de threads
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//convertimos el identificador 2D del thread a uno en 1D para escribir en la memoria global
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//comprobamos que el indice obtenido no se sale de los limites de la imagen
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
//Dividimos los 3 canales de la imagen (RGB)
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,const unsigned char* const greenChannel,const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,int numRows,int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// ------------------------ MEMORIA CONSTANTE ------------------------//
//Si se quiere usar memoria de constante hay que comentar estas dos funciones.
//Reservar memoria para el filtro en GPU: d_filter, la cual ya esta declarada
//checkCudaErrors(hipMalloc(&d_filter, filterWidth * filterWidth * sizeof(float)));
// Copiar el filtro (h_filter) a memoria global de la GPU (d_filter)
//checkCudaErrors(hipMemcpy(d_filter, h_filter, filterWidth * filterWidth * sizeof(float), hipMemcpyHostToDevice));
}
void create_filter(float **h_filter, int *filterWidth)
{
	//Modificar el tamaño del filtro dependiendo de cual queremos usar.
//const int KernelWidth = 5; //Memoria global
const int KernelWidth = filterWidthConstant; //Memoria de constante
*filterWidth = KernelWidth;
//create and fill the filter we will convolve with
*h_filter = new float[KernelWidth * KernelWidth];
/*
//Filtro gaussiano: blur
const float KernelSigma = 2.;
float filterSum = 0.f; //for normalization
for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r)
{
for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c)
{
float filterValue = expf( -(float)(c * c + r * r) / (2.f * KernelSigma * KernelSigma));
(*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r)
{
for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) {
(*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] *= normalizationFactor;
}
}
*/
//Laplaciano 5x5
(*h_filter)[0] = 0; (*h_filter)[1] = 0; (*h_filter)[2] = -1.; (*h_filter)[3] = 0; (*h_filter)[4] = 0;
(*h_filter)[5] = 1.; (*h_filter)[6] = -1.; (*h_filter)[7] = -2.; (*h_filter)[8] = -1.; (*h_filter)[9] = 0;
(*h_filter)[10] = -1.;(*h_filter)[11] = -2.; (*h_filter)[12] = 17.; (*h_filter)[13] = -2.; (*h_filter)[14] = -1.;
(*h_filter)[15] = 1.; (*h_filter)[16] = -1.; (*h_filter)[17] = -2.; (*h_filter)[18] = -1.; (*h_filter)[19] = 0;
(*h_filter)[20] = 1.; (*h_filter)[21] = 0; (*h_filter)[22] = -1.; (*h_filter)[23] = 0; (*h_filter)[24] = 0;
//TODO: crear los filtros segun necesidad
	//NOTA: cuidado al establecer el tamaño del filtro a utilizar
/*
//Aumentar nitidez 3x3
(*h_filter)[0] = -0.25; (*h_filter)[1] = -0.25; (*h_filter)[2] = -0.25;
(*h_filter)[3] = -0.25; (*h_filter)[4] = 3.; (*h_filter)[5] = -0.25;
(*h_filter)[6] = -0.25; (*h_filter)[7] = -0.25; (*h_filter)[8] = -0.25;
*/
/*
//Suavizado - 5x5
(*h_filter)[0] = 1; (*h_filter)[1] = 1; (*h_filter)[2] = 1.; (*h_filter)[3] = 1; (*h_filter)[4] = 1.;
(*h_filter)[5] = 1.; (*h_filter)[6] = 4.; (*h_filter)[7] = 4.; (*h_filter)[8] = 4.; (*h_filter)[9] = 1.;
(*h_filter)[10] = 1.;(*h_filter)[11] = 4.; (*h_filter)[12] = 12.; (*h_filter)[13] = 4.; (*h_filter)[14] = 1.;
(*h_filter)[15] = 1.; (*h_filter)[16] = 4.; (*h_filter)[17] = 4.; (*h_filter)[18] = 4.; (*h_filter)[19] = 1.;
(*h_filter)[20] = 1.; (*h_filter)[21] = 1.; (*h_filter)[22] = 1.; (*h_filter)[23] = 1.; (*h_filter)[24] = 1.;
*/
/*
//Detectar bordes - 3x3
(*h_filter)[0] = 0.; (*h_filter)[1] = 1.; (*h_filter)[2] = 0.;
(*h_filter)[3] = 1.; (*h_filter)[4] = -4.; (*h_filter)[5] = 1.;
(*h_filter)[6] = 0.; (*h_filter)[7] = 1.; (*h_filter)[8] = 0.;
*/
//Memoria de constantes(comentar si no se quiere usar), copia a la GPU
hipMemcpyToSymbol(filterMatrixConstant, *h_filter, sizeof(float) * KernelWidth * KernelWidth);
}
void convolution(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redFiltered,
unsigned char *d_greenFiltered,
unsigned char *d_blueFiltered,
const int filterWidth)
{
	//Calcular tamaños de bloque
const dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1);
const dim3 gridSize(ceil(1.0f*numCols / blockSize.x), ceil(1.0f*numRows / blockSize.y));
//Lanzar kernel para separar imagenes RGBA en diferentes colores
	hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
	//Ejecutar convolución. Una por canal
//Box Filter
{
//comentar "d_filter," para memoria de constante
		hipLaunchKernelGGL(( box_filter), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redFiltered, numRows, numCols,/* d_filter, */filterWidth);
		hipLaunchKernelGGL(( box_filter), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenFiltered, numRows, numCols,/* d_filter, */filterWidth);
		hipLaunchKernelGGL(( box_filter), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueFiltered, numRows, numCols,/* d_filter, */filterWidth);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
// Recombining the results.
	hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redFiltered,
d_greenFiltered,
d_blueFiltered,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
//comentar si memoria de constante
//checkCudaErrors(hipFree(d_filter));
}
| ebb9f4fb7fa6398af6f7e03e1790160ed1a4c0d7.cu | //****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//****************************************************************************
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <cuda_profiler_api.h>
//anchura del filtro y declaracion de la matriz que usaremos como filtro en memoria constante, por defecto Laplace 5x5
__constant__ const int filterWidthConstant = 5;
__constant__ float filterMatrixConstant[filterWidthConstant * filterWidthConstant];
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
//comentar "const float* const filter," en la cabecera si se quiere usar memoria de constantes
__global__
void box_filter(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols,
/*const float* const filter, */const int filterWidth)
{
//acceso al identificardor del thread en el conjunto global de threads
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//convertimos el identificador 2D del thread a uno en 1D para escribir en la memoria global
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//comprobamos que el indice obtenido no se sale de los limites de la imagen
if ( thread_2D_pos.x >= numCols || thread_2D_pos.y>= numRows )
return;
//recorremos la matriz del filtro
float outputPixel = 0.0f;
for (int i = 0; i < filterWidth; i++)
{
for (int j = 0; j < filterWidth; j++)
{
int row = (int)(thread_2D_pos.y + (i - filterWidth / 2));
//comprobamos que seguimos dentro de la imagen en las filas
if (row < 0)
row = 0;
if (row > numRows - 1)
row = numRows - 1;
int column = (int)(thread_2D_pos.x + (j - filterWidth / 2));
//comprobamos que seguimos dentro de la imagen en las columnas
if (column < 0)
column = 0;
if (column > numCols - 1)
column = numCols - 1;
//Con memoria de constantes
outputPixel += (float)filterMatrixConstant[i * filterWidth + j] * (float)(inputChannel[row * numCols + column]);
//sin memoria de constantes
//outputPixel += (float)filter[i * filterWidth + j] * (float)(inputChannel[row * numCols + column]);
}
}
//comprobamos que el color resultado no sea erroneo RGB --> 0-255
if (outputPixel < 0.0f)
outputPixel = 0.0f;
if (outputPixel > 255.0f)
outputPixel = 255.0f;
outputChannel[thread_1D_pos] = outputPixel;
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel,
unsigned char* const greenChannel,unsigned char* const blueChannel)
{
//acceso al identificardor del thread en el conjunto global de threads
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
//convertimos el identificador 2D del thread a uno en 1D para escribir en la memoria global
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//comprobamos que el indice obtenido no se sale de los limites de la imagen
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
//Dividimos los 3 canales de la imagen (RGB)
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,const unsigned char* const greenChannel,const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,int numRows,int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
// ------------------------ MEMORIA CONSTANTE ------------------------//
//Si se quiere usar memoria de constante hay que comentar estas dos funciones.
//Reservar memoria para el filtro en GPU: d_filter, la cual ya esta declarada
//checkCudaErrors(cudaMalloc(&d_filter, filterWidth * filterWidth * sizeof(float)));
// Copiar el filtro (h_filter) a memoria global de la GPU (d_filter)
//checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterWidth * filterWidth * sizeof(float), cudaMemcpyHostToDevice));
}
void create_filter(float **h_filter, int *filterWidth)
{
//Modificar el tamaño del filtro dependiendo de cual queremos usar.
//const int KernelWidth = 5; //Memoria global
const int KernelWidth = filterWidthConstant; //Memoria de constante
*filterWidth = KernelWidth;
//create and fill the filter we will convolve with
*h_filter = new float[KernelWidth * KernelWidth];
/*
//Filtro gaussiano: blur
const float KernelSigma = 2.;
float filterSum = 0.f; //for normalization
for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r)
{
for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c)
{
float filterValue = expf( -(float)(c * c + r * r) / (2.f * KernelSigma * KernelSigma));
(*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] = filterValue;
filterSum += filterValue;
}
}
float normalizationFactor = 1.f / filterSum;
for (int r = -KernelWidth/2; r <= KernelWidth/2; ++r)
{
for (int c = -KernelWidth/2; c <= KernelWidth/2; ++c) {
(*h_filter)[(r + KernelWidth/2) * KernelWidth + c + KernelWidth/2] *= normalizationFactor;
}
}
*/
//Laplaciano 5x5
(*h_filter)[0] = 0; (*h_filter)[1] = 0; (*h_filter)[2] = -1.; (*h_filter)[3] = 0; (*h_filter)[4] = 0;
(*h_filter)[5] = 1.; (*h_filter)[6] = -1.; (*h_filter)[7] = -2.; (*h_filter)[8] = -1.; (*h_filter)[9] = 0;
(*h_filter)[10] = -1.;(*h_filter)[11] = -2.; (*h_filter)[12] = 17.; (*h_filter)[13] = -2.; (*h_filter)[14] = -1.;
(*h_filter)[15] = 1.; (*h_filter)[16] = -1.; (*h_filter)[17] = -2.; (*h_filter)[18] = -1.; (*h_filter)[19] = 0;
(*h_filter)[20] = 1.; (*h_filter)[21] = 0; (*h_filter)[22] = -1.; (*h_filter)[23] = 0; (*h_filter)[24] = 0;
//TODO: crear los filtros segun necesidad
//NOTA: cuidado al establecer el tamaño del filtro a utilizar
/*
//Aumentar nitidez 3x3
(*h_filter)[0] = -0.25; (*h_filter)[1] = -0.25; (*h_filter)[2] = -0.25;
(*h_filter)[3] = -0.25; (*h_filter)[4] = 3.; (*h_filter)[5] = -0.25;
(*h_filter)[6] = -0.25; (*h_filter)[7] = -0.25; (*h_filter)[8] = -0.25;
*/
/*
//Suavizado - 5x5
(*h_filter)[0] = 1; (*h_filter)[1] = 1; (*h_filter)[2] = 1.; (*h_filter)[3] = 1; (*h_filter)[4] = 1.;
(*h_filter)[5] = 1.; (*h_filter)[6] = 4.; (*h_filter)[7] = 4.; (*h_filter)[8] = 4.; (*h_filter)[9] = 1.;
(*h_filter)[10] = 1.;(*h_filter)[11] = 4.; (*h_filter)[12] = 12.; (*h_filter)[13] = 4.; (*h_filter)[14] = 1.;
(*h_filter)[15] = 1.; (*h_filter)[16] = 4.; (*h_filter)[17] = 4.; (*h_filter)[18] = 4.; (*h_filter)[19] = 1.;
(*h_filter)[20] = 1.; (*h_filter)[21] = 1.; (*h_filter)[22] = 1.; (*h_filter)[23] = 1.; (*h_filter)[24] = 1.;
*/
/*
//Detectar bordes - 3x3
(*h_filter)[0] = 0.; (*h_filter)[1] = 1.; (*h_filter)[2] = 0.;
(*h_filter)[3] = 1.; (*h_filter)[4] = -4.; (*h_filter)[5] = 1.;
(*h_filter)[6] = 0.; (*h_filter)[7] = 1.; (*h_filter)[8] = 0.;
*/
//Memoria de constantes(comentar si no se quiere usar), copia a la GPU
cudaMemcpyToSymbol(filterMatrixConstant, *h_filter, sizeof(float) * KernelWidth * KernelWidth);
}
void convolution(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redFiltered,
unsigned char *d_greenFiltered,
unsigned char *d_blueFiltered,
const int filterWidth)
{
//Calcular tamaños de bloque
const dim3 blockSize(BLOCK_SIZE_X, BLOCK_SIZE_Y, 1);
const dim3 gridSize(ceil(1.0f*numCols / blockSize.x), ceil(1.0f*numRows / blockSize.y));
//Lanzar kernel para separar imagenes RGBA en diferentes colores
separateChannels << <gridSize, blockSize >> > (d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//Ejecutar convolución. Una por canal
//Box Filter
{
//comentar "d_filter," para memoria de constante
box_filter <<<gridSize, blockSize >> > (d_red, d_redFiltered, numRows, numCols,/* d_filter, */filterWidth);
box_filter <<<gridSize, blockSize >> > (d_green, d_greenFiltered, numRows, numCols,/* d_filter, */filterWidth);
box_filter <<<gridSize, blockSize >> > (d_blue, d_blueFiltered, numRows, numCols,/* d_filter, */filterWidth);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
// Recombining the results.
recombineChannels<<<gridSize, blockSize>>>(d_redFiltered,
d_greenFiltered,
d_blueFiltered,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
//comentar si memoria de constante
//checkCudaErrors(cudaFree(d_filter));
}
|
20b8963a48204912d61ba4112dde6151d0bf270b.hip | // !!! This is a file automatically generated by hipify!!!
///////////////////////////////////////////////////////////
// SLMParentsCUDA.cu
// Implementation of the Class AnalyseNeuronData
// CUDA optimized
// Created on: 05-june-2017 15:38:35
// Original author: Kim Bjerge
///////////////////////////////////////////////////////////
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "cutil_inline_runtime.h"
#include <stdio.h>
#include "SLMParentsCUDA.h"
#define DEBUG_MSG //printf
// Global random state variable on GPU
hiprandState_t* m_randStates = 0;
/**
* This GPU kernel function is used to initialize the random states
*
*/
__global__ void initRandom(unsigned int seed, hiprandState_t* states, int rowIdx)
{
//int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int seq = rowIdx + col;
if (seq < M*M) {
/* we have to initialize the state */
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
seq, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[col]);
}
}
void initCUDARandom(void)
{
/* CUDA's random number library uses hiprandState_t to keep track of the seed value
we will store a random state for every thread */
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(M / BLOCK_SIZE, M / BLOCK_SIZE);
/* allocate space on the GPU for the random states */
if (m_randStates == 0)
hipMalloc((void**)&m_randStates, (M*M) * sizeof(hiprandState_t));
/* invoke the GPU to initialize all of the random states */
//initRandom<<< grid, threads >>> ((int)time(0), m_randStates);
std::cout << "Initializing random numbers on GPU" << std::endl;
for (int i = 0; i < M; i++) {
		hipLaunchKernelGGL(( initRandom), dim3(M / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, (int)time(0), &m_randStates[M*i], M*i);
cutilSafeCall(hipDeviceSynchronize());
std::cout << M-i << '\r';
}
}
void freeCUDARandom(void)
{
if (m_randStates != 0)
hipFree(m_randStates);
}
/**
* CUDA Kernel Device code to generate binary random templates
*
*/
__global__ void
genBinaryTemplate( unsigned char* dst, hiprandState_t* states, int strideDst)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int index = row * strideDst + col;
/* hiprand works like rand - except that it takes a state as a parameter */
dst[index] = hiprand(&states[index]) % 2;
}
void GenBinaryCUDA(unsigned char* matrixCUDA_, int Stride_)
{
//setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid( M / BLOCK_SIZE, M / BLOCK_SIZE );
// DEBUG_MSG("Grid (Blocks) [%d,%d]\n", grid.x, grid.y);
//DEBUG_MSG("Threads in Block [%d,%d]\n", threads.x, threads.y);
// Generate binary template
hipLaunchKernelGGL(( genBinaryTemplate), dim3(grid), dim3(threads) , 0, 0, matrixCUDA_, m_randStates, Stride_);
//cutilSafeCall(hipDeviceSynchronize());
}
__global__ void
genBinaryInverseTemplate(unsigned char* dst, unsigned char* src, int strideDst, int strideSrc)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
dst[row * strideDst + col] = 1 - src[row * strideSrc + col];
}
void GenBinaryInverseCUDA(unsigned char* dst, unsigned char* src, int strideDst, int strideSrc)
{
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid( M / BLOCK_SIZE, M / BLOCK_SIZE );
// Generate binary template
hipLaunchKernelGGL(( genBinaryInverseTemplate), dim3(grid), dim3(threads) , 0, 0, dst, src, strideDst, strideSrc);
//cutilSafeCall(hipDeviceSynchronize());
}
__global__ void
multiplyTemplates(unsigned char* dst, unsigned char* src1, unsigned char* src2, int strideDst, int strideSrc)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
dst[row * strideDst + col] = src1[row * strideSrc + col] * src2[row * strideSrc + col];
}
void MultiplyCellCUDA(unsigned char* dst, unsigned char* src1, unsigned char* src2, int strideDst, int strideSrc)
{
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid( M / BLOCK_SIZE, M / BLOCK_SIZE );
// Generate binary template
hipLaunchKernelGGL(( multiplyTemplates), dim3(grid), dim3(threads) , 0, 0, dst, src1, src2, strideDst, strideSrc);
cutilSafeCall(hipDeviceSynchronize());
}
__global__ void
addTemplates( unsigned char* dst, unsigned char* src1, unsigned char* src2, int strideDst, int strideSrc)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
dst[row * strideDst + col] = src1[row * strideSrc + col] + src2[row * strideSrc + col];
}
void AddCellCUDA(unsigned char* dst, unsigned char* src1, unsigned char* src2, int strideDst, int strideSrc)
{
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid( M / BLOCK_SIZE, M / BLOCK_SIZE );
// Generate binary template
hipLaunchKernelGGL(( addTemplates), dim3(grid), dim3(threads) , 0, 0, dst, src1, src2, strideDst, strideSrc);
cutilSafeCall(hipDeviceSynchronize());
}
| 20b8963a48204912d61ba4112dde6151d0bf270b.cu | ///////////////////////////////////////////////////////////
// SLMParentsCUDA.cu
// Implementation of the Class AnalyseNeuronData
// CUDA optimized
// Created on: 05-june-2017 15:38:35
// Original author: Kim Bjerge
///////////////////////////////////////////////////////////
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <curand.h>
#include <curand_kernel.h>
#include "cutil_inline_runtime.h"
#include <stdio.h>
#include "SLMParentsCUDA.h"
#define DEBUG_MSG //printf
// Global random state variable on GPU
curandState_t* m_randStates = 0;
/**
* This GPU kernel function is used to initialize the random states
*
*/
__global__ void initRandom(unsigned int seed, curandState_t* states, int rowIdx)
{
//int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int seq = rowIdx + col;
if (seq < M*M) {
/* we have to initialize the state */
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
seq, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[col]);
}
}
void initCUDARandom(void)
{
/* CUDA's random number library uses curandState_t to keep track of the seed value
we will store a random state for every thread */
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(M / BLOCK_SIZE, M / BLOCK_SIZE);
/* allocate space on the GPU for the random states */
if (m_randStates == 0)
cudaMalloc((void**)&m_randStates, (M*M) * sizeof(curandState_t));
/* invoke the GPU to initialize all of the random states */
//initRandom<<< grid, threads >>> ((int)time(0), m_randStates);
std::cout << "Initializing random numbers on GPU" << std::endl;
for (int i = 0; i < M; i++) {
initRandom << < M / BLOCK_SIZE, BLOCK_SIZE >> > ((int)time(0), &m_randStates[M*i], M*i);
cutilSafeCall(cudaThreadSynchronize());
std::cout << M-i << '\r';
}
}
void freeCUDARandom(void)
{
if (m_randStates != 0)
cudaFree(m_randStates);
}
/**
* CUDA Kernel Device code to generate binary random templates
*
*/
__global__ void
genBinaryTemplate( unsigned char* dst, curandState_t* states, int strideDst)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int index = row * strideDst + col;
/* curand works like rand - except that it takes a state as a parameter */
dst[index] = curand(&states[index]) % 2;
}
void GenBinaryCUDA(unsigned char* matrixCUDA_, int Stride_)
{
//setup execution parameters
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid( M / BLOCK_SIZE, M / BLOCK_SIZE );
// DEBUG_MSG("Grid (Blocks) [%d,%d]\n", grid.x, grid.y);
//DEBUG_MSG("Threads in Block [%d,%d]\n", threads.x, threads.y);
// Generate binary template
genBinaryTemplate<<< grid, threads >>>(matrixCUDA_, m_randStates, Stride_);
//cutilSafeCall(cudaThreadSynchronize());
}
__global__ void
genBinaryInverseTemplate(unsigned char* dst, unsigned char* src, int strideDst, int strideSrc)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
dst[row * strideDst + col] = 1 - src[row * strideSrc + col];
}
void GenBinaryInverseCUDA(unsigned char* dst, unsigned char* src, int strideDst, int strideSrc)
{
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid( M / BLOCK_SIZE, M / BLOCK_SIZE );
// Generate binary template
genBinaryInverseTemplate<<< grid, threads >>>(dst, src, strideDst, strideSrc);
//cutilSafeCall(cudaThreadSynchronize());
}
__global__ void
multiplyTemplates(unsigned char* dst, unsigned char* src1, unsigned char* src2, int strideDst, int strideSrc)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
dst[row * strideDst + col] = src1[row * strideSrc + col] * src2[row * strideSrc + col];
}
void MultiplyCellCUDA(unsigned char* dst, unsigned char* src1, unsigned char* src2, int strideDst, int strideSrc)
{
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid( M / BLOCK_SIZE, M / BLOCK_SIZE );
// Generate binary template
multiplyTemplates<<< grid, threads >>>(dst, src1, src2, strideDst, strideSrc);
cutilSafeCall(cudaThreadSynchronize());
}
__global__ void
addTemplates( unsigned char* dst, unsigned char* src1, unsigned char* src2, int strideDst, int strideSrc)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
dst[row * strideDst + col] = src1[row * strideSrc + col] + src2[row * strideSrc + col];
}
void AddCellCUDA(unsigned char* dst, unsigned char* src1, unsigned char* src2, int strideDst, int strideSrc)
{
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid( M / BLOCK_SIZE, M / BLOCK_SIZE );
// Generate binary template
addTemplates<<< grid, threads >>>(dst, src1, src2, strideDst, strideSrc);
cutilSafeCall(cudaThreadSynchronize());
}
|
6f0abdc8289fba31e20a9d3e8ad5ee7b0e3f255b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include "util.hpp"
#include "gpu.hpp"
#define MAX_MASK_WIDTH 5
#define MAX_MASK_SIZE (MAX_MASK_WIDTH * MAX_MASK_WIDTH)
__constant__ float c_M[MAX_MASK_SIZE];
constexpr const int TILE_SIZE = 4;
__global__ void convolution_2D_tiled_kernel(float *P, float *N, int height,
int width, int pitch,
int mask_width) {
const int halo_w = mask_width / 2; // halo cells width (left or right)
// TILE_SIZE = blockDim.x = blockDim.y
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int bx = blockIdx.x;
const int by = blockIdx.y;
const int row_o = by * TILE_SIZE + ty;
const int col_o = bx * TILE_SIZE + tx;
// for simplicity, halo cells are accessed through global memory directly
// halo cells to the left and to the right of the current block
// are cached and trigger caching for the next block respectively.
// Halo cells up above and down below still need shared memory optimization
// (not implemented).
__shared__ float N_ds[TILE_SIZE][TILE_SIZE];
if (row_o < height && col_o < width) {
N_ds[ty][tx] = N[row_o * pitch + col_o];
} else {
N_ds[ty][tx] = 0.0f;
}
__syncthreads();
const int x_start = col_o - halo_w; // bx * TILE_SIZE + tx - m_w / 2
const int y_start = row_o - halo_w;
const int blk_x_a = bx * TILE_SIZE;
const int blk_x_b = (bx + 1) * TILE_SIZE;
const int blk_y_a = by * TILE_SIZE;
const int blk_y_b = (by + 1) * TILE_SIZE;
float output = 0.0f;
for (int i = 0; i < mask_width; ++i) {
for (int j = 0; j < mask_width; ++j) {
int x_idx = x_start + i;
int y_idx = y_start + j;
if (x_idx >= 0 && x_idx < width && //
y_idx >= 0 && y_idx < height) {
if (x_idx >= blk_x_a && x_idx < blk_x_b && //
y_idx >= blk_y_a && y_idx < blk_y_b) {
output +=
c_M[j * mask_width + i] * N_ds[ty + j - halo_w][tx + i - halo_w];
} else {
output += c_M[j * mask_width + i] * N[y_idx * pitch + x_idx];
}
}
}
}
if (row_o < height && col_o < width) {
P[row_o * pitch + col_o] = output;
}
}
void convolution_2D_tiled(float *P, float *N, int height, int width, int pitch,
int mask_width, float *M) {
std::cout << __PRETTY_FUNCTION__ << std::endl;
hipError_t err = hipSuccess;
float *d_P, *d_N;
int size_P = height * pitch * sizeof(float);
int size_N = height * pitch * sizeof(float);
err = hipMalloc((void **)&d_P, size_P);
gpuErrchk(err);
err = hipMalloc((void **)&d_N, size_N);
gpuErrchk(err);
err = hipMemcpy(d_P, P, size_P, hipMemcpyHostToDevice);
gpuErrchk(err);
err = hipMemcpy(d_N, N, size_N, hipMemcpyHostToDevice);
gpuErrchk(err);
err = hipMemcpyToSymbol(c_M, M, mask_width * mask_width * sizeof(float));
gpuErrchk(err);
int m_blocks = (height + TILE_SIZE - 1) / TILE_SIZE;
int n_blocks = (width + TILE_SIZE - 1) / TILE_SIZE;
dim3 dimGrid(m_blocks, n_blocks);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
// call kernel
printf("m_blocks = %d\n", m_blocks);
printf("n_blocks = %d\n", n_blocks);
hipLaunchKernelGGL(( convolution_2D_tiled_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_P, d_N, height, width,
pitch, mask_width);
if (hipSuccess != hipGetLastError())
printf("Error!\n");
err = hipMemcpy(P, d_P, size_N, hipMemcpyDeviceToHost);
gpuErrchk(err);
err = hipFree(d_P);
gpuErrchk(err);
err = hipFree(d_N);
gpuErrchk(err);
}
| 6f0abdc8289fba31e20a9d3e8ad5ee7b0e3f255b.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include "util.hpp"
#include "gpu.hpp"
#define MAX_MASK_WIDTH 5
#define MAX_MASK_SIZE (MAX_MASK_WIDTH * MAX_MASK_WIDTH)
__constant__ float c_M[MAX_MASK_SIZE];
constexpr const int TILE_SIZE = 4;
__global__ void convolution_2D_tiled_kernel(float *P, float *N, int height,
int width, int pitch,
int mask_width) {
const int halo_w = mask_width / 2; // halo cells width (left or right)
// TILE_SIZE = blockDim.x = blockDim.y
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int bx = blockIdx.x;
const int by = blockIdx.y;
const int row_o = by * TILE_SIZE + ty;
const int col_o = bx * TILE_SIZE + tx;
// for simplicity, halo cells are accessed through global memory directly
// halo cells to the left and to the right of the current block
// are cached and trigger caching for the next block respectively.
// Halo cells up above and down below still need shared memory optimization
// (not implemented).
__shared__ float N_ds[TILE_SIZE][TILE_SIZE];
if (row_o < height && col_o < width) {
N_ds[ty][tx] = N[row_o * pitch + col_o];
} else {
N_ds[ty][tx] = 0.0f;
}
__syncthreads();
const int x_start = col_o - halo_w; // bx * TILE_SIZE + tx - m_w / 2
const int y_start = row_o - halo_w;
const int blk_x_a = bx * TILE_SIZE;
const int blk_x_b = (bx + 1) * TILE_SIZE;
const int blk_y_a = by * TILE_SIZE;
const int blk_y_b = (by + 1) * TILE_SIZE;
float output = 0.0f;
for (int i = 0; i < mask_width; ++i) {
for (int j = 0; j < mask_width; ++j) {
int x_idx = x_start + i;
int y_idx = y_start + j;
if (x_idx >= 0 && x_idx < width && //
y_idx >= 0 && y_idx < height) {
if (x_idx >= blk_x_a && x_idx < blk_x_b && //
y_idx >= blk_y_a && y_idx < blk_y_b) {
output +=
c_M[j * mask_width + i] * N_ds[ty + j - halo_w][tx + i - halo_w];
} else {
output += c_M[j * mask_width + i] * N[y_idx * pitch + x_idx];
}
}
}
}
if (row_o < height && col_o < width) {
P[row_o * pitch + col_o] = output;
}
}
void convolution_2D_tiled(float *P, float *N, int height, int width, int pitch,
int mask_width, float *M) {
std::cout << __PRETTY_FUNCTION__ << std::endl;
cudaError_t err = cudaSuccess;
float *d_P, *d_N;
int size_P = height * pitch * sizeof(float);
int size_N = height * pitch * sizeof(float);
err = cudaMalloc((void **)&d_P, size_P);
gpuErrchk(err);
err = cudaMalloc((void **)&d_N, size_N);
gpuErrchk(err);
err = cudaMemcpy(d_P, P, size_P, cudaMemcpyHostToDevice);
gpuErrchk(err);
err = cudaMemcpy(d_N, N, size_N, cudaMemcpyHostToDevice);
gpuErrchk(err);
err = cudaMemcpyToSymbol(c_M, M, mask_width * mask_width * sizeof(float));
gpuErrchk(err);
int m_blocks = (height + TILE_SIZE - 1) / TILE_SIZE;
int n_blocks = (width + TILE_SIZE - 1) / TILE_SIZE;
dim3 dimGrid(m_blocks, n_blocks);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
// call kernel
printf("m_blocks = %d\n", m_blocks);
printf("n_blocks = %d\n", n_blocks);
convolution_2D_tiled_kernel<<<dimGrid, dimBlock>>>(d_P, d_N, height, width,
pitch, mask_width);
if (cudaSuccess != cudaGetLastError())
printf("Error!\n");
err = cudaMemcpy(P, d_P, size_N, cudaMemcpyDeviceToHost);
gpuErrchk(err);
err = cudaFree(d_P);
gpuErrchk(err);
err = cudaFree(d_N);
gpuErrchk(err);
}
|
b396a93b39377c0b5c7ddbb2bb710f33eb050550.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define GAPX (118)
#define EXTENT (5)
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX);
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the value
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*0];
t2 = input[__iter_3__+M*1];
}
// Rest of the computation
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_0__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
float __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
float __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_1__+(M)*(__iter_3__)] = t3;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_0__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
float __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
float __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_1__+(M)*(__iter_3__)] = t4;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_0__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
float __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
float __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
float __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
float __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_1__+(M)*(__iter_3__)] = t5;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_0__ = (__tilevar_5__[__iter_3__-__iter_0__] - b5);
float __temp_1__ = (__tilevar_5__[__iter_3__-__iter_0__] - b5);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_5__[__iter_3__-__iter_0__] - t5);
float __temp_5__ = (__tilevar_5__[__iter_3__-__iter_0__] - t5);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__+1-__iter_0__]);
float __temp_9__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__+1-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__-1-__iter_0__]);
float __temp_13__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__-1-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_5__[__iter_3__-__iter_0__] + __temp_17__);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_18__;
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
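// Second kernel: revisits the GAPX-wide column strips that kernel 0 skipped,
// plus an EXTENT-wide halo on each side; halo values of the intermediate
// stages are read back from the __copy_arr_0/1/2__ buffers that kernel 0
// wrote along its tile edges.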
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX) + (int)FORMA_BLOCKDIM_X;
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if (__iter_3__ >= FORMA_MAX(__iter_0__-2,0) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)] = input[__iter_3__+(M)*(0)];
t2 = input[__iter_3__+(M)*(1)];
}
// Rest of the computation
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__++) {
if(__iter_3__ >= FORMA_MAX(__iter_0__-2,0) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)] = t2;
t2 = input[__iter_3__+(M)*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
float __temp_0__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - b2);
float __temp_1__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - b2);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - t2);
float __temp_5__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - t2);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - __tilevar_2__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_9__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - __tilevar_2__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - __tilevar_2__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_13__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - __tilevar_2__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__+EXTENT-__iter_0__];
__tilevar_3__[__iter_3__+EXTENT-__iter_0__] = t3;
t3 = __temp_18__;
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-1),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)))) {
b3 = __copy_arr_0__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_3__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_0__[__iter_1__-1+(M)*(__iter_3__)];
t3 = __copy_arr_0__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
float __temp_0__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - b3);
float __temp_1__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - b3);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - t3);
float __temp_5__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - t3);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - __tilevar_3__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_9__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - __tilevar_3__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - __tilevar_3__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_13__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - __tilevar_3__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__+EXTENT-__iter_0__];
__tilevar_4__[__iter_3__+EXTENT-__iter_0__] = t4;
t4 = __temp_18__;
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-2),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)))) {
b4 = __copy_arr_1__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_4__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_1__[__iter_1__-1+(M)*(__iter_3__)];
t4 = __copy_arr_1__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_0__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - b4);
float __temp_1__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - b4);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - t4);
float __temp_5__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - t4);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - __tilevar_4__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_9__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - __tilevar_4__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - __tilevar_4__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_13__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - __tilevar_4__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__+EXTENT-__iter_0__];
__tilevar_5__[__iter_3__+EXTENT-__iter_0__] = t5;
t5 = __temp_18__;
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-5),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-3),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)))) {
b5 = __copy_arr_2__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_5__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_2__[__iter_1__-1+(M)*(__iter_3__)];
t5 = __copy_arr_2__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if( __iter_3__ >= FORMA_MAX((__iter_0__-4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_0__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - b5);
float __temp_1__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - b5);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - t5);
float __temp_5__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - t5);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - __tilevar_5__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_9__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - __tilevar_5__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - __tilevar_5__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_13__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - __tilevar_5__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] + __temp_17__);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_18__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void gradient (float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
hipMalloc(&__copy_arr_0__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
hipMalloc(&__copy_arr_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
hipMalloc(&__copy_arr_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
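// Both kernels reuse this launch configuration: the grid is sized for tiles of
// width blockDim.x + GAPX, and the dynamic shared memory holds the four
// blockDim.x-wide float line buffers sized by __blockSizeToSMemSize__...().
// Note that the queried __FORMA_MAX_SHARED_MEM__ is not checked against that size.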
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(__blockConfig___kernel___forma_kernel__0__), __SMemSize___kernel___forma_kernel__0__, 0, input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__copy_arr_0__);
hipFree(__copy_arr_1__);
hipFree(__copy_arr_2__);
}
/*Host Free End*/
| b396a93b39377c0b5c7ddbb2bb710f33eb050550.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#define GAPX (118)
#define EXTENT (5)
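// GAPX is the number of columns left uncovered between the tiles computed by
// kernel 0; EXTENT is the halo width kernel 1 needs on each side of such a
// gap (GAPX + 2*EXTENT matches the 128-thread block width used below).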
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX);
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialization: load row 0 into the shared tile and prefetch row 1 into registers
int __iter_3__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
__tilevar_2__[__iter_3__-__iter_0__] = input[__iter_3__+M*0];
t2 = input[__iter_3__+M*1];
}
// Rest of the computation
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__++) {
if(__iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__-__iter_0__];
__tilevar_2__[__iter_3__-__iter_0__] = t2;
t2 = input[__iter_3__+M*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
float __temp_0__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
float __temp_1__ = (__tilevar_2__[__iter_3__-__iter_0__] - b2);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
float __temp_5__ = (__tilevar_2__[__iter_3__-__iter_0__] - t2);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_9__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__+1-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_13__ = (__tilevar_2__[__iter_3__-__iter_0__] - __tilevar_2__[__iter_3__-1-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_2__[__iter_3__-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__-__iter_0__];
__tilevar_3__[__iter_3__-__iter_0__] = t3;
t3 = __temp_18__;
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(M-2))-2)) {
__copy_arr_0__[__iter_1__+(M)*(__iter_3__)] = t3;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
float __temp_0__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
float __temp_1__ = (__tilevar_3__[__iter_3__-__iter_0__] - b3);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
float __temp_5__ = (__tilevar_3__[__iter_3__-__iter_0__] - t3);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_9__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__+1-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_13__ = (__tilevar_3__[__iter_3__-__iter_0__] - __tilevar_3__[__iter_3__-1-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_3__[__iter_3__-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__-__iter_0__];
__tilevar_4__[__iter_3__-__iter_0__] = t4;
t4 = __temp_18__;
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(M-2))-2)) {
__copy_arr_1__[__iter_1__+(M)*(__iter_3__)] = t4;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
float __temp_0__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
float __temp_1__ = (__tilevar_4__[__iter_3__-__iter_0__] - b4);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
float __temp_5__ = (__tilevar_4__[__iter_3__-__iter_0__] - t4);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
float __temp_9__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__+1-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
float __temp_13__ = (__tilevar_4__[__iter_3__-__iter_0__] - __tilevar_4__[__iter_3__-1-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_4__[__iter_3__-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__-__iter_0__];
__tilevar_5__[__iter_3__-__iter_0__] = t5;
t5 = __temp_18__;
}
if(__iter_3__ >= FORMA_MAX((__iter_0__+3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2)) ){
if (__iter_3__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_3__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(M-2))-2)) {
__copy_arr_2__[__iter_1__+(M)*(__iter_3__)] = t5;
}
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__+4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(M-2)) ){
float __temp_0__ = (__tilevar_5__[__iter_3__-__iter_0__] - b5);
float __temp_1__ = (__tilevar_5__[__iter_3__-__iter_0__] - b5);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_5__[__iter_3__-__iter_0__] - t5);
float __temp_5__ = (__tilevar_5__[__iter_3__-__iter_0__] - t5);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__+1-__iter_0__]);
float __temp_9__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__+1-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__-1-__iter_0__]);
float __temp_13__ = (__tilevar_5__[__iter_3__-__iter_0__] - __tilevar_5__[__iter_3__-1-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_5__[__iter_3__-__iter_0__] + __temp_17__);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_18__;
}
}
}
int __blockSizeToSMemSize___kernel___forma_kernel__0__(dim3 blockDim){
int FORMA_BLOCKDIM_X = (int)(blockDim.x);
int SMemSize = 0;
SMemSize += sizeof(float)*(4*FORMA_BLOCKDIM_X);
return SMemSize;
}
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int N, int M, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, float * __restrict__ __var_1__){
int __FORMA_SHARED_MEM_OFFSET__ = 0;
float * __tilevar_2__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_3__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_4__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
float * __tilevar_5__ = (float*)(__FORMA_SHARED_MEM__+__FORMA_SHARED_MEM_OFFSET__);
__FORMA_SHARED_MEM_OFFSET__ += sizeof(float)*(FORMA_BLOCKDIM_X);
int __iter_0__ = (int)(blockIdx.x)*((int)FORMA_BLOCKDIM_X + GAPX) + (int)FORMA_BLOCKDIM_X;
float t2=0.0f, t3=0.0f, t4=0.0f, t5=0.0f;
float b2=0.0f, b3=0.0f, b4=0.0f, b5=0.0f;
// Initialize the values
int __iter_3__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if (__iter_3__ >= FORMA_MAX(__iter_0__-2,0) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)] = input[__iter_3__+(M)*(0)];
t2 = input[__iter_3__+(M)*(1)];
}
// Rest of the computation
for (int __iter_1__ = 1; __iter_1__ <= N-1; __iter_1__++) {
if(__iter_3__ >= FORMA_MAX(__iter_0__-2,0) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-1))){
b2 = __tilevar_2__[__iter_3__+(EXTENT-__iter_0__)];
__tilevar_2__[__iter_3__+(EXTENT-__iter_0__)] = t2;
t2 = input[__iter_3__+(M)*(__iter_1__+1)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-1),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)) ){
float __temp_0__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - b2);
float __temp_1__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - b2);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - t2);
float __temp_5__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - t2);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - __tilevar_2__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_9__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - __tilevar_2__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - __tilevar_2__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_13__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] - __tilevar_2__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_2__[__iter_3__+EXTENT-__iter_0__] + __temp_17__);
b3 = __tilevar_3__[__iter_3__+EXTENT-__iter_0__];
__tilevar_3__[__iter_3__+EXTENT-__iter_0__] = t3;
t3 = __temp_18__;
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-1),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(M-2)))) {
b3 = __copy_arr_0__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_3__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_0__[__iter_1__-1+(M)*(__iter_3__)];
t3 = __copy_arr_0__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-2),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)) ){
float __temp_0__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - b3);
float __temp_1__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - b3);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - t3);
float __temp_5__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - t3);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - __tilevar_3__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_9__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - __tilevar_3__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - __tilevar_3__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_13__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] - __tilevar_3__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_3__[__iter_3__+EXTENT-__iter_0__] + __temp_17__);
b4 = __tilevar_4__[__iter_3__+EXTENT-__iter_0__];
__tilevar_4__[__iter_3__+EXTENT-__iter_0__] = t4;
t4 = __temp_18__;
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-2),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(M-2)))) {
b4 = __copy_arr_1__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_4__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_1__[__iter_1__-1+(M)*(__iter_3__)];
t4 = __copy_arr_1__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if(__iter_3__ >= FORMA_MAX((__iter_0__-3),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)) ){
float __temp_0__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - b4);
float __temp_1__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - b4);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - t4);
float __temp_5__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - t4);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - __tilevar_4__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_9__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - __tilevar_4__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - __tilevar_4__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_13__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] - __tilevar_4__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_4__[__iter_3__+EXTENT-__iter_0__] + __temp_17__);
b5 = __tilevar_5__[__iter_3__+EXTENT-__iter_0__];
__tilevar_5__[__iter_3__+EXTENT-__iter_0__] = t5;
t5 = __temp_18__;
}
if (__iter_3__ >= FORMA_MAX((__iter_0__-5),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(M-2)) & (__iter_3__ < FORMA_MAX((__iter_0__-3),1) | __iter_3__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(M-2)))) {
b5 = __copy_arr_2__[__iter_1__-2+(M)*(__iter_3__)];
__tilevar_5__[__iter_3__+(EXTENT-__iter_0__)] = __copy_arr_2__[__iter_1__-1+(M)*(__iter_3__)];
t5 = __copy_arr_2__[__iter_1__+(M)*(__iter_3__)];
}
__syncthreads();
if( __iter_3__ >= FORMA_MAX((__iter_0__-4),1) & __iter_3__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(M-2)) ){
float __temp_0__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - b5);
float __temp_1__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - b5);
float __temp_2__ = (__temp_0__ * __temp_1__);
float __temp_3__ = (0.000100f + __temp_2__);
float __temp_4__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - t5);
float __temp_5__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - t5);
float __temp_6__ = (__temp_4__ * __temp_5__);
float __temp_7__ = (__temp_3__ + __temp_6__);
float __temp_8__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - __tilevar_5__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_9__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - __tilevar_5__[__iter_3__+1+EXTENT-__iter_0__]);
float __temp_10__ = (__temp_8__ * __temp_9__);
float __temp_11__ = (__temp_7__ + __temp_10__);
float __temp_12__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - __tilevar_5__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_13__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] - __tilevar_5__[__iter_3__-1+EXTENT-__iter_0__]);
float __temp_14__ = (__temp_12__ * __temp_13__);
float __temp_15__ = (__temp_11__ + __temp_14__);
float __temp_16__ = sqrt(__temp_15__);
float __temp_17__ = (1.000000f / __temp_16__);
float __temp_18__ = (__tilevar_5__[__iter_3__+EXTENT-__iter_0__] + __temp_17__);
__var_1__[__iter_3__+(M)*FORMA_MAX(__iter_1__-3,0)] = __temp_18__;
}
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void gradient (float * h_input, int N, int M, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*((N)*(M)), memcpy_kind_h_input);
}
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
cudaMalloc(&__copy_arr_0__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
cudaMalloc(&__copy_arr_1__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
cudaMalloc(&__copy_arr_2__,sizeof(float)*((N)*(M)));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = ((M-1) - 0 ) + 1;
int __block_0___kernel___forma_kernel__0__ = 128;
int __block_1___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__);
int __SMemSize___kernel___forma_kernel__0__ = 0;
__SMemSize___kernel___forma_kernel__0__ = __blockSizeToSMemSize___kernel___forma_kernel__0__(__blockConfig___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__);
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, __blockConfig___kernel___forma_kernel__0__, __SMemSize___kernel___forma_kernel__0__>>> (input, N, M, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*((N)*(M)), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__copy_arr_0__);
cudaFree(__copy_arr_1__);
cudaFree(__copy_arr_2__);
}
/*Host Free End*/
|
9e1226b189807f744cb867806a138c791e802d76.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <cfloat>
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace cuda { namespace device
{
// kernels
template <typename T> __global__ void resize_nearest(const PtrStep<T> src, PtrStepSz<T> dst, const float fy, const float fx)
{
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < dst.cols && dst_y < dst.rows)
{
const float src_x = dst_x * fx;
const float src_y = dst_y * fy;
dst(dst_y, dst_x) = src(__float2int_rz(src_y), __float2int_rz(src_x));
}
}
template <typename T> __global__ void resize_linear(const PtrStepSz<T> src, PtrStepSz<T> dst, const float fy, const float fx)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < dst.cols && dst_y < dst.rows)
{
const float src_x = dst_x * fx;
const float src_y = dst_y * fy;
work_type out = VecTraits<work_type>::all(0);
const int x1 = __float2int_rd(src_x);
const int y1 = __float2int_rd(src_y);
const int x2 = x1 + 1;
const int y2 = y1 + 1;
const int x2_read = ::min(x2, src.cols - 1);
const int y2_read = ::min(y2, src.rows - 1);
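// bilinear interpolation: each of the four neighbours is weighted by the
// opposite fractional distances; x2_read/y2_read clamp the reads at the
// right/bottom image border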
T src_reg = src(y1, x1);
out = out + src_reg * ((x2 - src_x) * (y2 - src_y));
src_reg = src(y1, x2_read);
out = out + src_reg * ((src_x - x1) * (y2 - src_y));
src_reg = src(y2_read, x1);
out = out + src_reg * ((x2 - src_x) * (src_y - y1));
src_reg = src(y2_read, x2_read);
out = out + src_reg * ((src_x - x1) * (src_y - y1));
dst(dst_y, dst_x) = saturate_cast<T>(out);
}
}
template <class Ptr2D, typename T> __global__ void resize(const Ptr2D src, PtrStepSz<T> dst, const float fy, const float fx)
{
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < dst.cols && dst_y < dst.rows)
{
const float src_x = dst_x * fx;
const float src_y = dst_y * fy;
dst(dst_y, dst_x) = src(src_y, src_x);
}
}
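// the Ptr2D functor passed in (an AreaFilter or IntegerAreaFilter wrapped
// around a BorderReader) does the actual averaging; this kernel just
// evaluates it once per destination pixel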
template <typename Ptr2D, typename T> __global__ void resize_area(const Ptr2D src, PtrStepSz<T> dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
dst(y, x) = src(y, x);
}
}
// textures
template <typename T> struct TextureAccessor;
#define OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(type) \
texture<type, hipTextureType2D, hipReadModeElementType> tex_resize_##type (0, hipFilterModePoint, hipAddressModeClamp); \
template <> struct TextureAccessor<type> \
{ \
typedef type elem_type; \
typedef int index_type; \
int xoff; \
int yoff; \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_resize_##type, x + xoff, y + yoff); \
} \
__host__ static void bind(const PtrStepSz<type>& mat) \
{ \
bindTexture(&tex_resize_##type, mat); \
} \
};
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(uchar)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(uchar4)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(ushort)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(ushort4)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(short)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(short4)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(float)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(float4)
#undef OPENCV_CUDA_IMPLEMENT_RESIZE_TEX
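// Each instantiation above declares a per-type global texture reference with
// point sampling and clamp addressing; TextureAccessor adds the (xoff, yoff)
// offset of the ROI inside the bound parent image.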
template <typename T>
TextureAccessor<T> texAccessor(const PtrStepSz<T>& mat, int yoff, int xoff)
{
TextureAccessor<T>::bind(mat);
TextureAccessor<T> t;
t.xoff = xoff;
t.yoff = yoff;
return t;
}
// callers for nearest interpolation
template <typename T>
void call_resize_nearest_glob(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( resize_nearest), dim3(grid), dim3(block), 0, stream, src, dst, fy, fx);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T>
void call_resize_nearest_tex(const PtrStepSz<T>& /*src*/, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, 0, texAccessor(srcWhole, yoff, xoff), dst, fy, fx);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
// callers for linear interpolation
template <typename T>
void call_resize_linear_glob(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
hipLaunchKernelGGL(( resize_linear), dim3(grid), dim3(block), 0, stream, src, dst, fy, fx);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T>
void call_resize_linear_tex(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
if (srcWhole.data == src.data)
{
TextureAccessor<T> texSrc = texAccessor(src, 0, 0);
LinearFilter< TextureAccessor<T> > filteredSrc(texSrc);
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, 0, filteredSrc, dst, fy, fx);
}
else
{
TextureAccessor<T> texSrc = texAccessor(srcWhole, yoff, xoff);
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader<TextureAccessor<T>, BrdReplicate<T> > brdSrc(texSrc, brd);
LinearFilter< BorderReader<TextureAccessor<T>, BrdReplicate<T> > > filteredSrc(brdSrc);
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, 0, filteredSrc, dst, fy, fx);
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
// callers for cubic interpolation
template <typename T>
void call_resize_cubic_glob(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
CubicFilter< BorderReader< PtrStep<T>, BrdReplicate<T> > > filteredSrc(brdSrc);
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, stream, filteredSrc, dst, fy, fx);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T>
void call_resize_cubic_tex(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
if (srcWhole.data == src.data)
{
TextureAccessor<T> texSrc = texAccessor(src, 0, 0);
CubicFilter< TextureAccessor<T> > filteredSrc(texSrc);
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, 0, filteredSrc, dst, fy, fx);
}
else
{
TextureAccessor<T> texSrc = texAccessor(srcWhole, yoff, xoff);
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader<TextureAccessor<T>, BrdReplicate<T> > brdSrc(texSrc, brd);
CubicFilter< BorderReader<TextureAccessor<T>, BrdReplicate<T> > > filteredSrc(brdSrc);
hipLaunchKernelGGL(( resize), dim3(grid), dim3(block), 0, 0, filteredSrc, dst, fy, fx);
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
// ResizeNearestDispatcher
template <typename T> struct ResizeNearestDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& /*srcWhole*/, int /*yoff*/, int /*xoff*/, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
call_resize_nearest_glob(src, dst, fy, fx, stream);
}
};
template <typename T> struct SelectImplForNearest
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
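// the texture path binds a static texture reference and synchronizes the
// device; it is only taken for synchronous calls with fx <= 1 && fy <= 1
// (enlarging), while streams and downscaling use the global-memory kernel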
if (stream)
call_resize_nearest_glob(src, dst, fy, fx, stream);
else
{
if (fx > 1 || fy > 1)
call_resize_nearest_glob(src, dst, fy, fx, 0);
else
call_resize_nearest_tex(src, srcWhole, yoff, xoff, dst, fy, fx);
}
}
};
template <> struct ResizeNearestDispatcher<uchar> : SelectImplForNearest<uchar> {};
template <> struct ResizeNearestDispatcher<uchar4> : SelectImplForNearest<uchar4> {};
template <> struct ResizeNearestDispatcher<ushort> : SelectImplForNearest<ushort> {};
template <> struct ResizeNearestDispatcher<ushort4> : SelectImplForNearest<ushort4> {};
template <> struct ResizeNearestDispatcher<short> : SelectImplForNearest<short> {};
template <> struct ResizeNearestDispatcher<short4> : SelectImplForNearest<short4> {};
template <> struct ResizeNearestDispatcher<float> : SelectImplForNearest<float> {};
template <> struct ResizeNearestDispatcher<float4> : SelectImplForNearest<float4> {};
// ResizeLinearDispatcher
template <typename T> struct ResizeLinearDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& /*srcWhole*/, int /*yoff*/, int /*xoff*/, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
call_resize_linear_glob(src, dst, fy, fx, stream);
}
};
template <typename T> struct SelectImplForLinear
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
if (stream)
call_resize_linear_glob(src, dst, fy, fx, stream);
else
{
if (fx > 1 || fy > 1)
call_resize_linear_glob(src, dst, fy, fx, 0);
else
call_resize_linear_tex(src, srcWhole, yoff, xoff, dst, fy, fx);
}
}
};
template <> struct ResizeLinearDispatcher<uchar> : SelectImplForLinear<uchar> {};
template <> struct ResizeLinearDispatcher<uchar4> : SelectImplForLinear<uchar4> {};
template <> struct ResizeLinearDispatcher<ushort> : SelectImplForLinear<ushort> {};
template <> struct ResizeLinearDispatcher<ushort4> : SelectImplForLinear<ushort4> {};
template <> struct ResizeLinearDispatcher<short> : SelectImplForLinear<short> {};
template <> struct ResizeLinearDispatcher<short4> : SelectImplForLinear<short4> {};
template <> struct ResizeLinearDispatcher<float> : SelectImplForLinear<float> {};
template <> struct ResizeLinearDispatcher<float4> : SelectImplForLinear<float4> {};
// ResizeCubicDispatcher
template <typename T> struct ResizeCubicDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& /*srcWhole*/, int /*yoff*/, int /*xoff*/, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
call_resize_cubic_glob(src, dst, fy, fx, stream);
}
};
template <typename T> struct SelectImplForCubic
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
if (stream)
call_resize_cubic_glob(src, dst, fy, fx, stream);
else
call_resize_cubic_tex(src, srcWhole, yoff, xoff, dst, fy, fx);
}
};
template <> struct ResizeCubicDispatcher<uchar> : SelectImplForCubic<uchar> {};
template <> struct ResizeCubicDispatcher<uchar4> : SelectImplForCubic<uchar4> {};
template <> struct ResizeCubicDispatcher<ushort> : SelectImplForCubic<ushort> {};
template <> struct ResizeCubicDispatcher<ushort4> : SelectImplForCubic<ushort4> {};
template <> struct ResizeCubicDispatcher<short> : SelectImplForCubic<short> {};
template <> struct ResizeCubicDispatcher<short4> : SelectImplForCubic<short4> {};
template <> struct ResizeCubicDispatcher<float> : SelectImplForCubic<float> {};
template <> struct ResizeCubicDispatcher<float4> : SelectImplForCubic<float4> {};
// ResizeAreaDispatcher
template <typename T> struct ResizeAreaDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>&, int, int, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream)
{
const int iscale_x = (int) round(fx);
const int iscale_y = (int) round(fy);
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
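// (near-)integer scale factors take the IntegerAreaFilter path, fractional
// factors the generic AreaFilter; both read through a BrdConstant border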
if (std::abs(fx - iscale_x) < FLT_MIN && std::abs(fy - iscale_y) < FLT_MIN)
{
BrdConstant<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdConstant<T> > brdSrc(src, brd);
IntegerAreaFilter< BorderReader< PtrStep<T>, BrdConstant<T> > > filteredSrc(brdSrc, fx, fy);
hipLaunchKernelGGL(( resize_area), dim3(grid), dim3(block), 0, stream, filteredSrc, dst);
}
else
{
BrdConstant<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdConstant<T> > brdSrc(src, brd);
AreaFilter< BorderReader< PtrStep<T>, BrdConstant<T> > > filteredSrc(brdSrc, fx, fy);
hipLaunchKernelGGL(( resize_area), dim3(grid), dim3(block), 0, stream, filteredSrc, dst);
}
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
};
// resize
template <typename T> void resize(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream)
{
typedef void (*func_t)(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, hipStream_t stream);
static const func_t funcs[4] =
{
ResizeNearestDispatcher<T>::call,
ResizeLinearDispatcher<T>::call,
ResizeCubicDispatcher<T>::call,
ResizeAreaDispatcher<T>::call
};
// fall back to linear when area interpolation (index 3) is requested for upscaling;
// fx and fy map dst coords to src coords, so values <= 1 mean enlargement
if (interpolation == 3 && (fx <= 1.f || fy <= 1.f))
interpolation = 1;
funcs[interpolation](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(srcWhole), yoff, xoff, static_cast< PtrStepSz<T> >(dst), fy, fx, stream);
}
template void resize<uchar >(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<uchar3>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<uchar4>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<ushort >(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<ushort3>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<ushort4>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<short >(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<short3>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<short4>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<float >(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<float3>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
template void resize<float4>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, hipStream_t stream);
}}}
#endif /* CUDA_DISABLER */
| 9e1226b189807f744cb867806a138c791e802d76.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <cfloat>
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/filters.hpp"
namespace cv { namespace cuda { namespace device
{
// kernels
template <typename T> __global__ void resize_nearest(const PtrStep<T> src, PtrStepSz<T> dst, const float fy, const float fx)
{
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < dst.cols && dst_y < dst.rows)
{
const float src_x = dst_x * fx;
const float src_y = dst_y * fy;
dst(dst_y, dst_x) = src(__float2int_rz(src_y), __float2int_rz(src_x));
}
}
template <typename T> __global__ void resize_linear(const PtrStepSz<T> src, PtrStepSz<T> dst, const float fy, const float fx)
{
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < dst.cols && dst_y < dst.rows)
{
const float src_x = dst_x * fx;
const float src_y = dst_y * fy;
work_type out = VecTraits<work_type>::all(0);
const int x1 = __float2int_rd(src_x);
const int y1 = __float2int_rd(src_y);
const int x2 = x1 + 1;
const int y2 = y1 + 1;
const int x2_read = ::min(x2, src.cols - 1);
const int y2_read = ::min(y2, src.rows - 1);
T src_reg = src(y1, x1);
out = out + src_reg * ((x2 - src_x) * (y2 - src_y));
src_reg = src(y1, x2_read);
out = out + src_reg * ((src_x - x1) * (y2 - src_y));
src_reg = src(y2_read, x1);
out = out + src_reg * ((x2 - src_x) * (src_y - y1));
src_reg = src(y2_read, x2_read);
out = out + src_reg * ((src_x - x1) * (src_y - y1));
dst(dst_y, dst_x) = saturate_cast<T>(out);
}
}
template <class Ptr2D, typename T> __global__ void resize(const Ptr2D src, PtrStepSz<T> dst, const float fy, const float fx)
{
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
if (dst_x < dst.cols && dst_y < dst.rows)
{
const float src_x = dst_x * fx;
const float src_y = dst_y * fy;
dst(dst_y, dst_x) = src(src_y, src_x);
}
}
template <typename Ptr2D, typename T> __global__ void resize_area(const Ptr2D src, PtrStepSz<T> dst)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < dst.cols && y < dst.rows)
{
dst(y, x) = src(y, x);
}
}
// textures
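// Each supported element type below gets its own global texture reference bound to the
// whole source image; the (xoff, yoff) offsets stored in TextureAccessor are added inside
// operator(), so an ROI view (src inside srcWhole) can still be sampled through the texture.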
template <typename T> struct TextureAccessor;
#define OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(type) \
texture<type, cudaTextureType2D, cudaReadModeElementType> tex_resize_##type (0, cudaFilterModePoint, cudaAddressModeClamp); \
template <> struct TextureAccessor<type> \
{ \
typedef type elem_type; \
typedef int index_type; \
int xoff; \
int yoff; \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_resize_##type, x + xoff, y + yoff); \
} \
__host__ static void bind(const PtrStepSz<type>& mat) \
{ \
bindTexture(&tex_resize_##type, mat); \
} \
};
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(uchar)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(uchar4)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(ushort)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(ushort4)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(short)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(short4)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(float)
OPENCV_CUDA_IMPLEMENT_RESIZE_TEX(float4)
#undef OPENCV_CUDA_IMPLEMENT_RESIZE_TEX
template <typename T>
TextureAccessor<T> texAccessor(const PtrStepSz<T>& mat, int yoff, int xoff)
{
TextureAccessor<T>::bind(mat);
TextureAccessor<T> t;
t.xoff = xoff;
t.yoff = yoff;
return t;
}
// callers for nearest interpolation
template <typename T>
void call_resize_nearest_glob(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
resize_nearest<<<grid, block, 0, stream>>>(src, dst, fy, fx);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T>
void call_resize_nearest_tex(const PtrStepSz<T>& /*src*/, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
resize<<<grid, block>>>(texAccessor(srcWhole, yoff, xoff), dst, fy, fx);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
// callers for linear interpolation
template <typename T>
void call_resize_linear_glob(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
resize_linear<<<grid, block, 0, stream>>>(src, dst, fy, fx);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T>
void call_resize_linear_tex(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
if (srcWhole.data == src.data)
{
TextureAccessor<T> texSrc = texAccessor(src, 0, 0);
LinearFilter< TextureAccessor<T> > filteredSrc(texSrc);
resize<<<grid, block>>>(filteredSrc, dst, fy, fx);
}
else
{
TextureAccessor<T> texSrc = texAccessor(srcWhole, yoff, xoff);
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader<TextureAccessor<T>, BrdReplicate<T> > brdSrc(texSrc, brd);
LinearFilter< BorderReader<TextureAccessor<T>, BrdReplicate<T> > > filteredSrc(brdSrc);
resize<<<grid, block>>>(filteredSrc, dst, fy, fx);
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
// callers for cubic interpolation
template <typename T>
void call_resize_cubic_glob(const PtrStepSz<T>& src, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdReplicate<T> > brdSrc(src, brd);
CubicFilter< BorderReader< PtrStep<T>, BrdReplicate<T> > > filteredSrc(brdSrc);
resize<<<grid, block, 0, stream>>>(filteredSrc, dst, fy, fx);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T>
void call_resize_cubic_tex(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx)
{
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
if (srcWhole.data == src.data)
{
TextureAccessor<T> texSrc = texAccessor(src, 0, 0);
CubicFilter< TextureAccessor<T> > filteredSrc(texSrc);
resize<<<grid, block>>>(filteredSrc, dst, fy, fx);
}
else
{
TextureAccessor<T> texSrc = texAccessor(srcWhole, yoff, xoff);
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader<TextureAccessor<T>, BrdReplicate<T> > brdSrc(texSrc, brd);
CubicFilter< BorderReader<TextureAccessor<T>, BrdReplicate<T> > > filteredSrc(brdSrc);
resize<<<grid, block>>>(filteredSrc, dst, fy, fx);
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
// ResizeNearestDispatcher
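// Dispatch strategy (the linear and cubic dispatchers below follow the same pattern):
// the generic dispatcher always uses the global-memory kernel, while the SelectImplFor*
// specializations switch to the texture path only for synchronous calls (stream == 0),
// because the texture helpers bind global texture references and end with a blocking
// cudaDeviceSynchronize(); the nearest and linear variants additionally avoid the
// texture path when upscaling (fx > 1 || fy > 1).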
template <typename T> struct ResizeNearestDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& /*srcWhole*/, int /*yoff*/, int /*xoff*/, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
call_resize_nearest_glob(src, dst, fy, fx, stream);
}
};
template <typename T> struct SelectImplForNearest
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
if (stream)
call_resize_nearest_glob(src, dst, fy, fx, stream);
else
{
if (fx > 1 || fy > 1)
call_resize_nearest_glob(src, dst, fy, fx, 0);
else
call_resize_nearest_tex(src, srcWhole, yoff, xoff, dst, fy, fx);
}
}
};
template <> struct ResizeNearestDispatcher<uchar> : SelectImplForNearest<uchar> {};
template <> struct ResizeNearestDispatcher<uchar4> : SelectImplForNearest<uchar4> {};
template <> struct ResizeNearestDispatcher<ushort> : SelectImplForNearest<ushort> {};
template <> struct ResizeNearestDispatcher<ushort4> : SelectImplForNearest<ushort4> {};
template <> struct ResizeNearestDispatcher<short> : SelectImplForNearest<short> {};
template <> struct ResizeNearestDispatcher<short4> : SelectImplForNearest<short4> {};
template <> struct ResizeNearestDispatcher<float> : SelectImplForNearest<float> {};
template <> struct ResizeNearestDispatcher<float4> : SelectImplForNearest<float4> {};
// ResizeLinearDispatcher
template <typename T> struct ResizeLinearDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& /*srcWhole*/, int /*yoff*/, int /*xoff*/, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
call_resize_linear_glob(src, dst, fy, fx, stream);
}
};
template <typename T> struct SelectImplForLinear
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
if (stream)
call_resize_linear_glob(src, dst, fy, fx, stream);
else
{
if (fx > 1 || fy > 1)
call_resize_linear_glob(src, dst, fy, fx, 0);
else
call_resize_linear_tex(src, srcWhole, yoff, xoff, dst, fy, fx);
}
}
};
template <> struct ResizeLinearDispatcher<uchar> : SelectImplForLinear<uchar> {};
template <> struct ResizeLinearDispatcher<uchar4> : SelectImplForLinear<uchar4> {};
template <> struct ResizeLinearDispatcher<ushort> : SelectImplForLinear<ushort> {};
template <> struct ResizeLinearDispatcher<ushort4> : SelectImplForLinear<ushort4> {};
template <> struct ResizeLinearDispatcher<short> : SelectImplForLinear<short> {};
template <> struct ResizeLinearDispatcher<short4> : SelectImplForLinear<short4> {};
template <> struct ResizeLinearDispatcher<float> : SelectImplForLinear<float> {};
template <> struct ResizeLinearDispatcher<float4> : SelectImplForLinear<float4> {};
// ResizeCubicDispatcher
template <typename T> struct ResizeCubicDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& /*srcWhole*/, int /*yoff*/, int /*xoff*/, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
call_resize_cubic_glob(src, dst, fy, fx, stream);
}
};
template <typename T> struct SelectImplForCubic
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
if (stream)
call_resize_cubic_glob(src, dst, fy, fx, stream);
else
call_resize_cubic_tex(src, srcWhole, yoff, xoff, dst, fy, fx);
}
};
template <> struct ResizeCubicDispatcher<uchar> : SelectImplForCubic<uchar> {};
template <> struct ResizeCubicDispatcher<uchar4> : SelectImplForCubic<uchar4> {};
template <> struct ResizeCubicDispatcher<ushort> : SelectImplForCubic<ushort> {};
template <> struct ResizeCubicDispatcher<ushort4> : SelectImplForCubic<ushort4> {};
template <> struct ResizeCubicDispatcher<short> : SelectImplForCubic<short> {};
template <> struct ResizeCubicDispatcher<short4> : SelectImplForCubic<short4> {};
template <> struct ResizeCubicDispatcher<float> : SelectImplForCubic<float> {};
template <> struct ResizeCubicDispatcher<float4> : SelectImplForCubic<float4> {};
// ResizeAreaDispatcher
template <typename T> struct ResizeAreaDispatcher
{
static void call(const PtrStepSz<T>& src, const PtrStepSz<T>&, int, int, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream)
{
const int iscale_x = (int) round(fx);
const int iscale_y = (int) round(fy);
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
if (std::abs(fx - iscale_x) < FLT_MIN && std::abs(fy - iscale_y) < FLT_MIN)
{
BrdConstant<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdConstant<T> > brdSrc(src, brd);
IntegerAreaFilter< BorderReader< PtrStep<T>, BrdConstant<T> > > filteredSrc(brdSrc, fx, fy);
resize_area<<<grid, block, 0, stream>>>(filteredSrc, dst);
}
else
{
BrdConstant<T> brd(src.rows, src.cols);
BorderReader< PtrStep<T>, BrdConstant<T> > brdSrc(src, brd);
AreaFilter< BorderReader< PtrStep<T>, BrdConstant<T> > > filteredSrc(brdSrc, fx, fy);
resize_area<<<grid, block, 0, stream>>>(filteredSrc, dst);
}
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
};
// resize
template <typename T> void resize(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream)
{
typedef void (*func_t)(const PtrStepSz<T>& src, const PtrStepSz<T>& srcWhole, int yoff, int xoff, const PtrStepSz<T>& dst, float fy, float fx, cudaStream_t stream);
static const func_t funcs[4] =
{
ResizeNearestDispatcher<T>::call,
ResizeLinearDispatcher<T>::call,
ResizeCubicDispatcher<T>::call,
ResizeAreaDispatcher<T>::call
};
// change to linear if area interpolation upscaling
if (interpolation == 3 && (fx <= 1.f || fy <= 1.f))
interpolation = 1;
funcs[interpolation](static_cast< PtrStepSz<T> >(src), static_cast< PtrStepSz<T> >(srcWhole), yoff, xoff, static_cast< PtrStepSz<T> >(dst), fy, fx, stream);
}
template void resize<uchar >(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<uchar3>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<uchar4>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<ushort >(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<ushort3>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<ushort4>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<short >(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<short3>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<short4>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<float >(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<float3>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
template void resize<float4>(const PtrStepSzb& src, const PtrStepSzb& srcWhole, int yoff, int xoff, const PtrStepSzb& dst, float fy, float fx, int interpolation, cudaStream_t stream);
}}}
#endif /* CUDA_DISABLER */
|
539e47d67e91e28763b36d74a0df37fdee8e1b83.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <hiprand/hiprand_kernel.h>
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <thrust/extrema.h>
#include "Particle.h"
#include "utility_kernels.cuh"
#include "matrix_operations.cuh"
#include "quaternion_operations.cuh"
#include "GJKIntersection.cuh"
#include "ik_constants.h"
__host__ __device__ int getParticleIndex(int particleCount, int particleIdx, ParticleProperty propType, int dimension)
{
int idx = particleIdx + particleCount * dimension;
if (propType == velocity)
{
idx += particleCount * DEGREES_OF_FREEDOM;
}
else if (propType == localBest)
{
idx += 2 * particleCount * DEGREES_OF_FREEDOM;
}
return idx;
}
__device__ void updateChainMatrices(NodeCUDA *chain, int particleCount, float* particles, int particleIdx, Matrix *matrices)
{
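// Forward kinematics: node 0 uses the chain's own position/rotation; every other node
// composes its parent's world matrix with the Euler rotation read from this particle's
// position vector and a translation of `length` along the local x axis (composition
// order as defined by the helpers in matrix_operations.cuh).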
int nodeCount = NODE_COUNT;
int nodeIndex = 0;
Matrix matrix = createMatrix(1.0f);
matrix = translateMatrix(matrix, chain[nodeIndex].position);
matrix = rotateEuler(matrix, chain[nodeIndex].rotation);
for (int i = 0; i < 16; i++)
{
matrices[nodeIndex].cells[i] = matrix.cells[i];
}
for (nodeIndex = 1; nodeIndex < nodeCount; nodeIndex++)
{
int dimensionIdx = (nodeIndex - 1) * 3;
int positionIdx = getParticleIndex(particleCount, particleIdx, position, dimensionIdx);
float3 particleEulerRotation = make_float3(particles[positionIdx],
particles[positionIdx + particleCount],
particles[positionIdx + particleCount * 2]);
Matrix tempMat = createMatrix(1.0f);
tempMat = rotateEuler(tempMat, particleEulerRotation);
tempMat = translateMatrix(tempMat, make_float3(chain[nodeIndex].length, 0.0f, 0.0f));
int parentIdx = chain[nodeIndex].parentIndex;
matrix = multiplyMatrices(matrices[parentIdx], tempMat);
for (int i = 0; i < 16; i++)
{
matrices[nodeIndex].cells[i] = matrix.cells[i];
}
}
}
__device__ float calculateDistance(NodeCUDA *chain, float* positions, int particleCount, float* particles, int particleIdx, obj_t* colliders, int colliderCount, FitnessConfig fitConfig)
{
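// Fitness of one particle: returns FLT_MAX as soon as any node or link collider
// intersects an obstacle (GJK test); otherwise accumulates the weighted squared distance
// of every effector to its target, plus two regularization terms - positionDifferenceMag
// (deviation of the node positions from the reference poses stored in `positions`)
// scaled by distanceWeight, and rotationDifference (deviation of the particle's joint
// angles from the chain's current rotations) scaled by angleWeight.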
float rotationDifference = 0.0f;
float positionDifferenceMag = 0.0f;
float distance = 0.0f;
int nodeCount = NODE_COUNT;
Matrix matrices[NODE_COUNT];
updateChainMatrices(chain, particleCount, particles, particleIdx, matrices);
for (int ind = 1; ind < nodeCount; ind++)
{
int dimensionIdx = (ind - 1) * 3;
int positionIdx = getParticleIndex(particleCount, particleIdx, position, dimensionIdx);
float3 chainRotation = chain[ind].rotation;
float3 particleRotation = make_float3(particles[positionIdx],
particles[positionIdx + particleCount],
particles[positionIdx + particleCount * 2]);
rotationDifference = rotationDifference + magnitudeSqr(chainRotation - particleRotation);
float4 originVector = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
Matrix model;
for (int i = 0; i < 16; i++)
{
model.cells[i] = matrices[ind].cells[i];
}
float4 position = multiplyMatByVec(model, originVector);
int armInd = (ind - 1) * 4;
float4 armPosition = make_float4(positions[armInd],
positions[armInd + 1],
positions[armInd + 2],
positions[armInd + 3]);
float4 positionDifference = position - armPosition;
positionDifferenceMag += magnitudeSqr(positionDifference);
float4 rotation = matrixToQuaternion(model);
obj_t nodeCollider;
nodeCollider.pos = make_float3(position.x, position.y, position.z);
nodeCollider.quat = rotation;
nodeCollider.x = nodeCollider.y = nodeCollider.z = GIZMO_SIZE;
obj_t linkCollider;
float4 startPos = multiplyMatByVec(model, originVector); //this node
float4 endPos = multiplyMatByVec(matrices[chain[ind].parentIndex], originVector); //parent node
float4 centerPos = (startPos + endPos) * 0.5f;
linkCollider.pos = make_float3(centerPos.x, centerPos.y, centerPos.z);
linkCollider.quat = rotation;
linkCollider.x = chain[ind].length;
linkCollider.y = linkCollider.z = GIZMO_SIZE * 0.25f;
GJKData_t gjkData;
CCD_INIT(&gjkData);
gjkData.max_iterations = GJK_ITERATIONS;
int intersects = 0;
for (int i = 0; i < colliderCount; i++)
{
intersects = GJKIntersect(&nodeCollider, &colliders[i], &gjkData);
if (intersects)
{
return FLT_MAX;
}
intersects = GJKIntersect(&linkCollider, &colliders[i], &gjkData);
if (intersects)
{
return FLT_MAX;
}
}
if (chain[ind].nodeType == NodeType::effectorNode)
{
float distTmp = magnitudeSqr(make_float3(
position.x - chain[ind].targetPosition.x,
position.y - chain[ind].targetPosition.y,
position.z - chain[ind].targetPosition.z));
distance = distance + distTmp * chain[ind].effectorWeight;
}
}
return distance + fitConfig.distanceWeight / (DEGREES_OF_FREEDOM / 3) * positionDifferenceMag + fitConfig.angleWeight / (DEGREES_OF_FREEDOM / 3) * rotationDifference;
}
__global__ void simulateParticlesKernel(float *particles, float* positions, float *localBests, hiprandState_t *randoms, int size, NodeCUDA *chain, PSOConfig psoConfig, Coordinates *global, float globalMin)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = id; i < size; i += stride)
{
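// Per-dimension PSO update: the new velocity mixes the previous velocity (inertia term),
// the pull towards this particle's best known position (local/cognitive term) and the
// pull towards the swarm's best (global/social term); note that in this variant every
// term, including inertia, is scaled by its own uniform random draw.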
for (int deg = 0; deg < DEGREES_OF_FREEDOM; deg++)
{
int velocityIdx = getParticleIndex(size, i, velocity, deg);
int positionIdx = getParticleIndex(size, i, position, deg);
particles[velocityIdx] = psoConfig._inertia * hiprand_uniform(&randoms[i]) * particles[velocityIdx] +
psoConfig._local * hiprand_uniform(&randoms[i]) * (particles[getParticleIndex(size, i, localBest, deg)] - particles[positionIdx]) +
psoConfig._global * hiprand_uniform(&randoms[i]) * (global->positions[deg] - particles[positionIdx]);
particles[positionIdx] += particles[velocityIdx];
}
for (int ind = 1; ind <= DEGREES_OF_FREEDOM / 3; ind++)
{
int deg = (ind - 1) * 3;
int xPositionIdx = getParticleIndex(size, i, position, deg);
int yPositionIdx = getParticleIndex(size, i, position, deg + 1);
int zPositionIdx = getParticleIndex(size, i, position, deg + 2);
float posX = particles[xPositionIdx];
float posY = particles[yPositionIdx];
float posZ = particles[zPositionIdx];
particles[xPositionIdx] = clamp(particles[xPositionIdx], chain[ind].minRotation.x, chain[ind].maxRotation.x);
particles[yPositionIdx] = clamp(particles[yPositionIdx], chain[ind].minRotation.y, chain[ind].maxRotation.y);
particles[zPositionIdx] = clamp(particles[zPositionIdx], chain[ind].minRotation.z, chain[ind].maxRotation.z);
}
}
}
__global__ void initLocalBests(float *particles, float *localBests, NodeCUDA * chain, float* positions, int particleCount, obj_t* colliders, int colliderCount, FitnessConfig fitConfig)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = id; i < particleCount; i += stride)
{
localBests[i] = calculateDistance(chain, positions, particleCount, particles, i, colliders, colliderCount, fitConfig);
}
}
__global__ void updateLocalBests(float *particles, float *localBests, NodeCUDA * chain, float* positions, int particleCount, obj_t* colliders, int colliderCount, FitnessConfig fitConfig)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = id; i < particleCount; i += stride)
{
float currentDistance = calculateDistance(chain, positions, particleCount, particles, i, colliders, colliderCount, fitConfig);
if (currentDistance < localBests[i])
{
localBests[i] = currentDistance;
for (int deg = 0; deg < DEGREES_OF_FREEDOM; deg++)
{
particles[getParticleIndex(particleCount, i, localBest, deg)] = particles[getParticleIndex(particleCount, i, position, deg)];
}
}
}
}
__global__ void initParticlesKernel(float *particles, float *localBests, hiprandState_t *randoms, NodeCUDA * chain, float* positions, int particleCount)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = id; i < particleCount; i += stride)
{
for (int deg = 0; deg < DEGREES_OF_FREEDOM; deg += 3)
{
//Initialize each joint's angles from the chain's current rotation (the uniform random initialization across the joint limits is left commented out below)
int chainIndex = (deg / 3) + 1;
float3 eulerMaxConstraint = chain[chainIndex].maxRotation;
float3 eulerMinConstraint = chain[chainIndex].minRotation;
//printf("maxconstraint x %f\n", chain[chainIndex].maxRotation.x);
//printf("maxconstraint y %f\n", chain[chainIndex].maxRotation.y);
//printf("maxconstraint z %f\n", chain[chainIndex].maxRotation.z);
//printf("quaterniondiff - deg %d : %f\n",deg, eulerMaxConstraint.z - eulerMinConstraint.z);
//printf("quaterniondiff - deg %d : %f\n",deg+1, eulerMaxConstraint.x - eulerMinConstraint.x);
//printf("quaterniondiff - deg %d : %f\n",deg+2, eulerMaxConstraint.y - eulerMinConstraint.y);
//particles[i].positions[deg] = (hiprand_uniform(&randoms[i]) *6.28f - 3.14f); //(hiprand_uniform(&randoms[i]) * (eulerMaxConstraint.x - eulerMinConstraint.x)) + eulerMinConstraint.x;
//particles[i].positions[deg + 1] = (hiprand_uniform(&randoms[i])*6.28f - 3.14f);// (hiprand_uniform(&randoms[i]) * (eulerMaxConstraint.y - eulerMinConstraint.y)) + eulerMinConstraint.y;
//particles[i].positions[deg + 2] = (hiprand_uniform(&randoms[i])*6.28f - 3.14f);// (hiprand_uniform(&randoms[i]) * (eulerMaxConstraint.z - eulerMinConstraint.z)) + eulerMinConstraint.z;
float3 eulerRot = chain[chainIndex].rotation;
int positionIdx = getParticleIndex(particleCount, i, position, deg);
particles[positionIdx] = eulerRot.x;
particles[positionIdx + particleCount] = eulerRot.y;
particles[positionIdx + particleCount * 2] = eulerRot.z;
}
//Init velocities with small random values and local bests with the current positions
for (int deg = 0; deg < DEGREES_OF_FREEDOM; deg += 1)
{
int positionIdx = getParticleIndex(particleCount, i, position, deg);
particles[positionIdx + particleCount * DEGREES_OF_FREEDOM] = hiprand_uniform(&randoms[i]) * 2.0f - 1.0f;
particles[positionIdx + particleCount * DEGREES_OF_FREEDOM * 2] = particles[positionIdx];
}
}
}
__global__ void updateGlobalBestCoordsKernel(float *particles, int particleCount, Coordinates* global, int globalIndex)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int deg = id; deg < DEGREES_OF_FREEDOM; deg += stride)
{
global->positions[deg] = particles[getParticleIndex(particleCount, globalIndex, localBest, deg)];
}
}
hipError_t calculatePSO(float* particles, float* positions, float* bests,
hiprandState_t *randoms, int size, NodeCUDA *chain, PSOConfig psoConfig, FitnessConfig fitConfig,
Coordinates *result, obj_t* colliders, int colliderCount)
{
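// Host-side driver, roughly: seed the particles from the current chain state, evaluate
// the initial local bests, pick the global best with thrust::min_element, then for each
// iteration move the swarm, re-evaluate the local bests and refresh the global best
// whenever some particle improves on it; `result` ends up holding the joint angles of
// the best particle found.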
hipError_t status;
float globalMin;
float currentGlobalMin;
float eps = 0.00005;
int numBlocks = (size + blockSize - 1) / blockSize;
int globalUpdateNumBlocks = (DEGREES_OF_FREEDOM + blockSize - 1) / blockSize;
int sharedMemorySize = sizeof(NodeCUDA)*((DEGREES_OF_FREEDOM / 3) + 1);
hipLaunchKernelGGL(( initParticlesKernel), dim3(numBlocks), dim3(blockSize), 0, 0, particles, bests, randoms, chain, positions, size);
checkCuda(status = hipDeviceSynchronize());
hipLaunchKernelGGL(( initLocalBests), dim3(numBlocks), dim3(blockSize), 0, 0, particles, bests, chain, positions, size, colliders, colliderCount, fitConfig);
checkCuda(status = hipGetLastError());
if (status != hipSuccess) return status;
float *globalBest = thrust::min_element(thrust::device, bests, bests + size);
int globalIndex = globalBest - bests;
hipLaunchKernelGGL(( updateGlobalBestCoordsKernel), dim3(globalUpdateNumBlocks), dim3(blockSize), 0, 0, particles, size, result, globalIndex);
checkCuda(status = hipDeviceSynchronize());
checkCuda(status = hipMemcpy(&globalMin, bests + globalIndex, sizeof(float), hipMemcpyDeviceToHost));
for (int i = 0; i < psoConfig._iterations; i++)
{
hipLaunchKernelGGL(( simulateParticlesKernel), dim3(numBlocks), dim3(blockSize), 0, 0, particles, positions, bests, randoms, size, chain, psoConfig, result, globalMin);
checkCuda(status = hipDeviceSynchronize());
hipLaunchKernelGGL(( updateLocalBests), dim3(numBlocks), dim3(blockSize), 0, 0, particles, bests, chain, positions, size, colliders, colliderCount, fitConfig);
checkCuda(status = hipGetLastError());
if (status != hipSuccess) return status;
globalBest = thrust::min_element(thrust::device, bests, bests + size);
globalIndex = globalBest - bests;
checkCuda(status = hipMemcpy(¤tGlobalMin, bests + globalIndex, sizeof(float), hipMemcpyDeviceToHost));
if (globalMin > currentGlobalMin)
{
checkCuda(status = hipMemcpy(&globalMin, bests + globalIndex, sizeof(float), hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( updateGlobalBestCoordsKernel), dim3(globalUpdateNumBlocks), dim3(blockSize), 0, 0, particles, size, result, globalIndex);
checkCuda(status = hipDeviceSynchronize());
}
}
return status;
}
#pragma region implementheaders
#pragma region vectorOperations
__device__ float3 operator+(float3 f1, float3 f2)
{
return make_float3(f1.x + f2.x, f1.y + f2.y, f1.z + f2.z);
}
__device__ float3 operator-(float3 f1, float3 f2)
{
return make_float3(f1.x - f2.x, f1.y - f2.y, f1.z - f2.z);
}
__device__ float3 operator*(float3 f1, float a)
{
return make_float3(f1.x *a, f1.y *a, f1.z *a);
}
__device__ float4 operator+(float4 f1, float4 f2)
{
return make_float4(f1.x + f2.x, f1.y + f2.y, f1.z + f2.z, f1.w + f2.w);
}
__device__ float4 operator-(float4 f1, float4 f2)
{
return make_float4(f1.x - f2.x, f1.y - f2.y, f1.z - f2.z, f1.w - f2.w);
}
__device__ float4 operator*(float4 f1, float a)
{
return make_float4(f1.x *a, f1.y *a, f1.z *a, f1.w *a);
}
__device__ float magnitudeSqr(const float3 vector)
{
return (vector.x * vector.x) + (vector.y * vector.y) + (vector.z * vector.z);
}
__device__ float magnitudeSqr(const float4 vector)
{
return (vector.x * vector.x) + (vector.y * vector.y) + (vector.z * vector.z) + (vector.w * vector.w);
}
__device__ void float3Scale(float3* v, float a)
{
(*v) = (*v) * a;
}
__device__ void float3Sub(float3* V, const float3* v1, const float3* v2)
{
(*V) = (*v1) - (*v2);
}
__device__ void float3Add(float3* V, const float3* v1, const float3* v2)
{
(*V) = (*v1) + (*v2);
}
__device__ float float3Dot(const float3* v1, const float3* v2)
{
float dot = 0.0f;
dot = v1->x * v2->x;
dot += v1->y * v2->y;
dot += v1->z * v2->z;
return dot;
}
__device__ void float3Cross(float3* d, const float3* a, const float3* b)
{
d->x = (a->y * b->z) - (a->z * b->y);
d->y = (a->z * b->x) - (a->x * b->z);
d->z = (a->x * b->y) - (a->y * b->x);
}
__device__ void float4Copy(float4* V, const float4* v1)
{
V->x = v1->x;
V->y = v1->y;
V->z = v1->z;
V->w = v1->w;
}
__device__ void float3Copy(float3* V, const float3* v1)
{
V->x = v1->x;
V->y = v1->y;
V->z = v1->z;
}
__device__ float float3Len(float3 *v)
{
return float3Dot(v, v);
}
__device__ bool float3Eq(const float3 *v1, const float3 *v2)
{
return (v1->x == v2->x) &&
(v1->y == v2->y) &&
(v1->z == v2->z);
}
__device__ float float3Dist(const float3 * v1, const float3 * v2)
{
float3 res = (*v1) - (*v2);
return float3Len(&(res));
}
#pragma endregion
__device__ void SupportCopy(support_t *d, const support_t *s)
{
*d = *s;
}
__device__ void SimplexInit(simplex_t *s)
{
s->last = -1;
}
__device__ int SimplexSize(const simplex_t *s)
{
return s->last + 1;
}
__device__ const support_t *SimplexLast(const simplex_t *s)
{
return SimplexPoint(s, s->last);
}
__device__ const support_t *SimplexPoint(const simplex_t *s, int idx)
{
// note: no bounds check here
return &s->ps[idx];
}
__device__ support_t *SimplexPointW(simplex_t *s, int idx)
{
return &s->ps[idx];
}
__device__ void SimplexAdd(simplex_t *s, const support_t *v)
{
// no bounds check here, for the sake of speed
++s->last;
SupportCopy(s->ps + s->last, v);
}
__device__ void SimplexSet(simplex_t *s, size_t pos, const support_t *a)
{
SupportCopy(s->ps + pos, a);
}
__device__ void SimplexSetSize(simplex_t *s, int size)
{
s->last = size - 1;
}
__device__ void SimplexSwap(simplex_t *s, size_t pos1, size_t pos2)
{
support_t supp;
SupportCopy(&supp, &s->ps[pos1]);
SupportCopy(&s->ps[pos1], &s->ps[pos2]);
SupportCopy(&s->ps[pos2], &supp);
}
__device__ void firstDir(const void *obj1, const void *obj2, float3 *dir) {
dir->x = ONE;
dir->y = ONE;
dir->z = ZERO;
}
__device__ void supportBox(const void *_obj, const float3 *_dir, float3 *v)
{
// assume that obj_t is a user-defined structure that holds info about
// the object (in this case a box: x, y, z, pos, quat - box dimensions,
// position and rotation)
obj_t *obj = (obj_t *)_obj;
float3 dir;
float4 qinv;
// apply rotation on direction vector
float3Copy(&dir, _dir);
quatInvert2(&qinv, &obj->quat);
quatRotVec(&dir, &qinv);
// compute support point in specified direction
*v = make_float3(
Signum(dir.x) * obj->x * 0.5f,
Signum(dir.y) * obj->y * 0.5f,
Signum(dir.z) * obj->z * 0.5f);
// a human is a mug, q.e.d.
// transform support point according to position and rotation of object
quatRotVec(v, &obj->quat);
float3Add(v, v, &obj->pos);
}
__device__ int GJKIntersect(const void *obj1, const void *obj2, const GJKData_t *data)
{
simplex_t simplex;
return GJK(obj1, obj2, data, &simplex) == 0;
}
__device__ static int GJK(const void *obj1, const void *obj2,
const GJKData_t *data, simplex_t *simplex)
{
unsigned long iterations;
float3 dir; // direction vector
support_t last; // last support point
int do_simplex_res;
// initialize simplex struct
SimplexInit(simplex);
// get first direction
firstDir(obj1, obj2, &dir);
// get first support point
SupportCalc(obj1, obj2, &dir, &last);
// and add this point to simplex as last one
SimplexAdd(simplex, &last);
// set up the direction vector as (O - last), which is exactly -last
float3Copy(&dir, &last.v);
float3Scale(&dir, -ONE);
// start iterations
for (iterations = 0UL; iterations < data->max_iterations; ++iterations) {
// obtain support point
SupportCalc(obj1, obj2, &dir, &last);
// check that the farthest point of the Minkowski difference in direction dir
// doesn't lie before the origin (the negative dot product test) -
// because if it does, the objects cannot be intersecting at all.
if (float3Dot(&last.v, &dir) < ZERO) {
return -1; // intersection not found
}
// add last support vector to simplex
SimplexAdd(simplex, &last);
// doSimplex() returns 1 if the objects intersect, -1 if they don't,
// and 0 if the algorithm should continue
do_simplex_res = doSimplex(simplex, &dir);
if (do_simplex_res == 1) {
return 0; // intersection found
}
else if (do_simplex_res == -1) {
return -1; // intersection not found
}
if (IsZERO(float3Len(&dir))) {
return -1; // intersection not found
}
}
// intersection wasn't found
return -1;
}
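// The support point of the Minkowski difference A - B in direction d is
// support_A(d) - support_B(-d); GJK only ever queries the shapes through this combined
// support function, which is what SupportCalc below computes.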
__device__ void SupportCalc(const void *obj1, const void *obj2, const float3 *_dir, support_t *supp)
{
float3 dir;
float3Copy(&dir, _dir);
//store the support point of object 1 (in direction dir) in v1
supportBox(obj1, &dir, &supp->v1);
float3Scale(&dir, -ONE);
//store the support point of object 2 (in direction -dir) in v2
supportBox(obj2, &dir, &supp->v2);
//Minkowski difference of the two support points
float3Sub(&supp->v, &supp->v1, &supp->v2);
}
#pragma region doSimplexi
__device__ static int doSimplex(simplex_t *simplex, float3 *dir)
{
if (SimplexSize(simplex) == 2) {
// simplex contains segment only one segment
return doSimplex2(simplex, dir);
}
else if (SimplexSize(simplex) == 3) {
// simplex contains triangle
return doSimplex3(simplex, dir);
}
else { // ccdSimplexSize(simplex) == 4
// tetrahedron - this is the only shape which can enclose the origin,
// so doSimplex4() also contains the test for it
return doSimplex4(simplex, dir);
}
}
__device__ static int doSimplex2(simplex_t *simplex, float3 *dir)
{
const support_t *A, *B;
float3 AB, AO, tmp;
float dot;
// get last added as A
A = SimplexLast(simplex);
// get the other point
B = SimplexPoint(simplex, 0);
// compute AB oriented segment
float3Sub(&AB, &B->v, &A->v);
// compute AO vector
float3Copy(&AO, &A->v);
float3Scale(&AO, -ONE);
// dot product AB . AO
dot = float3Dot(&AB, &AO);
// check whether the origin lies on the AB segment (touching contact)
float3Cross(&tmp, &AB, &AO);
if (IsZERO(float3Len(&tmp)) && dot > ZERO) {
return 1;
}
// check if origin is in area where AB segment is
if (IsZERO(dot) || dot < ZERO) {
// origin is in the outside area of A
SimplexSet(simplex, 0, A);
SimplexSetSize(simplex, 1);
float3Copy(dir, &AO);
}
else {
// origin is in area where AB segment is
// keep simplex untouched and set direction to
// AB x AO x AB
tripleCross(&AB, &AO, &AB, dir);
}
return 0;
}
__device__ static int doSimplex3(simplex_t *simplex, float3 *dir)
{
const float3 origin = make_float3(0.f, 0.f, 0.f);
const float3* originPtr = &origin;
const support_t *A, *B, *C;
float3 AO, AB, AC, ABC, tmp;
float dot, dist;
// get last added as A
A = SimplexLast(simplex);
// get the other points
B = SimplexPoint(simplex, 1);
C = SimplexPoint(simplex, 0);
// check touching contact
dist = Vec3PointTriDist2(originPtr, &A->v, &B->v, &C->v, NULL);
if (IsZERO(dist)) {
return 1;
}
// check if the triangle is really a triangle (has area > 0);
// if not, the simplex can't be expanded and thus no intersection is found
if (float3Eq(&A->v, &B->v) || float3Eq(&A->v, &C->v)) {
return -1;
}
// compute AO vector
float3Copy(&AO, &A->v);
float3Scale(&AO, -ONE);
// compute AB and AC segments and the ABC vector (perpendicular to the triangle)
float3Sub(&AB, &B->v, &A->v);
float3Sub(&AC, &C->v, &A->v);
float3Cross(&ABC, &AB, &AC);
float3Cross(&tmp, &ABC, &AC);
dot = float3Dot(&tmp, &AO);
if (IsZERO(dot) || dot > ZERO) {
dot = float3Dot(&AC, &AO);
if (IsZERO(dot) || dot > ZERO) {
// C is already in place
SimplexSet(simplex, 1, A);
SimplexSetSize(simplex, 2);
tripleCross(&AC, &AO, &AC, dir);
}
else {
dot = float3Dot(&AB, &AO);
if (IsZERO(dot) || dot > ZERO) {
SimplexSet(simplex, 0, B);
SimplexSet(simplex, 1, A);
SimplexSetSize(simplex, 2);
tripleCross(&AB, &AO, &AB, dir);
}
else {
SimplexSet(simplex, 0, A);
SimplexSetSize(simplex, 1);
float3Copy(dir, &AO);
}
}
}
else {
float3Cross(&tmp, &AB, &ABC);
dot = float3Dot(&tmp, &AO);
if (IsZERO(dot) || dot > ZERO) {
dot = float3Dot(&AB, &AO);
if (IsZERO(dot) || dot > ZERO) {
SimplexSet(simplex, 0, B);
SimplexSet(simplex, 1, A);
SimplexSetSize(simplex, 2);
tripleCross(&AB, &AO, &AB, dir);
}
else {
SimplexSet(simplex, 0, A);
SimplexSetSize(simplex, 1);
float3Copy(dir, &AO);
}
}
else {
dot = float3Dot(&ABC, &AO);
if (IsZERO(dot) || dot > ZERO) {
float3Copy(dir, &ABC);
}
else {
support_t Ctmp;
SupportCopy(&Ctmp, C);
SimplexSet(simplex, 0, B);
SimplexSet(simplex, 1, &Ctmp);
float3Copy(dir, &ABC);
float3Scale(dir, -ONE);
}
}
}
return 0;
}
__device__ static int doSimplex4(simplex_t *simplex, float3 *dir)
{
const float3 origin = make_float3(0.f, 0.f, 0.f);
const float3* originPtr = &origin;
const support_t *A, *B, *C, *D;
float3 AO, AB, AC, AD, ABC, ACD, ADB;
int B_on_ACD, C_on_ADB, D_on_ABC;
int AB_O, AC_O, AD_O;
float dist;
// get last added as A
A = SimplexLast(simplex);
// get the other points
B = SimplexPoint(simplex, 2);
C = SimplexPoint(simplex, 1);
D = SimplexPoint(simplex, 0);
// check if the tetrahedron is really a tetrahedron (has volume > 0);
// if not, the simplex can't be expanded and thus no intersection is
// found
dist = Vec3PointTriDist2(&A->v, &B->v, &C->v, &D->v, NULL);
if (IsZERO(dist)) {
return -1;
}
// check if origin lies on some of tetrahedron's face - if so objects
// intersect
dist = Vec3PointTriDist2(originPtr, &A->v, &B->v, &C->v, NULL);
if (IsZERO(dist))
return 1;
dist = Vec3PointTriDist2(originPtr, &A->v, &C->v, &D->v, NULL);
if (IsZERO(dist))
return 1;
dist = Vec3PointTriDist2(originPtr, &A->v, &B->v, &D->v, NULL);
if (IsZERO(dist))
return 1;
dist = Vec3PointTriDist2(originPtr, &B->v, &C->v, &D->v, NULL);
if (IsZERO(dist))
return 1;
// compute AO, AB, AC, AD segments and ABC, ACD, ADB normal vectors
float3Copy(&AO, &A->v);
float3Scale(&AO, -ONE);
float3Sub(&AB, &B->v, &A->v);
float3Sub(&AC, &C->v, &A->v);
float3Sub(&AD, &D->v, &A->v);
float3Cross(&ABC, &AB, &AC);
float3Cross(&ACD, &AC, &AD);
float3Cross(&ADB, &AD, &AB);
// side (positive or negative) of B, C, D relative to planes ACD, ADB
// and ABC respectively
B_on_ACD = Signum(float3Dot(&ACD, &AB));
C_on_ADB = Signum(float3Dot(&ADB, &AC));
D_on_ABC = Signum(float3Dot(&ABC, &AD));
// whether origin is on same side of ACD, ADB, ABC as B, C, D
// respectively
AB_O = Signum(float3Dot(&ACD, &AO)) == B_on_ACD;
AC_O = Signum(float3Dot(&ADB, &AO)) == C_on_ADB;
AD_O = Signum(float3Dot(&ABC, &AO)) == D_on_ABC;
if (AB_O && AC_O && AD_O) {
// origin is in tetrahedron
return 1;
}
// rearrange simplex to triangle and call doSimplex3()
else if (!AB_O) {
// B is farthest from the origin among all of the tetrahedron's
// points, so remove it from the list and go on with the triangle
// case
// D and C are in place
SimplexSet(simplex, 2, A);
SimplexSetSize(simplex, 3);
}
else if (!AC_O) {
// C is farthest
SimplexSet(simplex, 1, D);
SimplexSet(simplex, 0, B);
SimplexSet(simplex, 2, A);
SimplexSetSize(simplex, 3);
}
else { // (!AD_O)
SimplexSet(simplex, 0, C);
SimplexSet(simplex, 1, B);
SimplexSet(simplex, 2, A);
SimplexSetSize(simplex, 3);
}
return doSimplex3(simplex, dir);
}
#pragma endregion
__device__ float Vec3PointTriDist2(const float3 *P, const float3 *x0, const float3 *B, const float3 *C, float3 *witness)
{
// The computation comes from the analytic parametrization of the triangle (x0, B, C):
// T(s, t) = x0 + s.d1 + t.d2, where d1 = B - x0 and d2 = C - x0.
// The squared distance to P is then:
// D(s, t) = | T(s, t) - P |^2
// which is a quadratic function of two variables to be minimized.
// The minimizer is used only if s is between 0 and 1, t is
// between 0 and 1 and t + s < 1; otherwise the distance to the closest
// edge segment is computed.
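// Expanded with a = x0 - P, u = a.a, v = d1.d1, w = d2.d2, p = a.d1, q = a.d2, r = d1.d2
// (the variables below), the quadratic is
// D(s, t) = u + s^2 v + t^2 w + 2sp + 2tq + 2str,
// and setting both partial derivatives to zero gives
// s = (q r - w p) / (v w - r^2), t = -(s r + q) / w,
// which is exactly what the code computes (with d = v w - r^2).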
float3 d1, d2, a;
float u, v, w, p, q, r, d;
float s, t, dist, dist2;
float3 witness2;
float3Sub(&d1, B, x0);
float3Sub(&d2, C, x0);
float3Sub(&a, x0, P);
u = float3Dot(&a, &a);
v = float3Dot(&d1, &d1);
w = float3Dot(&d2, &d2);
p = float3Dot(&a, &d1);
q = float3Dot(&a, &d2);
r = float3Dot(&d1, &d2);
d = w * v - r * r;
if (IsZERO(d)) {
// To avoid division by zero for zero (or near zero) area triangles
s = t = -1.f;
}
else {
s = (q * r - w * p) / d;
t = (-s * r - q) / w;
}
if ((IsZERO(s) || s > ZERO)
&& (floatEq(s, ONE) || s < ONE)
&& (IsZERO(t) || t > ZERO)
&& (floatEq(t, ONE) || t < ONE)
&& (floatEq(t + s, ONE) || t + s < ONE)) {
if (witness) {
float3Scale(&d1, s);
float3Scale(&d2, t);
float3Copy(witness, x0);
float3Add(witness, witness, &d1);
float3Add(witness, witness, &d2);
dist = float3Dist(witness, P);
}
else {
dist = s * s * v;
dist += t * t * w;
dist += 2.f * s * t * r;
dist += 2.f * s * p;
dist += 2.f * t * q;
dist += u;
}
}
else {
dist = PointSegmentDist(P, x0, B, witness);
dist2 = PointSegmentDist(P, x0, C, &witness2);
if (dist2 < dist) {
dist = dist2;
if (witness)
float3Copy(witness, &witness2);
}
dist2 = PointSegmentDist(P, B, C, &witness2);
if (dist2 < dist) {
dist = dist2;
if (witness)
float3Copy(witness, &witness2);
}
}
return dist;
}
__device__ float PointSegmentDist(const float3 *P, const float3 *x0, const float3 *b, float3 *witness)
{
// The computation comes from the parametric equation of the segment:
// S(t) = x0 + t.d
// where - x0 is the initial point of the segment
// - d is the direction of the segment from x0 (|d| > 0)
// - t belongs to the <0, 1> interval
//
// The squared distance from a point P to the segment is then:
// D(t) = |x0 + t.d - P|^2
// for any point on the segment. Minimizing this function gives the
// distance from P to the segment; the minimization leads to a simple
// quadratic equation whose solution is straightforward.
//
// A bonus of this method is that the witness point comes for free.
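// Concretely, with a = x0 - P and d = b - x0, D(t) = |a + t.d|^2, so
// D'(t) = 2 (a.d + t |d|^2) = 0 gives t = -(a.d) / |d|^2, which the branches below
// clamp to the <0, 1> interval.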
float dist, t;
float3 d, a;
// direction of segment
float3Sub(&d, b, x0);
// precompute vector from P to x0
float3Sub(&a, x0, P);
t = -1.f * float3Dot(&a, &d);
t /= float3Len(&d);
if (t < ZERO || IsZERO(t)) {
dist = float3Dist(x0, P);
if (witness)
float3Copy(witness, x0);
}
else if (t > ONE || floatEq(t, ONE)) {
dist = float3Dist(b, P);
if (witness)
float3Copy(witness, b);
}
else {
if (witness) {
float3Copy(witness, &d);
float3Scale(witness, t);
float3Add(witness, witness, x0);
dist = float3Dist(witness, P);
}
else {
// recycling variables
float3Scale(&d, t);
float3Add(&d, &d, &a);
dist = float3Len(&d);
}
}
return dist;
}
__device__ void quatRotVec(float3 *v, const float4 *q)
{
// original version: 31 mul + 21 add
// optimized version: 18 mul + 12 add
// formula: v = v + 2 * cross(q.xyz, cross(q.xyz, v) + q.w * v)
float cross1_x, cross1_y, cross1_z, cross2_x, cross2_y, cross2_z;
float x, y, z, w;
float vx, vy, vz;
vx = v->x;
vy = v->y;
vz = v->z;
w = q->w;
x = q->x;
y = q->y;
z = q->z;
cross1_x = y * vz - z * vy + w * vx;
cross1_y = z * vx - x * vz + w * vy;
cross1_z = x * vy - y * vx + w * vz;
cross2_x = y * cross1_z - z * cross1_y;
cross2_y = z * cross1_x - x * cross1_z;
cross2_z = x * cross1_y - y * cross1_x;
*v = make_float3(vx + 2 * cross2_x, vy + 2 * cross2_y, vz + 2 * cross2_z);
}
__device__ int quatInvert2(float4 *dest, const float4 *src)
{
float4Copy(dest, src);
return quatInvert(dest);
}
__device__ int quatInvert(float4 *q)
{
float len2 = magnitudeSqr(*q);
if (len2 < FLT_EPSILON)
return -1;
len2 = ONE / len2;
q->x = -q->x * len2;
q->y = -q->y * len2;
q->z = -q->z * len2;
q->w = q->w * len2;
return 0;
}
#pragma region inlines
__device__ int Signum(float val)
{
if (IsZERO(val)) {
return 0;
}
else if (val < ZERO) {
return -1;
}
return 1;
}
__device__ void tripleCross(const float3 *a, const float3 *b,
const float3 *c, float3 *d)
{
float3 e;
float3Cross(&e, a, b);
float3Cross(d, &e, c);
}
__device__ int IsZERO(float val)
{
return absolute(val) < COL_EPS;
}
__device__ int floatEq(float a, float b)
{
return a == b;
}
__device__ float absolute(float val)
{
return val > 0 ? val : -val;
}
#pragma endregion
#pragma endregion
| 539e47d67e91e28763b36d74a0df37fdee8e1b83.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <curand_kernel.h>
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <thrust/extrema.h>
#include "Particle.h"
#include "utility_kernels.cuh"
#include "matrix_operations.cuh"
#include "quaternion_operations.cuh"
#include "GJKIntersection.cuh"
#include "ik_constants.h"
__host__ __device__ int getParticleIndex(int particleCount, int particleIdx, ParticleProperty propType, int dimension)
{
int idx = particleIdx + particleCount * dimension;
if (propType == velocity)
{
idx += particleCount * DEGREES_OF_FREEDOM;
}
else if (propType == localBest)
{
idx += 2 * particleCount * DEGREES_OF_FREEDOM;
}
return idx;
}
__device__ void updateChainMatrices(NodeCUDA *chain, int particleCount, float* particles, int particleIdx, Matrix *matrices)
{
int nodeCount = NODE_COUNT;
int nodeIndex = 0;
Matrix matrix = createMatrix(1.0f);
matrix = translateMatrix(matrix, chain[nodeIndex].position);
matrix = rotateEuler(matrix, chain[nodeIndex].rotation);
for (int i = 0; i < 16; i++)
{
matrices[nodeIndex].cells[i] = matrix.cells[i];
}
for (nodeIndex = 1; nodeIndex < nodeCount; nodeIndex++)
{
int dimensionIdx = (nodeIndex - 1) * 3;
int positionIdx = getParticleIndex(particleCount, particleIdx, position, dimensionIdx);
float3 particleEulerRotation = make_float3(particles[positionIdx],
particles[positionIdx + particleCount],
particles[positionIdx + particleCount * 2]);
Matrix tempMat = createMatrix(1.0f);
tempMat = rotateEuler(tempMat, particleEulerRotation);
tempMat = translateMatrix(tempMat, make_float3(chain[nodeIndex].length, 0.0f, 0.0f));
int parentIdx = chain[nodeIndex].parentIndex;
matrix = multiplyMatrices(matrices[parentIdx], tempMat);
for (int i = 0; i < 16; i++)
{
matrices[nodeIndex].cells[i] = matrix.cells[i];
}
}
}
__device__ float calculateDistance(NodeCUDA *chain, float* positions, int particleCount, float* particles, int particleIdx, obj_t* colliders, int colliderCount, FitnessConfig fitConfig)
{
float rotationDifference = 0.0f;
float positionDifferenceMag = 0.0f;
float distance = 0.0f;
int nodeCount = NODE_COUNT;
Matrix matrices[NODE_COUNT];
updateChainMatrices(chain, particleCount, particles, particleIdx, matrices);
for (int ind = 1; ind < nodeCount; ind++)
{
int dimensionIdx = (ind - 1) * 3;
int positionIdx = getParticleIndex(particleCount, particleIdx, position, dimensionIdx);
float3 chainRotation = chain[ind].rotation;
float3 particleRotation = make_float3(particles[positionIdx],
particles[positionIdx + particleCount],
particles[positionIdx + particleCount * 2]);
rotationDifference = rotationDifference + magnitudeSqr(chainRotation - particleRotation);
float4 originVector = make_float4(0.0f, 0.0f, 0.0f, 1.0f);
Matrix model;
for (int i = 0; i < 16; i++)
{
model.cells[i] = matrices[ind].cells[i];
}
float4 position = multiplyMatByVec(model, originVector);
int armInd = (ind - 1) * 4;
float4 armPosition = make_float4(positions[armInd],
positions[armInd + 1],
positions[armInd + 2],
positions[armInd + 3]);
float4 positionDifference = position - armPosition;
positionDifferenceMag += magnitudeSqr(positionDifference);
float4 rotation = matrixToQuaternion(model);
obj_t nodeCollider;
nodeCollider.pos = make_float3(position.x, position.y, position.z);
nodeCollider.quat = rotation;
nodeCollider.x = nodeCollider.y = nodeCollider.z = GIZMO_SIZE;
obj_t linkCollider;
float4 startPos = multiplyMatByVec(model, originVector); //this node
float4 endPos = multiplyMatByVec(matrices[chain[ind].parentIndex], originVector); //parent node
float4 centerPos = (startPos + endPos) * 0.5f;
linkCollider.pos = make_float3(centerPos.x, centerPos.y, centerPos.z);
linkCollider.quat = rotation;
linkCollider.x = chain[ind].length;
linkCollider.y = linkCollider.z = GIZMO_SIZE * 0.25f;
GJKData_t gjkData;
CCD_INIT(&gjkData);
gjkData.max_iterations = GJK_ITERATIONS;
int intersects = 0;
for (int i = 0; i < colliderCount; i++)
{
intersects = GJKIntersect(&nodeCollider, &colliders[i], &gjkData);
if (intersects)
{
return FLT_MAX;
}
intersects = GJKIntersect(&linkCollider, &colliders[i], &gjkData);
if (intersects)
{
return FLT_MAX;
}
}
if (chain[ind].nodeType == NodeType::effectorNode)
{
float distTmp = magnitudeSqr(make_float3(
position.x - chain[ind].targetPosition.x,
position.y - chain[ind].targetPosition.y,
position.z - chain[ind].targetPosition.z));
distance = distance + distTmp * chain[ind].effectorWeight;
}
}
return distance + fitConfig.distanceWeight / (DEGREES_OF_FREEDOM / 3) * positionDifferenceMag + fitConfig.angleWeight / (DEGREES_OF_FREEDOM / 3) * rotationDifference;
}
__global__ void simulateParticlesKernel(float *particles, float* positions, float *localBests, curandState_t *randoms, int size, NodeCUDA *chain, PSOConfig psoConfig, Coordinates *global, float globalMin)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = id; i < size; i += stride)
{
for (int deg = 0; deg < DEGREES_OF_FREEDOM; deg++)
{
int velocityIdx = getParticleIndex(size, i, velocity, deg);
int positionIdx = getParticleIndex(size, i, position, deg);
particles[velocityIdx] = psoConfig._inertia * curand_uniform(&randoms[i]) * particles[velocityIdx] +
psoConfig._local * curand_uniform(&randoms[i]) * (particles[getParticleIndex(size, i, localBest, deg)] - particles[positionIdx]) +
psoConfig._global * curand_uniform(&randoms[i]) * (global->positions[deg] - particles[positionIdx]);
particles[positionIdx] += particles[velocityIdx];
}
for (int ind = 1; ind <= DEGREES_OF_FREEDOM / 3; ind++)
{
int deg = (ind - 1) * 3;
int xPositionIdx = getParticleIndex(size, i, position, deg);
int yPositionIdx = getParticleIndex(size, i, position, deg + 1);
int zPositionIdx = getParticleIndex(size, i, position, deg + 2);
float posX = particles[xPositionIdx];
float posY = particles[yPositionIdx];
float posZ = particles[zPositionIdx];
particles[xPositionIdx] = clamp(particles[xPositionIdx], chain[ind].minRotation.x, chain[ind].maxRotation.x);
particles[yPositionIdx] = clamp(particles[yPositionIdx], chain[ind].minRotation.y, chain[ind].maxRotation.y);
particles[zPositionIdx] = clamp(particles[zPositionIdx], chain[ind].minRotation.z, chain[ind].maxRotation.z);
}
}
}
__global__ void initLocalBests(float *particles, float *localBests, NodeCUDA * chain, float* positions, int particleCount, obj_t* colliders, int colliderCount, FitnessConfig fitConfig)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = id; i < particleCount; i += stride)
{
localBests[i] = calculateDistance(chain, positions, particleCount, particles, i, colliders, colliderCount, fitConfig);
}
}
__global__ void updateLocalBests(float *particles, float *localBests, NodeCUDA * chain, float* positions, int particleCount, obj_t* colliders, int colliderCount, FitnessConfig fitConfig)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = id; i < particleCount; i += stride)
{
float currentDistance = calculateDistance(chain, positions, particleCount, particles, i, colliders, colliderCount, fitConfig);
if (currentDistance < localBests[i])
{
localBests[i] = currentDistance;
for (int deg = 0; deg < DEGREES_OF_FREEDOM; deg++)
{
particles[getParticleIndex(particleCount, i, localBest, deg)] = particles[getParticleIndex(particleCount, i, position, deg)];
}
}
}
}
__global__ void initParticlesKernel(float *particles, float *localBests, curandState_t *randoms, NodeCUDA * chain, float* positions, int particleCount)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int i = id; i < particleCount; i += stride)
{
for (int deg = 0; deg < DEGREES_OF_FREEDOM; deg += 3)
{
//Initialize each joint's angles from the chain's current rotation (the uniform random initialization across the joint limits is left commented out below)
int chainIndex = (deg / 3) + 1;
float3 eulerMaxConstraint = chain[chainIndex].maxRotation;
float3 eulerMinConstraint = chain[chainIndex].minRotation;
//printf("maxconstraint x %f\n", chain[chainIndex].maxRotation.x);
//printf("maxconstraint y %f\n", chain[chainIndex].maxRotation.y);
//printf("maxconstraint z %f\n", chain[chainIndex].maxRotation.z);
//printf("quaterniondiff - deg %d : %f\n",deg, eulerMaxConstraint.z - eulerMinConstraint.z);
//printf("quaterniondiff - deg %d : %f\n",deg+1, eulerMaxConstraint.x - eulerMinConstraint.x);
//printf("quaterniondiff - deg %d : %f\n",deg+2, eulerMaxConstraint.y - eulerMinConstraint.y);
//particles[i].positions[deg] = (curand_uniform(&randoms[i]) *6.28f - 3.14f); //(curand_uniform(&randoms[i]) * (eulerMaxConstraint.x - eulerMinConstraint.x)) + eulerMinConstraint.x;
//particles[i].positions[deg + 1] = (curand_uniform(&randoms[i])*6.28f - 3.14f);// (curand_uniform(&randoms[i]) * (eulerMaxConstraint.y - eulerMinConstraint.y)) + eulerMinConstraint.y;
//particles[i].positions[deg + 2] = (curand_uniform(&randoms[i])*6.28f - 3.14f);// (curand_uniform(&randoms[i]) * (eulerMaxConstraint.z - eulerMinConstraint.z)) + eulerMinConstraint.z;
float3 eulerRot = chain[chainIndex].rotation;
int positionIdx = getParticleIndex(particleCount, i, position, deg);
particles[positionIdx] = eulerRot.x;
particles[positionIdx + particleCount] = eulerRot.y;
particles[positionIdx + particleCount * 2] = eulerRot.z;
}
//Init velocities with small random values and local bests with the current positions
for (int deg = 0; deg < DEGREES_OF_FREEDOM; deg += 1)
{
int positionIdx = getParticleIndex(particleCount, i, position, deg);
particles[positionIdx + particleCount * DEGREES_OF_FREEDOM] = curand_uniform(&randoms[i]) * 2.0f - 1.0f;
particles[positionIdx + particleCount * DEGREES_OF_FREEDOM * 2] = particles[positionIdx];
}
}
}
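// Note (added): the particles buffer appears to be laid out as three consecutive
// blocks of particleCount * DEGREES_OF_FREEDOM floats - positions, then
// velocities, then local bests - which is why the offsets above step by
// particleCount * DEGREES_OF_FREEDOM; this layout is inferred from the indexing
// used here and is not stated explicitly in this file.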
__global__ void updateGlobalBestCoordsKernel(float *particles, int particleCount, Coordinates* global, int globalIndex)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = gridDim.x * blockDim.x;
for (int deg = id; deg < DEGREES_OF_FREEDOM; deg += stride)
{
global->positions[deg] = particles[getParticleIndex(particleCount, globalIndex, localBest, deg)];
}
}
cudaError_t calculatePSO(float* particles, float* positions, float* bests,
curandState_t *randoms, int size, NodeCUDA *chain, PSOConfig psoConfig, FitnessConfig fitConfig,
Coordinates *result, obj_t* colliders, int colliderCount)
{
cudaError_t status;
float globalMin;
float currentGlobalMin;
float eps = 0.00005;
int numBlocks = (size + blockSize - 1) / blockSize;
int globalUpdateNumBlocks = (DEGREES_OF_FREEDOM + blockSize - 1) / blockSize;
int sharedMemorySize = sizeof(NodeCUDA)*((DEGREES_OF_FREEDOM / 3) + 1);
initParticlesKernel<<<numBlocks, blockSize>>>(particles, bests, randoms, chain, positions, size);
checkCuda(status = cudaDeviceSynchronize());
initLocalBests<<<numBlocks, blockSize>>>(particles, bests, chain, positions, size, colliders, colliderCount, fitConfig);
checkCuda(status = cudaGetLastError());
if (status != cudaSuccess) return status;
float *globalBest = thrust::min_element(thrust::device, bests, bests + size);
int globalIndex = globalBest - bests;
updateGlobalBestCoordsKernel<<<globalUpdateNumBlocks, blockSize>>>(particles, size, result, globalIndex);
checkCuda(status = cudaDeviceSynchronize());
checkCuda(status = cudaMemcpy(&globalMin, bests + globalIndex, sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < psoConfig._iterations; i++)
{
simulateParticlesKernel<<<numBlocks, blockSize>>>(particles, positions, bests, randoms, size, chain, psoConfig, result, globalMin);
checkCuda(status = cudaDeviceSynchronize());
updateLocalBests<<<numBlocks, blockSize>>>(particles, bests, chain, positions, size, colliders, colliderCount, fitConfig);
checkCuda(status = cudaGetLastError());
if (status != cudaSuccess) return status;
globalBest = thrust::min_element(thrust::device, bests, bests + size);
globalIndex = globalBest - bests;
checkCuda(status = cudaMemcpy(&currentGlobalMin, bests + globalIndex, sizeof(float), cudaMemcpyDeviceToHost));
if (globalMin > currentGlobalMin)
{
checkCuda(status = cudaMemcpy(&globalMin, bests + globalIndex, sizeof(float), cudaMemcpyDeviceToHost));
updateGlobalBestCoordsKernel<<<globalUpdateNumBlocks, blockSize>>>(particles, size, result, globalIndex);
checkCuda(status = cudaDeviceSynchronize());
}
}
return status;
}
#pragma region implementheaders
#pragma region vectorOperations
__device__ float3 operator+(float3 f1, float3 f2)
{
return make_float3(f1.x + f2.x, f1.y + f2.y, f1.z + f2.z);
}
__device__ float3 operator-(float3 f1, float3 f2)
{
return make_float3(f1.x - f2.x, f1.y - f2.y, f1.z - f2.z);
}
__device__ float3 operator*(float3 f1, float a)
{
return make_float3(f1.x *a, f1.y *a, f1.z *a);
}
__device__ float4 operator+(float4 f1, float4 f2)
{
return make_float4(f1.x + f2.x, f1.y + f2.y, f1.z + f2.z, f1.w + f2.w);
}
__device__ float4 operator-(float4 f1, float4 f2)
{
return make_float4(f1.x - f2.x, f1.y - f2.y, f1.z - f2.z, f1.w - f2.w);
}
__device__ float4 operator*(float4 f1, float a)
{
return make_float4(f1.x *a, f1.y *a, f1.z *a, f1.w *a);
}
__device__ float magnitudeSqr(const float3 vector)
{
return (vector.x * vector.x) + (vector.y * vector.y) + (vector.z * vector.z);
}
__device__ float magnitudeSqr(const float4 vector)
{
return (vector.x * vector.x) + (vector.y * vector.y) + (vector.z * vector.z) + (vector.w * vector.w);
}
__device__ void float3Scale(float3* v, float a)
{
(*v) = (*v) * a;
}
__device__ void float3Sub(float3* V, const float3* v1, const float3* v2)
{
(*V) = (*v1) - (*v2);
}
__device__ void float3Add(float3* V, const float3* v1, const float3* v2)
{
(*V) = (*v1) + (*v2);
}
__device__ float float3Dot(const float3* v1, const float3* v2)
{
float dot = 0.0f;
dot = v1->x * v2->x;
dot += v1->y * v2->y;
dot += v1->z * v2->z;
return dot;
}
__device__ void float3Cross(float3* d, const float3* a, const float3* b)
{
d->x = (a->y * b->z) - (a->z * b->y);
d->y = (a->z * b->x) - (a->x * b->z);
d->z = (a->x * b->y) - (a->y * b->x);
}
__device__ void float4Copy(float4* V, const float4* v1)
{
V->x = v1->x;
V->y = v1->y;
V->z = v1->z;
V->w = v1->w;
}
__device__ void float3Copy(float3* V, const float3* v1)
{
V->x = v1->x;
V->y = v1->y;
V->z = v1->z;
}
__device__ float float3Len(float3 *v)
{
return float3Dot(v, v);
}
__device__ bool float3Eq(const float3 *v1, const float3 *v2)
{
return (v1->x == v2->x) &&
(v1->y == v2->y) &&
(v1->z == v2->z);
}
__device__ float float3Dist(const float3 * v1, const float3 * v2)
{
float3 res = (*v1) - (*v2);
return float3Len(&(res));
}
#pragma endregion
__device__ void SupportCopy(support_t *d, const support_t *s)
{
*d = *s;
}
__device__ void SimplexInit(simplex_t *s)
{
s->last = -1;
}
__device__ int SimplexSize(const simplex_t *s)
{
return s->last + 1;
}
__device__ const support_t *SimplexLast(const simplex_t *s)
{
return SimplexPoint(s, s->last);
}
__device__ const support_t *SimplexPoint(const simplex_t *s, int idx)
{
// there is no check on boundaries here
return &s->ps[idx];
}
__device__ support_t *SimplexPointW(simplex_t *s, int idx)
{
return &s->ps[idx];
}
__device__ void SimplexAdd(simplex_t *s, const support_t *v)
{
// there is no check on boundaries here, for the sake of speed
++s->last;
SupportCopy(s->ps + s->last, v);
}
__device__ void SimplexSet(simplex_t *s, size_t pos, const support_t *a)
{
SupportCopy(s->ps + pos, a);
}
__device__ void SimplexSetSize(simplex_t *s, int size)
{
s->last = size - 1;
}
__device__ void SimplexSwap(simplex_t *s, size_t pos1, size_t pos2)
{
support_t supp;
SupportCopy(&supp, &s->ps[pos1]);
SupportCopy(&s->ps[pos1], &s->ps[pos2]);
SupportCopy(&s->ps[pos2], &supp);
}
__device__ void firstDir(const void *obj1, const void *obj2, float3 *dir) {
dir->x = ONE;
dir->y = ONE;
dir->z = ZERO;
}
__device__ void supportBox(const void *_obj, const float3 *_dir, float3 *v)
{
// assume that obj_t is user-defined structure that holds info about
// object (in this case box: x, y, z, pos, quat - dimensions of box,
// position and rotation)
obj_t *obj = (obj_t *)_obj;
float3 dir;
float4 qinv;
// apply rotation on direction vector
float3Copy(&dir, _dir);
quatInvert2(&qinv, &obj->quat);
quatRotVec(&dir, &qinv);
// compute support point in specified direction
*v = make_float3(
Signum(dir.x) * obj->x * 0.5f,
Signum(dir.y) * obj->y * 0.5f,
Signum(dir.z) * obj->z * 0.5f);
// a human is a mug, q.e.d.
// transform support point according to position and rotation of object
quatRotVec(v, &obj->quat);
float3Add(v, v, &obj->pos);
}
__device__ int GJKIntersect(const void *obj1, const void *obj2, const GJKData_t *data)
{
simplex_t simplex;
return GJK(obj1, obj2, data, &simplex) == 0;
}
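// Illustrative usage sketch (added; not part of the original solver). It relies
// only on the members this file itself touches - obj_t's x, y, z, pos and quat,
// and GJKData_t's max_iterations; the iteration budget below is an assumed value.
__device__ bool GJKIntersectExample(const obj_t *boxA, const obj_t *boxB)
{
GJKData_t data;
data.max_iterations = 64; // assumption: enough iterations for simple convex pairs
return GJKIntersect(boxA, boxB, &data) != 0;
}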
__device__ static int GJK(const void *obj1, const void *obj2,
const GJKData_t *data, simplex_t *simplex)
{
unsigned long iterations;
float3 dir; // direction vector
support_t last; // last support point
int do_simplex_res;
// initialize simplex struct
SimplexInit(simplex);
// get first direction
firstDir(obj1, obj2, &dir);
// get first support point
SupportCalc(obj1, obj2, &dir, &last);
// and add this point to simplex as last one
SimplexAdd(simplex, &last);
// set up the direction vector as (O - last), which is exactly -last
float3Copy(&dir, &last.v);
float3Scale(&dir, -ONE);
// start iterations
for (iterations = 0UL; iterations < data->max_iterations; ++iterations) {
// obtain support point
SupportCalc(obj1, obj2, &dir, &last);
// check if farthest point in Minkowski difference in direction dir
// isn't somewhere before origin (the test on negative dot product)
// - because if it is, objects are not intersecting at all.
if (float3Dot(&last.v, &dir) < ZERO) {
return -1; // intersection not found
}
// add last support vector to simplex
SimplexAdd(simplex, &last);
// doSimplex returns 1 if objects intersect, -1 if objects don't
// intersect, and 0 if the algorithm should continue
do_simplex_res = doSimplex(simplex, &dir);
if (do_simplex_res == 1) {
return 0; // intersection found
}
else if (do_simplex_res == -1) {
return -1; // intersection not found
}
if (IsZERO(float3Len(&dir))) {
return -1; // intersection not found
}
}
// intersection wasn't found
return -1;
}
__device__ void SupportCalc(const void *obj1, const void *obj2, const float3 *_dir, support_t *supp)
{
float3 dir;
float3Copy(&dir, _dir);
// store the support point of the first object (in direction dir) in v1
supportBox(obj1, &dir, &supp->v1);
float3Scale(&dir, -ONE);
// store the support point of the second object (in the opposite direction) in v2
supportBox(obj2, &dir, &supp->v2);
// Minkowski difference
float3Sub(&supp->v, &supp->v1, &supp->v2);
}
#pragma region doSimplexi
__device__ static int doSimplex(simplex_t *simplex, float3 *dir)
{
if (SimplexSize(simplex) == 2) {
// simplex contains only one segment
return doSimplex2(simplex, dir);
}
else if (SimplexSize(simplex) == 3) {
// simplex contains triangle
return doSimplex3(simplex, dir);
}
else { // ccdSimplexSize(simplex) == 4
// tetrahedron - this is the only shape which can encapsule origin
// so doSimplex4() also contains test on it
return doSimplex4(simplex, dir);
}
}
__device__ static int doSimplex2(simplex_t *simplex, float3 *dir)
{
const support_t *A, *B;
float3 AB, AO, tmp;
float dot;
// get last added as A
A = SimplexLast(simplex);
// get the other point
B = SimplexPoint(simplex, 0);
// compute AB oriented segment
float3Sub(&AB, &B->v, &A->v);
// compute AO vector
float3Copy(&AO, &A->v);
float3Scale(&AO, -ONE);
// dot product AB . AO
dot = float3Dot(&AB, &AO);
// check if origin doesn't lie on AB segment
float3Cross(&tmp, &AB, &AO);
if (IsZERO(float3Len(&tmp)) && dot > ZERO) {
return 1;
}
// check if origin is in area where AB segment is
if (IsZERO(dot) || dot < ZERO) {
// origin is in the outside area of A
SimplexSet(simplex, 0, A);
SimplexSetSize(simplex, 1);
float3Copy(dir, &AO);
}
else {
// origin is in area where AB segment is
// keep simplex untouched and set direction to
// AB x AO x AB
tripleCross(&AB, &AO, &AB, dir);
}
return 0;
}
__device__ static int doSimplex3(simplex_t *simplex, float3 *dir)
{
const float3 origin = make_float3(0.f, 0.f, 0.f);
const float3* originPtr = &origin;
const support_t *A, *B, *C;
float3 AO, AB, AC, ABC, tmp;
float dot, dist;
// get last added as A
A = SimplexLast(simplex);
// get the other points
B = SimplexPoint(simplex, 1);
C = SimplexPoint(simplex, 0);
// check touching contact
dist = Vec3PointTriDist2(originPtr, &A->v, &B->v, &C->v, NULL);
if (IsZERO(dist)) {
return 1;
}
// check if triangle is really triangle (has area > 0)
// if not, the simplex can't be expanded and thus no intersection is found
if (float3Eq(&A->v, &B->v) || float3Eq(&A->v, &C->v)) {
return -1;
}
// compute AO vector
float3Copy(&AO, &A->v);
float3Scale(&AO, -ONE);
// compute AB and AC segments and ABC vector (perpendircular to triangle)
float3Sub(&AB, &B->v, &A->v);
float3Sub(&AC, &C->v, &A->v);
float3Cross(&ABC, &AB, &AC);
float3Cross(&tmp, &ABC, &AC);
dot = float3Dot(&tmp, &AO);
if (IsZERO(dot) || dot > ZERO) {
dot = float3Dot(&AC, &AO);
if (IsZERO(dot) || dot > ZERO) {
// C is already in place
SimplexSet(simplex, 1, A);
SimplexSetSize(simplex, 2);
tripleCross(&AC, &AO, &AC, dir);
}
else {
dot = float3Dot(&AB, &AO);
if (IsZERO(dot) || dot > ZERO) {
SimplexSet(simplex, 0, B);
SimplexSet(simplex, 1, A);
SimplexSetSize(simplex, 2);
tripleCross(&AB, &AO, &AB, dir);
}
else {
SimplexSet(simplex, 0, A);
SimplexSetSize(simplex, 1);
float3Copy(dir, &AO);
}
}
}
else {
float3Cross(&tmp, &AB, &ABC);
dot = float3Dot(&tmp, &AO);
if (IsZERO(dot) || dot > ZERO) {
dot = float3Dot(&AB, &AO);
if (IsZERO(dot) || dot > ZERO) {
SimplexSet(simplex, 0, B);
SimplexSet(simplex, 1, A);
SimplexSetSize(simplex, 2);
tripleCross(&AB, &AO, &AB, dir);
}
else {
SimplexSet(simplex, 0, A);
SimplexSetSize(simplex, 1);
float3Copy(dir, &AO);
}
}
else {
dot = float3Dot(&ABC, &AO);
if (IsZERO(dot) || dot > ZERO) {
float3Copy(dir, &ABC);
}
else {
support_t Ctmp;
SupportCopy(&Ctmp, C);
SimplexSet(simplex, 0, B);
SimplexSet(simplex, 1, &Ctmp);
float3Copy(dir, &ABC);
float3Scale(dir, -ONE);
}
}
}
return 0;
}
__device__ static int doSimplex4(simplex_t *simplex, float3 *dir)
{
const float3 origin = make_float3(0.f, 0.f, 0.f);
const float3* originPtr = &origin;
const support_t *A, *B, *C, *D;
float3 AO, AB, AC, AD, ABC, ACD, ADB;
int B_on_ACD, C_on_ADB, D_on_ABC;
int AB_O, AC_O, AD_O;
float dist;
// get last added as A
A = SimplexLast(simplex);
// get the other points
B = SimplexPoint(simplex, 2);
C = SimplexPoint(simplex, 1);
D = SimplexPoint(simplex, 0);
// check if tetrahedron is really tetrahedron (has volume > 0)
// if it is not, the simplex can't be expanded and thus no intersection is
// found
dist = Vec3PointTriDist2(&A->v, &B->v, &C->v, &D->v, NULL);
if (IsZERO(dist)) {
return -1;
}
// check if origin lies on some of tetrahedron's face - if so objects
// intersect
dist = Vec3PointTriDist2(originPtr, &A->v, &B->v, &C->v, NULL);
if (IsZERO(dist))
return 1;
dist = Vec3PointTriDist2(originPtr, &A->v, &C->v, &D->v, NULL);
if (IsZERO(dist))
return 1;
dist = Vec3PointTriDist2(originPtr, &A->v, &B->v, &D->v, NULL);
if (IsZERO(dist))
return 1;
dist = Vec3PointTriDist2(originPtr, &B->v, &C->v, &D->v, NULL);
if (IsZERO(dist))
return 1;
// compute AO, AB, AC, AD segments and ABC, ACD, ADB normal vectors
float3Copy(&AO, &A->v);
float3Scale(&AO, -ONE);
float3Sub(&AB, &B->v, &A->v);
float3Sub(&AC, &C->v, &A->v);
float3Sub(&AD, &D->v, &A->v);
float3Cross(&ABC, &AB, &AC);
float3Cross(&ACD, &AC, &AD);
float3Cross(&ADB, &AD, &AB);
// side (positive or negative) of B, C, D relative to planes ACD, ADB
// and ABC respectively
B_on_ACD = Signum(float3Dot(&ACD, &AB));
C_on_ADB = Signum(float3Dot(&ADB, &AC));
D_on_ABC = Signum(float3Dot(&ABC, &AD));
// whether origin is on same side of ACD, ADB, ABC as B, C, D
// respectively
AB_O = Signum(float3Dot(&ACD, &AO)) == B_on_ACD;
AC_O = Signum(float3Dot(&ADB, &AO)) == C_on_ADB;
AD_O = Signum(float3Dot(&ABC, &AO)) == D_on_ABC;
if (AB_O && AC_O && AD_O) {
// origin is in tetrahedron
return 1;
// rearrange simplex to triangle and call doSimplex3()
}
else if (!AB_O) {
// B is farthest from the origin among all of the tetrahedron's
// points, so remove it from the list and go on with the triangle
// case
// D and C are in place
SimplexSet(simplex, 2, A);
SimplexSetSize(simplex, 3);
}
else if (!AC_O) {
// C is farthest
SimplexSet(simplex, 1, D);
SimplexSet(simplex, 0, B);
SimplexSet(simplex, 2, A);
SimplexSetSize(simplex, 3);
}
else { // (!AD_O)
SimplexSet(simplex, 0, C);
SimplexSet(simplex, 1, B);
SimplexSet(simplex, 2, A);
SimplexSetSize(simplex, 3);
}
return doSimplex3(simplex, dir);
}
#pragma endregion
__device__ float Vec3PointTriDist2(const float3 *P, const float3 *x0, const float3 *B, const float3 *C, float3 *witness)
{
// Computation comes from the analytic expression for the triangle (x0, B, C):
// T(s, t) = x0 + s.d1 + t.d2, where d1 = B - x0 and d2 = C - x0.
// The equation for the distance is then:
// D(s, t) = | T(s, t) - P |^2
// This leads to minimization of a quadratic function of two variables.
// The solution is taken only if s is between 0 and 1, t is
// between 0 and 1 and t + s < 1; otherwise the distance to the nearest
// edge segment is computed.
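// Derivation sketch (added for clarity), in terms of the quantities computed
// below (a = x0 - P, u = a.a, v = d1.d1, w = d2.d2, p = a.d1, q = a.d2, r = d1.d2):
// D(s, t) = u + s^2.v + t^2.w + 2.s.p + 2.t.q + 2.s.t.r
// Setting dD/ds = 0 and dD/dt = 0 gives s.v + p + t.r = 0 and t.w + q + s.r = 0,
// hence s = (q.r - w.p) / (w.v - r.r) and t = (-s.r - q) / w, which are exactly
// the expressions used below (with the denominator guarded against zero for
// degenerate triangles).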
float3 d1, d2, a;
float u, v, w, p, q, r, d;
float s, t, dist, dist2;
float3 witness2;
float3Sub(&d1, B, x0);
float3Sub(&d2, C, x0);
float3Sub(&a, x0, P);
u = float3Dot(&a, &a);
v = float3Dot(&d1, &d1);
w = float3Dot(&d2, &d2);
p = float3Dot(&a, &d1);
q = float3Dot(&a, &d2);
r = float3Dot(&d1, &d2);
d = w * v - r * r;
if (IsZERO(d)) {
// To avoid division by zero for zero (or near zero) area triangles
s = t = -1.f;
}
else {
s = (q * r - w * p) / d;
t = (-s * r - q) / w;
}
if ((IsZERO(s) || s > ZERO)
&& (floatEq(s, ONE) || s < ONE)
&& (IsZERO(t) || t > ZERO)
&& (floatEq(t, ONE) || t < ONE)
&& (floatEq(t + s, ONE) || t + s < ONE)) {
if (witness) {
float3Scale(&d1, s);
float3Scale(&d2, t);
float3Copy(witness, x0);
float3Add(witness, witness, &d1);
float3Add(witness, witness, &d2);
dist = float3Dist(witness, P);
}
else {
dist = s * s * v;
dist += t * t * w;
dist += 2.f * s * t * r;
dist += 2.f * s * p;
dist += 2.f * t * q;
dist += u;
}
}
else {
dist = PointSegmentDist(P, x0, B, witness);
dist2 = PointSegmentDist(P, x0, C, &witness2);
if (dist2 < dist) {
dist = dist2;
if (witness)
float3Copy(witness, &witness2);
}
dist2 = PointSegmentDist(P, B, C, &witness2);
if (dist2 < dist) {
dist = dist2;
if (witness)
float3Copy(witness, &witness2);
}
}
return dist;
}
__device__ float PointSegmentDist(const float3 *P, const float3 *x0, const float3 *b, float3 *witness)
{
// The computation comes from solving equation of segment:
// S(t) = x0 + t.d
// where - x0 is initial point of segment
// - d is direction of segment from x0 (|d| > 0)
// - t belongs to <0, 1> interval
//
// Then, the distance from the segment to some point P can be expressed as:
// D(t) = |x0 + t.d - P|^2
// which is the distance from any point on the segment. Minimizing
// this function gives the distance from P to the segment.
// Minimization of D(t) leads to a simple quadratic equation whose
// solution is straightforward.
//
// Bonus of this method is witness point for free.
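// Derivation sketch (added for clarity): with d = b - x0 and a = x0 - P,
// D(t) = |a + t.d|^2 = a.a + 2.t.(a.d) + t^2.(d.d),
// so dD/dt = 2.(a.d) + 2.t.(d.d) = 0 gives t = -(a.d) / (d.d), the value
// computed below; the branches afterwards snap to the end points when t
// falls outside the <0, 1> interval.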
float dist, t;
float3 d, a;
// direction of segment
float3Sub(&d, b, x0);
// precompute vector from P to x0
float3Sub(&a, x0, P);
t = -1.f * float3Dot(&a, &d);
t /= float3Len(&d);
if (t < ZERO || IsZERO(t)) {
dist = float3Dist(x0, P);
if (witness)
float3Copy(witness, x0);
}
else if (t > ONE || floatEq(t, ONE)) {
dist = float3Dist(b, P);
if (witness)
float3Copy(witness, b);
}
else {
if (witness) {
float3Copy(witness, &d);
float3Scale(witness, t);
float3Add(witness, witness, x0);
dist = float3Dist(witness, P);
}
else {
// recycling variables
float3Scale(&d, t);
float3Add(&d, &d, &a);
dist = float3Len(&d);
}
}
return dist;
}
__device__ void quatRotVec(float3 *v, const float4 *q)
{
// original version: 31 mul + 21 add
// optimized version: 18 mul + 12 add
// formula: v = v + 2 * cross(q.xyz, cross(q.xyz, v) + q.w * v)
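// note (added): this expansion of q * v * q^-1 is exact only for unit-length
// quaternions, which the object rotations are assumed to be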
float cross1_x, cross1_y, cross1_z, cross2_x, cross2_y, cross2_z;
float x, y, z, w;
float vx, vy, vz;
vx = v->x;
vy = v->y;
vz = v->z;
w = q->w;
x = q->x;
y = q->y;
z = q->z;
cross1_x = y * vz - z * vy + w * vx;
cross1_y = z * vx - x * vz + w * vy;
cross1_z = x * vy - y * vx + w * vz;
cross2_x = y * cross1_z - z * cross1_y;
cross2_y = z * cross1_x - x * cross1_z;
cross2_z = x * cross1_y - y * cross1_x;
*v = make_float3(vx + 2 * cross2_x, vy + 2 * cross2_y, vz + 2 * cross2_z);
}
__device__ int quatInvert2(float4 *dest, const float4 *src)
{
float4Copy(dest, src);
return quatInvert(dest);
}
__device__ int quatInvert(float4 *q)
{
float len2 = magnitudeSqr(*q);
if (len2 < FLT_EPSILON)
return -1;
len2 = ONE / len2;
q->x = -q->x * len2;
q->y = -q->y * len2;
q->z = -q->z * len2;
q->w = q->w * len2;
return 0;
}
#pragma region inlines
__device__ int Signum(float val)
{
if (IsZERO(val)) {
return 0;
}
else if (val < ZERO) {
return -1;
}
return 1;
}
__device__ void tripleCross(const float3 *a, const float3 *b,
const float3 *c, float3 *d)
{
float3 e;
float3Cross(&e, a, b);
float3Cross(d, &e, c);
}
__device__ int IsZERO(float val)
{
return absolute(val) < COL_EPS;
}
__device__ int floatEq(float a, float b)
{
return a == b;
}
__device__ float absolute(float val)
{
return val > 0 ? val : -val;
}
#pragma endregion
#pragma endregion
|
4fde29a1c764a7064bf35b5cb2294818f2a38789.hip | // !!! This is a file automatically generated by hipify!!!
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_FUNC cxx11_tensor_complex_cwise_ops
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <hip/hip_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
template<typename T>
void test_cuda_complex_cwise_ops() {
const int kNumItems = 2;
std::size_t complex_bytes = kNumItems * sizeof(std::complex<T>);
std::complex<T>* d_in1;
std::complex<T>* d_in2;
std::complex<T>* d_out;
hipMalloc((void**)(&d_in1), complex_bytes);
hipMalloc((void**)(&d_in2), complex_bytes);
hipMalloc((void**)(&d_out), complex_bytes);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in1(
d_in1, kNumItems);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in2(
d_in2, kNumItems);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_out(
d_out, kNumItems);
const std::complex<T> a(3.14f, 2.7f);
const std::complex<T> b(-10.6f, 1.4f);
gpu_in1.device(gpu_device) = gpu_in1.constant(a);
gpu_in2.device(gpu_device) = gpu_in2.constant(b);
enum CwiseOp {
Add = 0,
Sub,
Mul,
Div
};
Tensor<std::complex<T>, 1, 0, int> actual(kNumItems);
for (int op = Add; op <= Div; op++) {
std::complex<T> expected;
switch (static_cast<CwiseOp>(op)) {
case Add:
gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
expected = a + b;
break;
case Sub:
gpu_out.device(gpu_device) = gpu_in1 - gpu_in2;
expected = a - b;
break;
case Mul:
gpu_out.device(gpu_device) = gpu_in1 * gpu_in2;
expected = a * b;
break;
case Div:
gpu_out.device(gpu_device) = gpu_in1 / gpu_in2;
expected = a / b;
break;
}
assert(hipMemcpyAsync(actual.data(), d_out, complex_bytes, hipMemcpyDeviceToHost,
gpu_device.stream()) == hipSuccess);
assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess);
for (int i = 0; i < kNumItems; ++i) {
VERIFY_IS_APPROX(actual(i), expected);
}
}
hipFree(d_in1);
hipFree(d_in2);
hipFree(d_out);
}
void test_cxx11_tensor_complex_cwise_ops()
{
CALL_SUBTEST(test_cuda_complex_cwise_ops<float>());
CALL_SUBTEST(test_cuda_complex_cwise_ops<double>());
}
| 4fde29a1c764a7064bf35b5cb2294818f2a38789.cu | // This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016 Benoit Steiner <[email protected]>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_FUNC cxx11_tensor_complex_cwise_ops
#define EIGEN_USE_GPU
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500
#include <cuda_fp16.h>
#endif
#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>
using Eigen::Tensor;
template<typename T>
void test_cuda_complex_cwise_ops() {
const int kNumItems = 2;
std::size_t complex_bytes = kNumItems * sizeof(std::complex<T>);
std::complex<T>* d_in1;
std::complex<T>* d_in2;
std::complex<T>* d_out;
cudaMalloc((void**)(&d_in1), complex_bytes);
cudaMalloc((void**)(&d_in2), complex_bytes);
cudaMalloc((void**)(&d_out), complex_bytes);
Eigen::CudaStreamDevice stream;
Eigen::GpuDevice gpu_device(&stream);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in1(
d_in1, kNumItems);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_in2(
d_in2, kNumItems);
Eigen::TensorMap<Eigen::Tensor<std::complex<T>, 1, 0, int>, Eigen::Aligned> gpu_out(
d_out, kNumItems);
const std::complex<T> a(3.14f, 2.7f);
const std::complex<T> b(-10.6f, 1.4f);
gpu_in1.device(gpu_device) = gpu_in1.constant(a);
gpu_in2.device(gpu_device) = gpu_in2.constant(b);
enum CwiseOp {
Add = 0,
Sub,
Mul,
Div
};
Tensor<std::complex<T>, 1, 0, int> actual(kNumItems);
for (int op = Add; op <= Div; op++) {
std::complex<T> expected;
switch (static_cast<CwiseOp>(op)) {
case Add:
gpu_out.device(gpu_device) = gpu_in1 + gpu_in2;
expected = a + b;
break;
case Sub:
gpu_out.device(gpu_device) = gpu_in1 - gpu_in2;
expected = a - b;
break;
case Mul:
gpu_out.device(gpu_device) = gpu_in1 * gpu_in2;
expected = a * b;
break;
case Div:
gpu_out.device(gpu_device) = gpu_in1 / gpu_in2;
expected = a / b;
break;
}
assert(cudaMemcpyAsync(actual.data(), d_out, complex_bytes, cudaMemcpyDeviceToHost,
gpu_device.stream()) == cudaSuccess);
assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);
for (int i = 0; i < kNumItems; ++i) {
VERIFY_IS_APPROX(actual(i), expected);
}
}
cudaFree(d_in1);
cudaFree(d_in2);
cudaFree(d_out);
}
void test_cxx11_tensor_complex_cwise_ops()
{
CALL_SUBTEST(test_cuda_complex_cwise_ops<float>());
CALL_SUBTEST(test_cuda_complex_cwise_ops<double>());
}
|
fbf5da8ec76b1032bd11e391e98f8dcc54fe38f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "json_common.h"
#include "json_gpu.h"
#include <io/csv/datetime.cuh>
#include <io/utilities/parsing_utils.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/trie.cuh>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/lists/list_view.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/device_buffer.hpp>
#include <thrust/detail/copy.h>
#include <thrust/find.h>
using cudf::detail::device_span;
namespace cudf {
namespace io {
namespace json {
namespace gpu {
using namespace ::cudf;
using string_pair = std::pair<const char *, size_t>;
namespace {
/**
* @brief CUDA Kernel that adjusts the row range to exclude the characters outside of the top level
* brackets.
*
* The top level brackets characters are excluded from the resulting range.
*
* @param[in] begin Pointer to the first character in the row
* @param[in] end pointer to the first character after the row
*/
__device__ std::pair<char const *, char const *> limit_range_to_brackets(char const *begin,
char const *end)
{
begin = thrust::find_if(
thrust::seq, begin, end, [] __device__(auto c) { return c == '[' || c == '{'; });
end = thrust::find_if(thrust::seq,
thrust::make_reverse_iterator(end),
thrust::make_reverse_iterator(++begin),
[](auto c) { return c == ']' || c == '}'; })
.base();
return {begin, --end};
}
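// Example (added for clarity): for a row such as { "a": 1 } the returned range
// starts just past the opening '{' and ends just before the closing '}', so only
// the object body is handed to the field parsers.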
/**
* @brief Find the first JSON object key in the range.
*
* Assumes that begin is not in the middle of a field.
*
* @param[in] begin Pointer to the first character in the parsing range
* @param[in] end pointer to the first character after the parsing range
* @param[in] quotechar The character used to denote quotes
*
* @return Begin and end iterators of the key name; (`end`, `end`) if a key is not found
*/
__device__ std::pair<char const *, char const *> get_next_key(char const *begin,
char const *end,
char quotechar)
{
// Key starts after the first quote
auto const key_begin = thrust::find(thrust::seq, begin, end, quotechar) + 1;
if (key_begin > end) return {end, end};
// Key ends after the next unescaped quote
auto const key_end_pair = thrust::mismatch(
thrust::seq, key_begin, end - 1, key_begin + 1, [quotechar] __device__(auto prev_ch, auto ch) {
return !(ch == quotechar && prev_ch != '\\');
});
return {key_begin, key_end_pair.second};
}
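// Example (added for clarity): given the fragment "age": 30 and quotechar '"',
// the returned pair brackets the three characters of age (the key name without
// its quotes); the colon and the value are left for the caller to consume.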
/**
* @brief Decodes a numeric value base on templated cudf type T with specified
* base.
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed numeric value
*/
template <typename T, int base>
__inline__ __device__ T decode_value(const char *begin, uint64_t end, ParseOptions const &opts)
{
return cudf::io::gpu::parse_numeric<T, base>(begin, end, opts);
}
/**
* @brief Decodes a numeric value base on templated cudf type T
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed numeric value
*/
template <typename T>
__inline__ __device__ T decode_value(const char *begin, const char *end, ParseOptions const &opts)
{
return cudf::io::gpu::parse_numeric<T>(begin, end, opts);
}
/**
* @brief Decodes a timestamp_D
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_D
*/
template <>
__inline__ __device__ cudf::timestamp_D decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::timestamp_D{cudf::duration_D{parseDateFormat(begin, end, opts.dayfirst)}};
}
/**
* @brief Decodes a timestamp_s
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_s
*/
template <>
__inline__ __device__ cudf::timestamp_s decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
auto milli = parseDateTimeFormat(begin, end, opts.dayfirst);
return cudf::timestamp_s{cudf::duration_s{milli / 1000}};
}
/**
* @brief Decodes a timestamp_ms
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_ms
*/
template <>
__inline__ __device__ cudf::timestamp_ms decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
auto milli = parseDateTimeFormat(begin, end, opts.dayfirst);
return cudf::timestamp_ms{cudf::duration_ms{milli}};
}
/**
* @brief Decodes a timestamp_us
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_us
*/
template <>
__inline__ __device__ cudf::timestamp_us decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
auto milli = parseDateTimeFormat(begin, end, opts.dayfirst);
return cudf::timestamp_us{cudf::duration_us{milli * 1000}};
}
/**
* @brief Decodes a timestamp_ns
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_ns
*/
template <>
__inline__ __device__ cudf::timestamp_ns decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
auto milli = parseDateTimeFormat(begin, end, opts.dayfirst);
return cudf::timestamp_ns{cudf::duration_ns{milli * 1000000}};
}
#ifndef DURATION_DECODE_VALUE
#define DURATION_DECODE_VALUE(Type) \
template <> \
__inline__ __device__ Type decode_value( \
const char *begin, const char *end, ParseOptions const &opts) \
{ \
return Type{parseTimeDeltaFormat<Type>(begin, 0, end - begin)}; \
}
#endif
DURATION_DECODE_VALUE(duration_D)
DURATION_DECODE_VALUE(duration_s)
DURATION_DECODE_VALUE(duration_ms)
DURATION_DECODE_VALUE(duration_us)
DURATION_DECODE_VALUE(duration_ns)
// The purpose of these is merely to allow compilation ONLY
template <>
__inline__ __device__ cudf::string_view decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::string_view{};
}
template <>
__inline__ __device__ cudf::dictionary32 decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::dictionary32{};
}
template <>
__inline__ __device__ cudf::list_view decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::list_view{};
}
template <>
__inline__ __device__ cudf::struct_view decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::struct_view{};
}
template <>
__inline__ __device__ numeric::decimal32 decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return numeric::decimal32{};
}
template <>
__inline__ __device__ numeric::decimal64 decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return numeric::decimal64{};
}
/**
* @brief Functor for converting plain text data to cuDF data type value.
*/
struct ConvertFunctor {
/**
* @brief Template specialization for operator() for types whose values can be
* convertible to a 0 or 1 to represent false/true. The converting is done by
* checking against the default and user-specified true/false values list.
*
* It is handled here rather than within convertStrToValue() as that function
* is used by other types (ex. timestamp) that aren't 'booleable'.
*/
template <typename T, typename std::enable_if_t<std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(char const *begin,
char const *end,
void *output_column,
cudf::size_type row,
const ParseOptions &opts)
{
T &value{static_cast<T *>(output_column)[row]};
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
value = [&opts, end, begin]() -> T {
if (serializedTrieContains(opts.trueValuesTrie, begin, end - begin)) {
return 1;
} else if (serializedTrieContains(opts.falseValuesTrie, begin, end - begin)) {
return 0;
} else {
return decode_value<T>(begin, end - 1, opts);
}
}();
return true;
}
/**
* @brief Dispatch for floating points, which are set to NaN if the input
* is not valid. In such case, the validity mask is set to zero too.
*/
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(
char const *begin, char const *end, void *out_buffer, size_t row, ParseOptions const &opts)
{
auto &value{static_cast<T *>(out_buffer)[row]};
value = decode_value<T>(begin, end - 1, opts);
return !std::isnan(value);
}
/**
* @brief Default template operator() dispatch specialization all data types
* (including wrapper types) that is not covered by above.
*/
template <typename T,
typename std::enable_if_t<!std::is_floating_point<T>::value and
!std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(char const *begin,
char const *end,
void *output_column,
cudf::size_type row,
const ParseOptions &opts)
{
T &value{static_cast<T *>(output_column)[row]};
value = decode_value<T>(begin, end - 1, opts);
return true;
}
};
/**
* @brief Checks whether the given character is a whitespace character.
*
* @param[in] ch The character to check
*
* @return True if the input is whitespace, False otherwise
*/
__inline__ __device__ bool is_whitespace(char ch) { return ch == '\t' || ch == ' '; }
/**
* @brief Adjusts the range to ignore starting/trailing whitespace and quotation characters.
*
* @param[in] begin Pointer to the first character in the parsing range
* @param[in] end pointer to the first character after the parsing range
* @param[in] quotechar The character used to denote quotes; '\0' if none
*
* @return Trimmed range
*/
__inline__ __device__ std::pair<char const *, char const *> trim_whitespaces_quotes(
char const *begin, char const *end, char quotechar = '\0')
{
auto not_whitespace = [] __device__(auto c) { return !is_whitespace(c); };
begin = thrust::find_if(thrust::seq, begin, end, not_whitespace);
end = thrust::find_if(thrust::seq,
thrust::make_reverse_iterator(end),
thrust::make_reverse_iterator(begin),
not_whitespace)
.base();
return {(*begin == quotechar) ? ++begin : begin, (*(end - 1) == quotechar) ? end - 1 : end};
}
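// Example (added for clarity): for a field like   "abc"   with quotechar '"',
// the surrounding whitespace is dropped first and then the single pair of
// enclosing quotes, leaving a range that covers just abc.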
/**
* @brief Returns true if the input character is a valid digit.
* Supports both decimal and hexadecimal digits (uppercase and lowercase).
*
* @param c Character to check
* @param is_hex Whether to check as a hexadecimal
*
* @return `true` if it is digit-like, `false` otherwise
*/
__device__ __inline__ bool is_digit(char c, bool is_hex = false)
{
if (c >= '0' && c <= '9') return true;
if (is_hex) {
if (c >= 'A' && c <= 'F') return true;
if (c >= 'a' && c <= 'f') return true;
}
return false;
}
/**
* @brief Returns true if the counters indicate a potentially valid float.
* False positives are possible because positions are not taken into account.
* For example, field "e.123-" would match the pattern.
*/
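// Worked examples (added for clarity): "1.5e-3" gives len=6, digit=3, decimal=1,
// dash=1, exponent=1 and is accepted; "-12" has neither a decimal point nor an
// exponent and is rejected (it is an integer); "e.123-" produces exactly the same
// counts as "1.5e-3" when its characters are tallied the same way, which is the
// false positive mentioned above.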
__device__ __inline__ bool is_like_float(
long len, long digit_cnt, long decimal_cnt, long dash_cnt, long exponent_cnt)
{
// Can't have more than one exponent and one decimal point
if (decimal_cnt > 1) return false;
if (exponent_cnt > 1) return false;
// Without the exponent or a decimal point, this is an integer, not a float
if (decimal_cnt == 0 && exponent_cnt == 0) return false;
// Can only have one '-' per component
if (dash_cnt > 1 + exponent_cnt) return false;
// If anything other than these characters is present, it's not a float
if (digit_cnt + decimal_cnt + dash_cnt + exponent_cnt != len) return false;
// Needs at least 1 digit, 2 if exponent is present
if (digit_cnt < 1 + exponent_cnt) return false;
return true;
}
/**
* @brief Contains information on a JSON file field.
*/
struct field_descriptor {
cudf::size_type column;
char const *value_begin;
char const *value_end;
};
/**
* @brief Parse the first field in the given range and return its descriptor.
*
* @param[in] begin Pointer to the first character in the parsing range
* @param[in] end pointer to the first character after the parsing range
* @param[in] opts The global parsing behavior options
* @param[in] field_idx Index of the current field in the input row
* @param[in] col_map Pointer to the (column name hash -> column index) map in device memory.
* nullptr is passed when the input file does not consist of objects.
* @return Descriptor of the parsed field
*/
__device__ field_descriptor next_field_descriptor(const char *begin,
const char *end,
ParseOptions const &opts,
cudf::size_type field_idx,
col_map_type *col_map)
{
auto const desc_pre_trim =
col_map == nullptr
// No key - column and begin are trivial
? field_descriptor{field_idx, begin, cudf::io::gpu::seek_field_end(begin, end, opts, true)}
: [&]() {
auto const key_range = get_next_key(begin, end, opts.quotechar);
auto const key_hash = MurmurHash3_32<cudf::string_view>{}(
cudf::string_view(key_range.first, key_range.second - key_range.first));
auto const hash_col = col_map->find(key_hash);
// Fall back to field index if not found (parsing error)
auto const column = (hash_col != col_map->end()) ? (*hash_col).second : field_idx;
// Skip the colon between the key and the value
auto const value_begin = thrust::find(thrust::seq, key_range.second, end, ':') + 1;
return field_descriptor{
column, value_begin, cudf::io::gpu::seek_field_end(value_begin, end, opts, true)};
}();
// Modify start & end to ignore whitespace and quotechars
auto const trimmed_value_range =
trim_whitespaces_quotes(desc_pre_trim.value_begin, desc_pre_trim.value_end, opts.quotechar);
return {desc_pre_trim.column, trimmed_value_range.first, trimmed_value_range.second};
}
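// Example (added for clarity): for the object-row fragment "age": 30, the key
// range covers age, the MurmurHash3_32 of the key is looked up in col_map to
// select the output column, and the returned value range is trimmed down to 30.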
/**
* @brief Returns the range that contains the data in a given row.
*
* Excludes the top-level brackets.
*
* @param[in] data Pointer to the JSON data in device memory
* @param[in] data_size Size of the data buffer, in bytes
* @param[in] rec_starts The offset of each row in the input
* @param[in] num_rows The number of lines/rows
* @param[in] row Index of the row for which the range is returned
*
* @return The begin and end iterators of the row data.
*/
__device__ std::pair<char const *, char const *> get_row_data_range(
device_span<char const> const data, device_span<uint64_t const> const row_offsets, size_type row)
{
auto const row_begin = data.begin() + row_offsets[row];
auto const row_end =
data.begin() + ((row < row_offsets.size() - 1) ? row_offsets[row + 1] : data.size());
return limit_range_to_brackets(row_begin, row_end);
}
/**
* @brief CUDA kernel that parses and converts plain text data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] opts A set of parsing options
* @param[in] data The entire data to read
* @param[in] row_offsets The offset of each row in the input
* @param[in] column_types The data type of each column
* @param[in] col_map Pointer to the (column name hash -> column index) map in device memory.
* nullptr is passed when the input file does not consist of objects.
* @param[out] output_columns The output column data
* @param[out] valid_fields The bitmaps indicating whether column fields are valid
* @param[out] num_valid_fields The numbers of valid fields in columns
*/
__global__ void convert_data_to_columns_kernel(ParseOptions opts,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
device_span<data_type const> const column_types,
col_map_type *col_map,
device_span<void *const> const output_columns,
device_span<bitmask_type *const> const valid_fields,
device_span<cudf::size_type> const num_valid_fields)
{
const auto rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= row_offsets.size()) return;
auto const row_data_range = get_row_data_range(data, row_offsets, rec_id);
auto current = row_data_range.first;
for (size_type input_field_index = 0;
input_field_index < column_types.size() && current < row_data_range.second;
input_field_index++) {
auto const desc =
next_field_descriptor(current, row_data_range.second, opts, input_field_index, col_map);
auto const value_len = desc.value_end - desc.value_begin;
current = desc.value_end + 1;
// Empty fields are not legal values
if (value_len > 0 && !serializedTrieContains(opts.naValuesTrie, desc.value_begin, value_len)) {
// Type dispatcher does not handle strings
if (column_types[desc.column].id() == type_id::STRING) {
auto str_list = static_cast<string_pair *>(output_columns[desc.column]);
str_list[rec_id].first = desc.value_begin;
str_list[rec_id].second = value_len;
// set the valid bitmap - all bits were set to 0 to start
set_bit(valid_fields[desc.column], rec_id);
atomicAdd(&num_valid_fields[desc.column], 1);
} else {
if (cudf::type_dispatcher(column_types[desc.column],
ConvertFunctor{},
desc.value_begin,
desc.value_end,
output_columns[desc.column],
rec_id,
opts)) {
// set the valid bitmap - all bits were set to 0 to start
set_bit(valid_fields[desc.column], rec_id);
atomicAdd(&num_valid_fields[desc.column], 1);
}
}
} else if (column_types[desc.column].id() == type_id::STRING) {
auto str_list = static_cast<string_pair *>(output_columns[desc.column]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
}
}
/**
* @brief CUDA kernel that processes a buffer of data and determines information about the
* column types within.
*
* Data is processed in one row/record at a time, so the number of total
* threads (tid) is equal to the number of rows.
*
* @param[in] opts A set of parsing options
* @param[in] data Input data buffer
* @param[in] rec_starts The offset of each row in the input
* @param[in] col_map Pointer to the (column name hash -> column index) map in device memory.
* nullptr is passed when the input file does not consist of objects.
* @param[in] num_columns The number of columns of input data
* @param[out] column_infos The count for each column data type
*/
__global__ void detect_data_types_kernel(ParseOptions const opts,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
col_map_type *col_map,
int num_columns,
device_span<column_info> const column_infos)
{
auto const rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= row_offsets.size()) return;
auto const are_rows_objects = col_map != nullptr;
auto const row_data_range = get_row_data_range(data, row_offsets, rec_id);
size_type input_field_index = 0;
for (auto current = row_data_range.first;
input_field_index < num_columns && current < row_data_range.second;
input_field_index++) {
auto const desc =
next_field_descriptor(current, row_data_range.second, opts, input_field_index, col_map);
auto const value_len = desc.value_end - desc.value_begin;
// Advance to the next field; +1 to skip the delimiter
current = desc.value_end + 1;
// Checking if the field is empty/valid
if (value_len <= 0 || serializedTrieContains(opts.naValuesTrie, desc.value_begin, value_len)) {
// Increase the null count for array rows, where the null count is initialized to zero.
if (!are_rows_objects) { atomicAdd(&column_infos[desc.column].null_count, 1); }
continue;
} else if (are_rows_objects) {
// For files with object rows, null count is initialized to row count. The value is decreased
// here for every valid field.
atomicAdd(&column_infos[desc.column].null_count, -1);
}
// Don't need counts to detect strings, any field in quotes is deduced to be a string
if (*(desc.value_begin - 1) == opts.quotechar && *desc.value_end == opts.quotechar) {
atomicAdd(&column_infos[desc.column].string_count, 1);
continue;
}
int digit_count = 0;
int decimal_count = 0;
int slash_count = 0;
int dash_count = 0;
int colon_count = 0;
int exponent_count = 0;
int other_count = 0;
const bool maybe_hex =
((value_len > 2 && *desc.value_begin == '0' && *(desc.value_begin + 1) == 'x') ||
(value_len > 3 && *desc.value_begin == '-' && *(desc.value_begin + 1) == '0' &&
*(desc.value_begin + 2) == 'x'));
for (auto pos = desc.value_begin; pos < desc.value_end; ++pos) {
if (is_digit(*pos, maybe_hex)) {
digit_count++;
continue;
}
// Looking for unique characters that will help identify column types
switch (*pos) {
case '.': decimal_count++; break;
case '-': dash_count++; break;
case '/': slash_count++; break;
case ':': colon_count++; break;
case 'e':
case 'E':
if (!maybe_hex && pos > desc.value_begin && pos < desc.value_end - 1) exponent_count++;
break;
default: other_count++; break;
}
}
// For an integer, the digits have to account for the entire string length
int int_req_number_cnt = value_len;
// Off by one if they start with a minus sign
if (*desc.value_begin == '-' && value_len > 1) { --int_req_number_cnt; }
// Off by one if they are a hexadecimal number
if (maybe_hex) { --int_req_number_cnt; }
if (serializedTrieContains(opts.trueValuesTrie, desc.value_begin, value_len) ||
serializedTrieContains(opts.falseValuesTrie, desc.value_begin, value_len)) {
atomicAdd(&column_infos[desc.column].bool_count, 1);
} else if (digit_count == int_req_number_cnt) {
atomicAdd(&column_infos[desc.column].int_count, 1);
} else if (is_like_float(value_len, digit_count, decimal_count, dash_count, exponent_count)) {
atomicAdd(&column_infos[desc.column].float_count, 1);
}
// A date-time field cannot have more than 3 non-special characters
// A number field cannot have more than one decimal point
else if (other_count > 3 || decimal_count > 1) {
atomicAdd(&column_infos[desc.column].string_count, 1);
} else {
// A date field can have either one or two '-' or '/'; a legal combination will only have one
// of them. To simplify the process of auto column detection, we are not covering all the
// date-time format permutations
if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) ||
(dash_count == 0 && slash_count > 0 && slash_count <= 2)) {
if (colon_count <= 2) {
atomicAdd(&column_infos[desc.column].datetime_count, 1);
} else {
atomicAdd(&column_infos[desc.column].string_count, 1);
}
} else {
// Default field type is string
atomicAdd(&column_infos[desc.column].string_count, 1);
}
}
}
if (!are_rows_objects) {
// For array rows, mark missing fields as null
for (; input_field_index < num_columns; ++input_field_index)
atomicAdd(&column_infos[input_field_index].null_count, 1);
}
}
/**
* @brief Input data range that contains a field in key:value format.
*/
struct key_value_range {
char const *key_begin;
char const *key_end;
char const *value_begin;
char const *value_end;
};
/**
* @brief Parse the next field in key:value format and return ranges of its parts.
*/
__device__ key_value_range get_next_key_value_range(char const *begin,
char const *end,
ParseOptions const &opts)
{
auto const key_range = get_next_key(begin, end, opts.quotechar);
// Colon between the key and the value
auto const colon = thrust::find(thrust::seq, key_range.second, end, ':');
if (colon == end) return {end, end, end};
// Field value (including delimiters)
auto const value_end = cudf::io::gpu::seek_field_end(colon + 1, end, opts, true);
return {key_range.first, key_range.second, colon + 1, value_end};
}
/**
* @brief Cuda kernel that collects information about JSON object keys in the file.
*
* @param[in] options A set of parsing options
* @param[in] data Input data buffer
* @param[in] row_offsets The offset of each row in the input
* @param[out] keys_cnt Number of keys found in the file
* @param[out] keys_info optional, information (offset, length, hash) for each found key
*
*/
__global__ void collect_keys_info_kernel(ParseOptions const options,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
unsigned long long int *keys_cnt,
thrust::optional<mutable_table_device_view> keys_info)
{
auto const rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= row_offsets.size()) return;
auto const row_data_range = get_row_data_range(data, row_offsets, rec_id);
auto advance = [&](const char *begin) {
return get_next_key_value_range(begin, row_data_range.second, options);
};
for (auto field_range = advance(row_data_range.first);
field_range.key_begin < row_data_range.second;
field_range = advance(field_range.value_end)) {
auto const idx = atomicAdd(keys_cnt, 1);
if (keys_info.has_value()) {
auto const len = field_range.key_end - field_range.key_begin;
keys_info->column(0).element<uint64_t>(idx) = field_range.key_begin - data.begin();
keys_info->column(1).element<uint16_t>(idx) = len;
keys_info->column(2).element<uint32_t>(idx) =
MurmurHash3_32<cudf::string_view>{}(cudf::string_view(field_range.key_begin, len));
}
}
}
} // namespace
/**
* @copydoc cudf::io::json::gpu::convert_json_to_columns
*/
void convert_json_to_columns(ParseOptions const &opts,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
device_span<data_type const> const column_types,
col_map_type *col_map,
device_span<void *const> const output_columns,
device_span<bitmask_type *const> const valid_fields,
device_span<cudf::size_type> num_valid_fields,
hipStream_t stream)
{
int block_size;
int min_grid_size;
CUDA_TRY(hipOccupancyMaxPotentialBlockSize(
&min_grid_size, &block_size, convert_data_to_columns_kernel));
const int grid_size = (row_offsets.size() + block_size - 1) / block_size;
hipLaunchKernelGGL(( convert_data_to_columns_kernel), dim3(grid_size), dim3(block_size), 0, stream,
opts, data, row_offsets, column_types, col_map, output_columns, valid_fields, num_valid_fields);
CUDA_TRY(hipGetLastError());
}
/**
* @copydoc cudf::io::json::gpu::detect_data_types
*/
std::vector<cudf::io::json::column_info> detect_data_types(
const ParseOptions &options,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
bool do_set_null_count,
int num_columns,
col_map_type *col_map,
hipStream_t stream)
{
int block_size;
int min_grid_size;
CUDA_TRY(
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, detect_data_types_kernel));
rmm::device_vector<cudf::io::json::column_info> d_column_infos(num_columns,
cudf::io::json::column_info{});
if (do_set_null_count) {
// Set the null count to the row count (all fields assumes to be null).
thrust::for_each(
rmm::exec_policy(stream)->on(stream),
d_column_infos.begin(),
d_column_infos.end(),
[num_records = row_offsets.size()] __device__(auto &info) { info.null_count = num_records; });
}
// Calculate actual block count to use based on records count
const int grid_size = (row_offsets.size() + block_size - 1) / block_size;
hipLaunchKernelGGL(( detect_data_types_kernel), dim3(grid_size), dim3(block_size), 0, stream,
options, data, row_offsets, col_map, num_columns, d_column_infos);
CUDA_TRY(hipGetLastError());
auto h_column_infos = std::vector<cudf::io::json::column_info>(num_columns);
thrust::copy(d_column_infos.begin(), d_column_infos.end(), h_column_infos.begin());
return h_column_infos;
}
/**
* @copydoc cudf::io::json::gpu::gpu_collect_keys_info
*/
void collect_keys_info(ParseOptions const &options,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
unsigned long long int *keys_cnt,
thrust::optional<mutable_table_device_view> keys_info,
hipStream_t stream)
{
int block_size;
int min_grid_size;
CUDA_TRY(
hipOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, collect_keys_info_kernel));
// Calculate actual block count to use based on records count
const int grid_size = (row_offsets.size() + block_size - 1) / block_size;
hipLaunchKernelGGL(( collect_keys_info_kernel), dim3(grid_size), dim3(block_size), 0, stream,
options, data, row_offsets, keys_cnt, keys_info);
CUDA_TRY(hipGetLastError());
}
} // namespace gpu
} // namespace json
} // namespace io
} // namespace cudf
| fbf5da8ec76b1032bd11e391e98f8dcc54fe38f0.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "json_common.h"
#include "json_gpu.h"
#include <io/csv/datetime.cuh>
#include <io/utilities/parsing_utils.cuh>
#include <cudf/detail/utilities/hash_functions.cuh>
#include <cudf/detail/utilities/trie.cuh>
#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/lists/list_view.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/utilities/bit.hpp>
#include <cudf/utilities/span.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <rmm/device_buffer.hpp>
#include <thrust/detail/copy.h>
#include <thrust/find.h>
using cudf::detail::device_span;
namespace cudf {
namespace io {
namespace json {
namespace gpu {
using namespace ::cudf;
using string_pair = std::pair<const char *, size_t>;
namespace {
/**
* @brief CUDA Kernel that adjusts the row range to exclude the characters outside of the top level
* brackets.
*
* The top level brackets characters are excluded from the resulting range.
*
* @param[in] begin Pointer to the first character in the row
* @param[in] end pointer to the first character after the row
*/
__device__ std::pair<char const *, char const *> limit_range_to_brackets(char const *begin,
char const *end)
{
begin = thrust::find_if(
thrust::seq, begin, end, [] __device__(auto c) { return c == '[' || c == '{'; });
end = thrust::find_if(thrust::seq,
thrust::make_reverse_iterator(end),
thrust::make_reverse_iterator(++begin),
[](auto c) { return c == ']' || c == '}'; })
.base();
return {begin, --end};
}
/**
* @brief Find the first JSON object key in the range.
*
* Assumes that begin is not in the middle of a field.
*
* @param[in] begin Pointer to the first character in the parsing range
* @param[in] end pointer to the first character after the parsing range
* @param[in] quotechar The character used to denote quotes
*
* @return Begin and end iterators of the key name; (`end`, `end`) if a key is not found
*/
__device__ std::pair<char const *, char const *> get_next_key(char const *begin,
char const *end,
char quotechar)
{
// Key starts after the first quote
auto const key_begin = thrust::find(thrust::seq, begin, end, quotechar) + 1;
if (key_begin > end) return {end, end};
// Key ends after the next unescaped quote
auto const key_end_pair = thrust::mismatch(
thrust::seq, key_begin, end - 1, key_begin + 1, [quotechar] __device__(auto prev_ch, auto ch) {
return !(ch == quotechar && prev_ch != '\\');
});
return {key_begin, key_end_pair.second};
}
/**
* @brief Decodes a numeric value base on templated cudf type T with specified
* base.
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed numeric value
*/
template <typename T, int base>
__inline__ __device__ T decode_value(const char *begin, uint64_t end, ParseOptions const &opts)
{
return cudf::io::gpu::parse_numeric<T, base>(begin, end, opts);
}
/**
 * @brief Decodes a numeric value based on templated cudf type T
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed numeric value
*/
template <typename T>
__inline__ __device__ T decode_value(const char *begin, const char *end, ParseOptions const &opts)
{
return cudf::io::gpu::parse_numeric<T>(begin, end, opts);
}
/**
* @brief Decodes a timestamp_D
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_D
*/
template <>
__inline__ __device__ cudf::timestamp_D decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::timestamp_D{cudf::duration_D{parseDateFormat(begin, end, opts.dayfirst)}};
}
/**
* @brief Decodes a timestamp_s
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_s
*/
template <>
__inline__ __device__ cudf::timestamp_s decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
auto milli = parseDateTimeFormat(begin, end, opts.dayfirst);
return cudf::timestamp_s{cudf::duration_s{milli / 1000}};
}
/**
* @brief Decodes a timestamp_ms
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_ms
*/
template <>
__inline__ __device__ cudf::timestamp_ms decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
auto milli = parseDateTimeFormat(begin, end, opts.dayfirst);
return cudf::timestamp_ms{cudf::duration_ms{milli}};
}
/**
* @brief Decodes a timestamp_us
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_us
*/
template <>
__inline__ __device__ cudf::timestamp_us decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
auto milli = parseDateTimeFormat(begin, end, opts.dayfirst);
return cudf::timestamp_us{cudf::duration_us{milli * 1000}};
}
/**
* @brief Decodes a timestamp_ns
*
* @param[in] begin Beginning of the character string
* @param[in] end End of the character string
* @param opts The global parsing behavior options
*
* @return The parsed timestamp_ns
*/
template <>
__inline__ __device__ cudf::timestamp_ns decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
auto milli = parseDateTimeFormat(begin, end, opts.dayfirst);
return cudf::timestamp_ns{cudf::duration_ns{milli * 1000000}};
}
#ifndef DURATION_DECODE_VALUE
#define DURATION_DECODE_VALUE(Type) \
template <> \
__inline__ __device__ Type decode_value( \
const char *begin, const char *end, ParseOptions const &opts) \
{ \
return Type{parseTimeDeltaFormat<Type>(begin, 0, end - begin)}; \
}
#endif
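// For instance, DURATION_DECODE_VALUE(duration_s) expands to a specialization
// equivalent to:
//   template <>
//   __inline__ __device__ duration_s decode_value(const char *begin,
//                                                 const char *end,
//                                                 ParseOptions const &opts)
//   {
//     return duration_s{parseTimeDeltaFormat<duration_s>(begin, 0, end - begin)};
//   }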
DURATION_DECODE_VALUE(duration_D)
DURATION_DECODE_VALUE(duration_s)
DURATION_DECODE_VALUE(duration_ms)
DURATION_DECODE_VALUE(duration_us)
DURATION_DECODE_VALUE(duration_ns)
// The purpose of these is merely to allow compilation ONLY
template <>
__inline__ __device__ cudf::string_view decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::string_view{};
}
template <>
__inline__ __device__ cudf::dictionary32 decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::dictionary32{};
}
template <>
__inline__ __device__ cudf::list_view decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::list_view{};
}
template <>
__inline__ __device__ cudf::struct_view decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return cudf::struct_view{};
}
template <>
__inline__ __device__ numeric::decimal32 decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return numeric::decimal32{};
}
template <>
__inline__ __device__ numeric::decimal64 decode_value(const char *begin,
const char *end,
ParseOptions const &opts)
{
return numeric::decimal64{};
}
/**
* @brief Functor for converting plain text data to cuDF data type value.
*/
struct ConvertFunctor {
/**
* @brief Template specialization for operator() for types whose values can be
   * convertible to a 0 or 1 to represent false/true. The conversion is done by
* checking against the default and user-specified true/false values list.
*
* It is handled here rather than within convertStrToValue() as that function
* is used by other types (ex. timestamp) that aren't 'booleable'.
*/
template <typename T, typename std::enable_if_t<std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(char const *begin,
char const *end,
void *output_column,
cudf::size_type row,
const ParseOptions &opts)
{
T &value{static_cast<T *>(output_column)[row]};
// Check for user-specified true/false values first, where the output is
// replaced with 1/0 respectively
value = [&opts, end, begin]() -> T {
if (serializedTrieContains(opts.trueValuesTrie, begin, end - begin)) {
return 1;
} else if (serializedTrieContains(opts.falseValuesTrie, begin, end - begin)) {
return 0;
} else {
return decode_value<T>(begin, end - 1, opts);
}
}();
return true;
}
/**
* @brief Dispatch for floating points, which are set to NaN if the input
* is not valid. In such case, the validity mask is set to zero too.
*/
template <typename T, typename std::enable_if_t<std::is_floating_point<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(
char const *begin, char const *end, void *out_buffer, size_t row, ParseOptions const &opts)
{
auto &value{static_cast<T *>(out_buffer)[row]};
value = decode_value<T>(begin, end - 1, opts);
return !std::isnan(value);
}
/**
* @brief Default template operator() dispatch specialization all data types
* (including wrapper types) that is not covered by above.
*/
template <typename T,
typename std::enable_if_t<!std::is_floating_point<T>::value and
!std::is_integral<T>::value> * = nullptr>
__host__ __device__ __forceinline__ bool operator()(char const *begin,
char const *end,
void *output_column,
cudf::size_type row,
const ParseOptions &opts)
{
T &value{static_cast<T *>(output_column)[row]};
value = decode_value<T>(begin, end - 1, opts);
return true;
}
};
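// ConvertFunctor is invoked through cudf::type_dispatcher in
// convert_data_to_columns_kernel below; e.g. an INT32 column routes to the
// integral overload above, which also honors the user-supplied true/false tokens.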
/**
* @brief Checks whether the given character is a whitespace character.
*
* @param[in] ch The character to check
*
* @return True if the input is whitespace, False otherwise
*/
__inline__ __device__ bool is_whitespace(char ch) { return ch == '\t' || ch == ' '; }
/**
* @brief Adjusts the range to ignore starting/trailing whitespace and quotation characters.
*
* @param[in] begin Pointer to the first character in the parsing range
* @param[in] end pointer to the first character after the parsing range
* @param[in] quotechar The character used to denote quotes; '\0' if none
*
* @return Trimmed range
*/
__inline__ __device__ std::pair<char const *, char const *> trim_whitespaces_quotes(
char const *begin, char const *end, char quotechar = '\0')
{
auto not_whitespace = [] __device__(auto c) { return !is_whitespace(c); };
begin = thrust::find_if(thrust::seq, begin, end, not_whitespace);
end = thrust::find_if(thrust::seq,
thrust::make_reverse_iterator(end),
thrust::make_reverse_iterator(begin),
not_whitespace)
.base();
return {(*begin == quotechar) ? ++begin : begin, (*(end - 1) == quotechar) ? end - 1 : end};
}
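// Example (illustrative): for the field `  "abc"  ` with quotechar '"', the
// returned range spans just `abc`.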
/**
 * @brief Returns true if the input character is a valid digit.
* Supports both decimal and hexadecimal digits (uppercase and lowercase).
*
* @param c Character to check
* @param is_hex Whether to check as a hexadecimal
*
* @return `true` if it is digit-like, `false` otherwise
*/
__device__ __inline__ bool is_digit(char c, bool is_hex = false)
{
if (c >= '0' && c <= '9') return true;
if (is_hex) {
if (c >= 'A' && c <= 'F') return true;
if (c >= 'a' && c <= 'f') return true;
}
return false;
}
/**
* @brief Returns true if the counters indicate a potentially valid float.
* False positives are possible because positions are not taken into account.
* For example, field "e.123-" would match the pattern.
*/
__device__ __inline__ bool is_like_float(
long len, long digit_cnt, long decimal_cnt, long dash_cnt, long exponent_cnt)
{
// Can't have more than one exponent and one decimal point
if (decimal_cnt > 1) return false;
if (exponent_cnt > 1) return false;
// Without the exponent or a decimal point, this is an integer, not a float
if (decimal_cnt == 0 && exponent_cnt == 0) return false;
// Can only have one '-' per component
if (dash_cnt > 1 + exponent_cnt) return false;
// If anything other than these characters is present, it's not a float
if (digit_cnt + decimal_cnt + dash_cnt + exponent_cnt != len) return false;
// Needs at least 1 digit, 2 if exponent is present
if (digit_cnt < 1 + exponent_cnt) return false;
return true;
}
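// Illustrative inputs for the heuristic above (counts as tallied in
// detect_data_types_kernel):
//   "-1.5e-3" -> 3 digits, 1 '.', 2 '-', 1 exponent -> true
//   "1.2.3"   -> two decimal points                 -> false
//   "1234"    -> no '.' and no exponent (integer)   -> false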
/**
* @brief Contains information on a JSON file field.
*/
struct field_descriptor {
cudf::size_type column;
char const *value_begin;
char const *value_end;
};
/**
* @brief Parse the first field in the given range and return its descriptor.
*
* @param[in] begin Pointer to the first character in the parsing range
* @param[in] end pointer to the first character after the parsing range
* @param[in] opts The global parsing behavior options
* @param[in] field_idx Index of the current field in the input row
 * @param[in] col_map Pointer to the (column name hash -> column index) map in device memory.
* nullptr is passed when the input file does not consist of objects.
* @return Descriptor of the parsed field
*/
__device__ field_descriptor next_field_descriptor(const char *begin,
const char *end,
ParseOptions const &opts,
cudf::size_type field_idx,
col_map_type *col_map)
{
auto const desc_pre_trim =
col_map == nullptr
// No key - column and begin are trivial
? field_descriptor{field_idx, begin, cudf::io::gpu::seek_field_end(begin, end, opts, true)}
: [&]() {
auto const key_range = get_next_key(begin, end, opts.quotechar);
auto const key_hash = MurmurHash3_32<cudf::string_view>{}(
cudf::string_view(key_range.first, key_range.second - key_range.first));
auto const hash_col = col_map->find(key_hash);
// Fall back to field index if not found (parsing error)
auto const column = (hash_col != col_map->end()) ? (*hash_col).second : field_idx;
// Skip the colon between the key and the value
auto const value_begin = thrust::find(thrust::seq, key_range.second, end, ':') + 1;
return field_descriptor{
column, value_begin, cudf::io::gpu::seek_field_end(value_begin, end, opts, true)};
}();
// Modify start & end to ignore whitespace and quotechars
auto const trimmed_value_range =
trim_whitespaces_quotes(desc_pre_trim.value_begin, desc_pre_trim.value_end, opts.quotechar);
return {desc_pre_trim.column, trimmed_value_range.first, trimmed_value_range.second};
}
/**
* @brief Returns the range that contains the data in a given row.
*
* Excludes the top-level brackets.
*
 * @param[in] data The entire JSON data in device memory
 * @param[in] row_offsets The offset of each row in the input
 * @param[in] row Index of the row for which the range is returned
*
* @return The begin and end iterators of the row data.
*/
__device__ std::pair<char const *, char const *> get_row_data_range(
device_span<char const> const data, device_span<uint64_t const> const row_offsets, size_type row)
{
auto const row_begin = data.begin() + row_offsets[row];
auto const row_end =
data.begin() + ((row < row_offsets.size() - 1) ? row_offsets[row + 1] : data.size());
return limit_range_to_brackets(row_begin, row_end);
}
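// Example (illustrative): for the row `{"a": 1, "b": 2}` the returned range spans
// `"a": 1, "b": 2`, i.e. everything between, but not including, the outermost
// brackets.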
/**
* @brief CUDA kernel that parses and converts plain text data into cuDF column data.
*
* Data is processed one record at a time
*
* @param[in] opts A set of parsing options
* @param[in] data The entire data to read
* @param[in] row_offsets The offset of each row in the input
* @param[in] column_types The data type of each column
 * @param[in] col_map Pointer to the (column name hash -> column index) map in device memory.
* nullptr is passed when the input file does not consist of objects.
* @param[out] output_columns The output column data
* @param[out] valid_fields The bitmaps indicating whether column fields are valid
* @param[out] num_valid_fields The numbers of valid fields in columns
*/
__global__ void convert_data_to_columns_kernel(ParseOptions opts,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
device_span<data_type const> const column_types,
col_map_type *col_map,
device_span<void *const> const output_columns,
device_span<bitmask_type *const> const valid_fields,
device_span<cudf::size_type> const num_valid_fields)
{
const auto rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= row_offsets.size()) return;
auto const row_data_range = get_row_data_range(data, row_offsets, rec_id);
auto current = row_data_range.first;
for (size_type input_field_index = 0;
input_field_index < column_types.size() && current < row_data_range.second;
input_field_index++) {
auto const desc =
next_field_descriptor(current, row_data_range.second, opts, input_field_index, col_map);
auto const value_len = desc.value_end - desc.value_begin;
current = desc.value_end + 1;
// Empty fields are not legal values
if (value_len > 0 && !serializedTrieContains(opts.naValuesTrie, desc.value_begin, value_len)) {
// Type dispatcher does not handle strings
if (column_types[desc.column].id() == type_id::STRING) {
auto str_list = static_cast<string_pair *>(output_columns[desc.column]);
str_list[rec_id].first = desc.value_begin;
str_list[rec_id].second = value_len;
// set the valid bitmap - all bits were set to 0 to start
set_bit(valid_fields[desc.column], rec_id);
atomicAdd(&num_valid_fields[desc.column], 1);
} else {
if (cudf::type_dispatcher(column_types[desc.column],
ConvertFunctor{},
desc.value_begin,
desc.value_end,
output_columns[desc.column],
rec_id,
opts)) {
// set the valid bitmap - all bits were set to 0 to start
set_bit(valid_fields[desc.column], rec_id);
atomicAdd(&num_valid_fields[desc.column], 1);
}
}
} else if (column_types[desc.column].id() == type_id::STRING) {
auto str_list = static_cast<string_pair *>(output_columns[desc.column]);
str_list[rec_id].first = nullptr;
str_list[rec_id].second = 0;
}
}
}
/**
* @brief CUDA kernel that processes a buffer of data and determines information about the
* column types within.
*
 * Data is processed one row/record at a time, so the total number of
 * threads (tid) is equal to the number of rows.
*
* @param[in] opts A set of parsing options
* @param[in] data Input data buffer
 * @param[in] row_offsets The offset of each row in the input
* @param[in] col_map Pointer to the (column name hash -> column index) map in device memory.
* nullptr is passed when the input file does not consist of objects.
* @param[in] num_columns The number of columns of input data
* @param[out] column_infos The count for each column data type
*/
__global__ void detect_data_types_kernel(ParseOptions const opts,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
col_map_type *col_map,
int num_columns,
device_span<column_info> const column_infos)
{
auto const rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= row_offsets.size()) return;
auto const are_rows_objects = col_map != nullptr;
auto const row_data_range = get_row_data_range(data, row_offsets, rec_id);
size_type input_field_index = 0;
for (auto current = row_data_range.first;
input_field_index < num_columns && current < row_data_range.second;
input_field_index++) {
auto const desc =
next_field_descriptor(current, row_data_range.second, opts, input_field_index, col_map);
auto const value_len = desc.value_end - desc.value_begin;
// Advance to the next field; +1 to skip the delimiter
current = desc.value_end + 1;
// Checking if the field is empty/valid
if (value_len <= 0 || serializedTrieContains(opts.naValuesTrie, desc.value_begin, value_len)) {
// Increase the null count for array rows, where the null count is initialized to zero.
if (!are_rows_objects) { atomicAdd(&column_infos[desc.column].null_count, 1); }
continue;
} else if (are_rows_objects) {
// For files with object rows, null count is initialized to row count. The value is decreased
// here for every valid field.
atomicAdd(&column_infos[desc.column].null_count, -1);
}
// Don't need counts to detect strings, any field in quotes is deduced to be a string
if (*(desc.value_begin - 1) == opts.quotechar && *desc.value_end == opts.quotechar) {
atomicAdd(&column_infos[desc.column].string_count, 1);
continue;
}
int digit_count = 0;
int decimal_count = 0;
int slash_count = 0;
int dash_count = 0;
int colon_count = 0;
int exponent_count = 0;
int other_count = 0;
const bool maybe_hex =
((value_len > 2 && *desc.value_begin == '0' && *(desc.value_begin + 1) == 'x') ||
(value_len > 3 && *desc.value_begin == '-' && *(desc.value_begin + 1) == '0' &&
*(desc.value_begin + 2) == 'x'));
for (auto pos = desc.value_begin; pos < desc.value_end; ++pos) {
if (is_digit(*pos, maybe_hex)) {
digit_count++;
continue;
}
// Looking for unique characters that will help identify column types
switch (*pos) {
case '.': decimal_count++; break;
case '-': dash_count++; break;
case '/': slash_count++; break;
case ':': colon_count++; break;
case 'e':
case 'E':
if (!maybe_hex && pos > desc.value_begin && pos < desc.value_end - 1) exponent_count++;
break;
default: other_count++; break;
}
}
// Integers have to have the length of the string
int int_req_number_cnt = value_len;
// Off by one if they start with a minus sign
if (*desc.value_begin == '-' && value_len > 1) { --int_req_number_cnt; }
// Off by one if they are a hexadecimal number
if (maybe_hex) { --int_req_number_cnt; }
if (serializedTrieContains(opts.trueValuesTrie, desc.value_begin, value_len) ||
serializedTrieContains(opts.falseValuesTrie, desc.value_begin, value_len)) {
atomicAdd(&column_infos[desc.column].bool_count, 1);
} else if (digit_count == int_req_number_cnt) {
atomicAdd(&column_infos[desc.column].int_count, 1);
} else if (is_like_float(value_len, digit_count, decimal_count, dash_count, exponent_count)) {
atomicAdd(&column_infos[desc.column].float_count, 1);
}
// A date-time field cannot have more than 3 non-special characters
// A number field cannot have more than one decimal point
else if (other_count > 3 || decimal_count > 1) {
atomicAdd(&column_infos[desc.column].string_count, 1);
} else {
      // A date field can have either one or two '-' or '/'; a legal combination will only have one
      // of them. To simplify the process of auto column detection, we are not covering all the
      // date-time format permutations.
if ((dash_count > 0 && dash_count <= 2 && slash_count == 0) ||
(dash_count == 0 && slash_count > 0 && slash_count <= 2)) {
if (colon_count <= 2) {
atomicAdd(&column_infos[desc.column].datetime_count, 1);
} else {
atomicAdd(&column_infos[desc.column].string_count, 1);
}
} else {
// Default field type is string
atomicAdd(&column_infos[desc.column].string_count, 1);
}
}
}
if (!are_rows_objects) {
// For array rows, mark missing fields as null
for (; input_field_index < num_columns; ++input_field_index)
atomicAdd(&column_infos[input_field_index].null_count, 1);
}
}
/**
* @brief Input data range that contains a field in key:value format.
*/
struct key_value_range {
char const *key_begin;
char const *key_end;
char const *value_begin;
char const *value_end;
};
/**
* @brief Parse the next field in key:value format and return ranges of its parts.
*/
__device__ key_value_range get_next_key_value_range(char const *begin,
char const *end,
ParseOptions const &opts)
{
auto const key_range = get_next_key(begin, end, opts.quotechar);
// Colon between the key and the value
auto const colon = thrust::find(thrust::seq, key_range.second, end, ':');
if (colon == end) return {end, end, end};
// Field value (including delimiters)
auto const value_end = cudf::io::gpu::seek_field_end(colon + 1, end, opts, true);
return {key_range.first, key_range.second, colon + 1, value_end};
}
/**
* @brief Cuda kernel that collects information about JSON object keys in the file.
*
* @param[in] options A set of parsing options
* @param[in] data Input data buffer
* @param[in] row_offsets The offset of each row in the input
* @param[out] keys_cnt Number of keys found in the file
* @param[out] keys_info optional, information (offset, length, hash) for each found key
*
*/
__global__ void collect_keys_info_kernel(ParseOptions const options,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
unsigned long long int *keys_cnt,
thrust::optional<mutable_table_device_view> keys_info)
{
auto const rec_id = threadIdx.x + (blockDim.x * blockIdx.x);
if (rec_id >= row_offsets.size()) return;
auto const row_data_range = get_row_data_range(data, row_offsets, rec_id);
auto advance = [&](const char *begin) {
return get_next_key_value_range(begin, row_data_range.second, options);
};
for (auto field_range = advance(row_data_range.first);
field_range.key_begin < row_data_range.second;
field_range = advance(field_range.value_end)) {
auto const idx = atomicAdd(keys_cnt, 1);
if (keys_info.has_value()) {
auto const len = field_range.key_end - field_range.key_begin;
keys_info->column(0).element<uint64_t>(idx) = field_range.key_begin - data.begin();
keys_info->column(1).element<uint16_t>(idx) = len;
keys_info->column(2).element<uint32_t>(idx) =
MurmurHash3_32<cudf::string_view>{}(cudf::string_view(field_range.key_begin, len));
}
}
}
} // namespace
/**
* @copydoc cudf::io::json::gpu::convert_json_to_columns
*/
void convert_json_to_columns(ParseOptions const &opts,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
device_span<data_type const> const column_types,
col_map_type *col_map,
device_span<void *const> const output_columns,
device_span<bitmask_type *const> const valid_fields,
device_span<cudf::size_type> num_valid_fields,
cudaStream_t stream)
{
int block_size;
int min_grid_size;
CUDA_TRY(cudaOccupancyMaxPotentialBlockSize(
&min_grid_size, &block_size, convert_data_to_columns_kernel));
const int grid_size = (row_offsets.size() + block_size - 1) / block_size;
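  // Ceil division (illustrative): with a suggested block_size of 256 and 1000 rows
  // this yields (1000 + 255) / 256 = 4 blocks; surplus threads exit early in the
  // kernel's bounds check.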
convert_data_to_columns_kernel<<<grid_size, block_size, 0, stream>>>(
opts, data, row_offsets, column_types, col_map, output_columns, valid_fields, num_valid_fields);
CUDA_TRY(cudaGetLastError());
}
/**
* @copydoc cudf::io::json::gpu::detect_data_types
*/
std::vector<cudf::io::json::column_info> detect_data_types(
const ParseOptions &options,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
bool do_set_null_count,
int num_columns,
col_map_type *col_map,
cudaStream_t stream)
{
int block_size;
int min_grid_size;
CUDA_TRY(
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, detect_data_types_kernel));
rmm::device_vector<cudf::io::json::column_info> d_column_infos(num_columns,
cudf::io::json::column_info{});
if (do_set_null_count) {
    // Set the null count to the row count (all fields assumed to be null).
thrust::for_each(
rmm::exec_policy(stream)->on(stream),
d_column_infos.begin(),
d_column_infos.end(),
[num_records = row_offsets.size()] __device__(auto &info) { info.null_count = num_records; });
}
// Calculate actual block count to use based on records count
const int grid_size = (row_offsets.size() + block_size - 1) / block_size;
detect_data_types_kernel<<<grid_size, block_size, 0, stream>>>(
options, data, row_offsets, col_map, num_columns, d_column_infos);
CUDA_TRY(cudaGetLastError());
auto h_column_infos = std::vector<cudf::io::json::column_info>(num_columns);
thrust::copy(d_column_infos.begin(), d_column_infos.end(), h_column_infos.begin());
return h_column_infos;
}
/**
* @copydoc cudf::io::json::gpu::gpu_collect_keys_info
*/
void collect_keys_info(ParseOptions const &options,
device_span<char const> const data,
device_span<uint64_t const> const row_offsets,
unsigned long long int *keys_cnt,
thrust::optional<mutable_table_device_view> keys_info,
cudaStream_t stream)
{
int block_size;
int min_grid_size;
CUDA_TRY(
cudaOccupancyMaxPotentialBlockSize(&min_grid_size, &block_size, collect_keys_info_kernel));
// Calculate actual block count to use based on records count
const int grid_size = (row_offsets.size() + block_size - 1) / block_size;
collect_keys_info_kernel<<<grid_size, block_size, 0, stream>>>(
options, data, row_offsets, keys_cnt, keys_info);
CUDA_TRY(cudaGetLastError());
}
} // namespace gpu
} // namespace json
} // namespace io
} // namespace cudf
|
6da25e0da03372295d3341a57d1a8207a6c4e8ae.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef __VMML__VMMLIB_CUBLAS_DGEMM__HPP__
#define __VMML__VMMLIB_CUBLAS_DGEMM__HPP__
#include <vmmlib/matrix.hpp>
#include <vmmlib/exception.hpp>
#include <vmmlib/cublas_includes.hpp>
#include <vmmlib/cublas_types.hpp>
/**
*
* a wrapper for CUBLAS DGEMM routine.
 hipblasStatus_t hipblasDgemm(
     hipblasHandle_t handle,
     hipblasOperation_t transa, hipblasOperation_t transb,
     int m, int n, int k,
     const double *alpha,
     const double *A, int lda,
     const double *B, int ldb,
     const double *beta,
     double *C, int ldc
 )
*
* Purpose
* =======
*
 * CUBLAS DGEMM is a CUDA implementation of the level-3 BLAS DGEMM routine; it
 * performs one of the matrix-matrix operations
*
* C := alpha*op( A )*op( B ) + beta*C,
*
* where op( X ) is one of
*
* op( X ) = X or op( X ) = X**T,
*
* alpha and beta are scalars, and A, B and C are matrices, with op( A )
* an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
*
*
* more information in: http://www.netlib.org/blas/dgemm.f
* or http://www.netlib.org/clapack/cblas/dgemm.c
**
*/
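/*
 * Element-wise, with op( A ) = A and op( B ) = B, the update reads
 *
 *   C(i,j) = alpha * sum_{l=0..k-1} A(i,l) * B(l,j) + beta * C(i,j),
 *
 * which for the column-major storage assumed by BLAS/CUBLAS corresponds to the
 * following reference loop (illustrative only, not used by the wrapper below):
 *
 *   for (int j = 0; j < n; ++j)
 *     for (int i = 0; i < m; ++i) {
 *       double acc = 0.0;
 *       for (int l = 0; l < k; ++l) acc += A[i + l * lda] * B[l + j * ldb];
 *       C[i + j * ldc] = alpha * acc + beta * C[i + j * ldc];
 *     }
 */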
namespace vmml
{
namespace cublas
{
#if 0
/* CUBLAS DGEMM Subroutine */
hipblasStatus_t hipblasDgemm(
hipblasHandle_t handle,
hipblasOperation_t transa, hipblasOperation_t transb,
int m, int n, int k,
const double *alpha,
const double *A, int lda,
const double *B, int ldb,
const double *beta,
double *C, int ldc
)
/* DGEMM Subroutine */
void cblas_dgemm(enum CBLAS_ORDER Order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANSPOSE TransB,
blasint M, blasint N, blasint K,
double alpha, double *A, blasint lda, double *B, blasint ldb, double beta, double *C, blasint ldc);
#endif
template< typename float_t >
struct dgemm_params
{
//FIXME maybe add const
hipblasHandle_t handle;
hipblasOperation_t trans_a;
hipblasOperation_t trans_b;
cublas_int m;
cublas_int n;
cublas_int k;
float_t alpha;
float_t* h_a; //host
float_t* d_a; //device
cublas_int lda; //leading dimension of input array matrix left
float_t* h_b;
float_t* d_b;
cublas_int ldb; //leading dimension of input array matrix right
float_t beta;
float_t* h_c;
float_t* d_c;
cublas_int ldc; //leading dimension of output array matrix right
friend std::ostream& operator << ( std::ostream& os,
const dgemm_params< float_t >& p )
{
os
<< " (1)\thandle " << p.handle << std::endl
<< " (2)\ttrans_a " << p.trans_a << std::endl
<< " (3)\ttrans_b " << p.trans_b << std::endl
<< " (4)\tm " << p.m << std::endl
<< " (6)\tn " << p.n << std::endl
<< " (5)\tk " << p.k << std::endl
<< " (7)\talpha " << p.alpha << std::endl
<< " (8)\th_a " << p.h_a << std::endl
<< " (9)\tlda " << p.lda << std::endl
<< " (10)\th_b " << p.h_b << std::endl
<< " (11)\tldb " << p.ldb << std::endl
<< " (12)\tbeta " << p.beta << std::endl
<< " (13)\th_c " << p.h_c << std::endl
<< " (14)\tldc " << p.ldc << std::endl
<< std::endl;
return os;
}
};
template< typename float_t >
inline void
dgemm_call( dgemm_params< float_t >& p )
{
VMMLIB_ERROR( "not implemented for this type.", VMMLIB_HERE );
}
template<>
inline void
dgemm_call( dgemm_params< float >& p )
{
//std::cout << "calling cublas sgemm (single precision) " << std::endl;
hipblasStatus_t stat = hipblasSgemm(
p.handle,
p.trans_a,
p.trans_b,
p.m,
p.n,
p.k,
&p.alpha,
p.d_a,
p.lda,
p.d_b,
p.ldb,
&p.beta,
p.d_c,
p.ldc
);
}
template<>
inline void
dgemm_call( dgemm_params< double >& p )
{
//std::cout << "calling cublas dgemm (double precision) " << std::endl;
hipblasStatus_t stat = hipblasDgemm(
p.handle,
p.trans_a,
p.trans_b,
p.m,
p.n,
p.k,
&p.alpha,
p.d_a,
p.lda,
p.d_b,
p.ldb,
&p.beta,
p.d_c,
p.ldc
);
}
} // namespace cublas
template< size_t M, size_t K, size_t N, typename float_t >
struct cublas_dgemm
{
typedef matrix< M, K, float_t > matrix_left_t;
typedef matrix< K, M, float_t > matrix_left_t_t;
typedef matrix< K, N, float_t > matrix_right_t;
typedef matrix< N, K, float_t > matrix_right_t_t;
typedef matrix< M, N, float_t > matrix_out_t;
typedef vector< M, float_t > vector_left_t;
typedef vector< N, float_t > vector_right_t;
cublas_dgemm();
~cublas_dgemm();
bool compute( const matrix_left_t& A_, const matrix_right_t& B_, matrix_out_t& C_ );
bool compute( const matrix_left_t& A_, matrix_out_t& C_ );
cublas::dgemm_params< float_t > p;
const cublas::dgemm_params< float_t >& get_params(){ return p; };
}; // struct cublas_dgemm
template< size_t M, size_t K, size_t N, typename float_t >
cublas_dgemm< M, K, N, float_t >::cublas_dgemm()
{
hipblasStatus_t cstat = hipblasCreate( &p.handle ); if ( cstat > 0 ) { printf( "hipblasCreate: status error=%d\n", cstat ); }
p.trans_a = HIPBLAS_OP_N;
p.trans_b = HIPBLAS_OP_N;
p.m = M;
p.n = N;
p.k = K;
p.alpha = 1.0f;
p.h_a = 0;
p.d_a = 0;
p.lda = M;
p.h_b = 0;
p.d_b = 0;
p.ldb = K; //no transpose, use N for transpose
p.beta = 0.0;
p.h_c = 0;
p.d_c = 0;
p.ldc = M;
}
template< size_t M, size_t K, size_t N, typename float_t >
cublas_dgemm< M, K, N, float_t >::~cublas_dgemm()
{
/*hipblasStatus_t cuerr = hipblasDestroy( p.handle );
if ( cuerr > 0 )
{ printf( "hipMemcpy: cublas error=%d\n", cuerr ); }*/
}
template< size_t M, size_t K, size_t N, typename float_t >
bool
cublas_dgemm< M, K, N, float_t >::compute(
const matrix_left_t& A_,
const matrix_right_t& B_,
matrix_out_t& C_
)
{
// cublas needs non-const data
matrix_left_t* AA = new matrix_left_t( A_ );
matrix_right_t* BB = new matrix_right_t( B_ );
C_.zero();
p.h_a = AA->array;
p.h_b = BB->array;
p.h_c = C_.array;
// memory sizes of matrices
size_t mem_size_A = sizeof(float_t) * M * K;
size_t mem_size_B = sizeof(float_t) * K * N;
size_t mem_size_C = sizeof(float_t) * M * N;
// allocate device memory
hipError_t cuerr = hipMalloc( (void**) &p.d_a, mem_size_A ); if ( cuerr > 0 ) { printf( "hipMalloc: cublas error=%d\n", cuerr ); }
cuerr = hipMalloc( (void**) &p.d_b, mem_size_B ); if ( cuerr > 0 ) { printf( "hipMalloc: cublas error=%d\n", cuerr ); }
cuerr = hipMalloc( (void**) &p.d_c, mem_size_C ); if ( cuerr > 0 ) { printf( "hipMalloc: cublas error=%d\n", cuerr ); }
// copy host memory to device
cuerr = hipMemcpy( p.d_a, p.h_a, mem_size_A, hipMemcpyHostToDevice); if ( cuerr > 0 ) { printf( "hipMemcpy: cublas error=%d\n", cuerr ); }
cuerr = hipMemcpy( p.d_b, p.h_b, mem_size_B, hipMemcpyHostToDevice); if ( cuerr > 0 ) { printf( "hipMemcpy: cublas error=%d\n", cuerr ); }
// call CUBLAS V2
cublas::dgemm_call< float_t >( p );
//std::cout << p << std::endl; //debug
hipDeviceSynchronize();
// copy result from device to host
cuerr = hipMemcpy( p.h_c, p.d_c, mem_size_C, hipMemcpyDeviceToHost); if ( cuerr > 0 ) { printf( "hipMemcpy: cublas error=%d\n", cuerr ); }
// clean up memory
hipFree( p.d_a );
hipFree( p.d_b );
hipFree( p.d_c );
hipDeviceReset();
delete AA;
delete BB;
return true;
}
template< size_t M, size_t K, size_t N, typename float_t >
bool
cublas_dgemm< M, K, N, float_t >::compute(
const matrix_left_t& A_,
matrix_out_t& C_
)
{
// cublas needs non-const data
matrix_left_t* AA = new matrix_left_t( A_ );
C_.zero();
p.h_a = AA->array;
p.h_b = AA->array;
p.h_c = C_.array;
p.trans_b = HIPBLAS_OP_T;
p.ldb = N;
// memory sizes of matrices
size_t mem_size_A = sizeof(float_t) * M * K;
size_t mem_size_B = sizeof(float_t) * K * N;
size_t mem_size_C = sizeof(float_t) * M * N;
// allocate device memory
hipError_t cuerr = hipMalloc( (void**) &p.d_a, mem_size_A ); if ( cuerr > 0 ) { printf( "hipMalloc: cublas error=%d\n", cuerr ); }
cuerr = hipMalloc( (void**) &p.d_b, mem_size_B ); if ( cuerr > 0 ) { printf( "hipMalloc: cublas error=%d\n", cuerr ); }
cuerr = hipMalloc( (void**) &p.d_c, mem_size_C ); if ( cuerr > 0 ) { printf( "hipMalloc: cublas error=%d\n", cuerr ); }
// copy host memory to device
cuerr = hipMemcpy( p.d_a, p.h_a, mem_size_A, hipMemcpyHostToDevice); if ( cuerr > 0 ) { printf( "hipMemcpy: cublas error=%d\n", cuerr ); }
cuerr = hipMemcpy( p.d_b, p.h_b, mem_size_B, hipMemcpyHostToDevice); if ( cuerr > 0 ) { printf( "hipMemcpy: cublas error=%d\n", cuerr ); }
// call CUBLAS V2
cublas::dgemm_call< float_t >( p );
//std::cout << p << std::endl; //debug
hipDeviceSynchronize();
// copy result from device to host
cuerr = hipMemcpy( p.h_c, p.d_c, mem_size_C, hipMemcpyDeviceToHost); if ( cuerr > 0 ) { printf( "hipMemcpy: cublas error=%d\n", cuerr ); }
// clean up memory
hipFree( p.d_a );
hipFree( p.d_b );
hipFree( p.d_c );
hipDeviceReset();
delete AA;
return true;
}
} // namespace vmml
#endif
| 6da25e0da03372295d3341a57d1a8207a6c4e8ae.cu | #ifndef __VMML__VMMLIB_CUBLAS_DGEMM__HPP__
#define __VMML__VMMLIB_CUBLAS_DGEMM__HPP__
#include <vmmlib/matrix.hpp>
#include <vmmlib/exception.hpp>
#include <vmmlib/cublas_includes.hpp>
#include <vmmlib/cublas_types.hpp>
/**
*
* a wrapper for CUBLAS DGEMM routine.
 cublasStatus_t cublasDgemm(
     cublasHandle_t handle,
     cublasOperation_t transa, cublasOperation_t transb,
     int m, int n, int k,
     const double *alpha,
     const double *A, int lda,
     const double *B, int ldb,
     const double *beta,
     double *C, int ldc
 )
*
* Purpose
* =======
*
 * CUBLAS DGEMM is a CUDA implementation of the level-3 BLAS DGEMM routine; it
 * performs one of the matrix-matrix operations
*
* C := alpha*op( A )*op( B ) + beta*C,
*
* where op( X ) is one of
*
* op( X ) = X or op( X ) = X**T,
*
* alpha and beta are scalars, and A, B and C are matrices, with op( A )
* an m by k matrix, op( B ) a k by n matrix and C an m by n matrix.
*
*
* more information in: http://www.netlib.org/blas/dgemm.f
* or http://www.netlib.org/clapack/cblas/dgemm.c
**
*/
namespace vmml
{
namespace cublas
{
#if 0
/* CUBLAS DGEMM Subroutine */
cublasStatus_t cublasDgemm(
cublasHandle_t handle,
cublasOperation_t transa, cublasOperation_t transb,
int m, int n, int k,
const double *alpha,
const double *A, int lda,
const double *B, int ldb,
const double *beta,
double *C, int ldc
)
/* DGEMM Subroutine */
void cblas_dgemm(enum CBLAS_ORDER Order, enum CBLAS_TRANSPOSE TransA, enum CBLAS_TRANSPOSE TransB,
blasint M, blasint N, blasint K,
double alpha, double *A, blasint lda, double *B, blasint ldb, double beta, double *C, blasint ldc);
#endif
template< typename float_t >
struct dgemm_params
{
//FIXME maybe add const
cublasHandle_t handle;
cublasOperation_t trans_a;
cublasOperation_t trans_b;
cublas_int m;
cublas_int n;
cublas_int k;
float_t alpha;
float_t* h_a; //host
float_t* d_a; //device
cublas_int lda; //leading dimension of input array matrix left
float_t* h_b;
float_t* d_b;
cublas_int ldb; //leading dimension of input array matrix right
float_t beta;
float_t* h_c;
float_t* d_c;
cublas_int ldc; //leading dimension of output array matrix right
friend std::ostream& operator << ( std::ostream& os,
const dgemm_params< float_t >& p )
{
os
<< " (1)\thandle " << p.handle << std::endl
<< " (2)\ttrans_a " << p.trans_a << std::endl
<< " (3)\ttrans_b " << p.trans_b << std::endl
<< " (4)\tm " << p.m << std::endl
<< " (6)\tn " << p.n << std::endl
<< " (5)\tk " << p.k << std::endl
<< " (7)\talpha " << p.alpha << std::endl
<< " (8)\th_a " << p.h_a << std::endl
<< " (9)\tlda " << p.lda << std::endl
<< " (10)\th_b " << p.h_b << std::endl
<< " (11)\tldb " << p.ldb << std::endl
<< " (12)\tbeta " << p.beta << std::endl
<< " (13)\th_c " << p.h_c << std::endl
<< " (14)\tldc " << p.ldc << std::endl
<< std::endl;
return os;
}
};
template< typename float_t >
inline void
dgemm_call( dgemm_params< float_t >& p )
{
VMMLIB_ERROR( "not implemented for this type.", VMMLIB_HERE );
}
template<>
inline void
dgemm_call( dgemm_params< float >& p )
{
//std::cout << "calling cublas sgemm (single precision) " << std::endl;
cublasStatus_t stat = cublasSgemm(
p.handle,
p.trans_a,
p.trans_b,
p.m,
p.n,
p.k,
&p.alpha,
p.d_a,
p.lda,
p.d_b,
p.ldb,
&p.beta,
p.d_c,
p.ldc
);
}
template<>
inline void
dgemm_call( dgemm_params< double >& p )
{
//std::cout << "calling cublas dgemm (double precision) " << std::endl;
cublasStatus_t stat = cublasDgemm(
p.handle,
p.trans_a,
p.trans_b,
p.m,
p.n,
p.k,
&p.alpha,
p.d_a,
p.lda,
p.d_b,
p.ldb,
&p.beta,
p.d_c,
p.ldc
);
}
} // namespace cublas
template< size_t M, size_t K, size_t N, typename float_t >
struct cublas_dgemm
{
typedef matrix< M, K, float_t > matrix_left_t;
typedef matrix< K, M, float_t > matrix_left_t_t;
typedef matrix< K, N, float_t > matrix_right_t;
typedef matrix< N, K, float_t > matrix_right_t_t;
typedef matrix< M, N, float_t > matrix_out_t;
typedef vector< M, float_t > vector_left_t;
typedef vector< N, float_t > vector_right_t;
cublas_dgemm();
~cublas_dgemm();
bool compute( const matrix_left_t& A_, const matrix_right_t& B_, matrix_out_t& C_ );
bool compute( const matrix_left_t& A_, matrix_out_t& C_ );
cublas::dgemm_params< float_t > p;
const cublas::dgemm_params< float_t >& get_params(){ return p; };
}; // struct cublas_dgemm
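/*
 * Usage sketch (illustrative; sizes are arbitrary and the matrices are filled
 * through the public `array` storage, the same member compute() reads):
 *
 *   cublas_dgemm< 4, 3, 5, double > dgemm;
 *   matrix< 4, 3, double > A;
 *   matrix< 3, 5, double > B;
 *   matrix< 4, 5, double > C;
 *   for ( size_t i = 0; i < 4 * 3; ++i ) A.array[ i ] = 1.0;
 *   for ( size_t i = 0; i < 3 * 5; ++i ) B.array[ i ] = 2.0;
 *   dgemm.compute( A, B, C ); // C = A * B, computed on the GPU
 */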
template< size_t M, size_t K, size_t N, typename float_t >
cublas_dgemm< M, K, N, float_t >::cublas_dgemm()
{
cublasStatus_t cstat = cublasCreate( &p.handle ); if ( cstat > 0 ) { printf( "cublasCreate: status error=%d\n", cstat ); }
p.trans_a = CUBLAS_OP_N;
p.trans_b = CUBLAS_OP_N;
p.m = M;
p.n = N;
p.k = K;
p.alpha = 1.0f;
p.h_a = 0;
p.d_a = 0;
p.lda = M;
p.h_b = 0;
p.d_b = 0;
p.ldb = K; //no transpose, use N for transpose
p.beta = 0.0;
p.h_c = 0;
p.d_c = 0;
p.ldc = M;
}
template< size_t M, size_t K, size_t N, typename float_t >
cublas_dgemm< M, K, N, float_t >::~cublas_dgemm()
{
/*cublasStatus_t cuerr = cublasDestroy( p.handle );
if ( cuerr > 0 )
{ printf( "cudaMemcpy: cublas error=%d\n", cuerr ); }*/
}
template< size_t M, size_t K, size_t N, typename float_t >
bool
cublas_dgemm< M, K, N, float_t >::compute(
const matrix_left_t& A_,
const matrix_right_t& B_,
matrix_out_t& C_
)
{
// cublas needs non-const data
matrix_left_t* AA = new matrix_left_t( A_ );
matrix_right_t* BB = new matrix_right_t( B_ );
C_.zero();
p.h_a = AA->array;
p.h_b = BB->array;
p.h_c = C_.array;
// memory sizes of matrices
size_t mem_size_A = sizeof(float_t) * M * K;
size_t mem_size_B = sizeof(float_t) * K * N;
size_t mem_size_C = sizeof(float_t) * M * N;
// allocate device memory
cudaError_t cuerr = cudaMalloc( (void**) &p.d_a, mem_size_A ); if ( cuerr > 0 ) { printf( "cudaMalloc: cublas error=%d\n", cuerr ); }
cuerr = cudaMalloc( (void**) &p.d_b, mem_size_B ); if ( cuerr > 0 ) { printf( "cudaMalloc: cublas error=%d\n", cuerr ); }
cuerr = cudaMalloc( (void**) &p.d_c, mem_size_C ); if ( cuerr > 0 ) { printf( "cudaMalloc: cublas error=%d\n", cuerr ); }
// copy host memory to device
cuerr = cudaMemcpy( p.d_a, p.h_a, mem_size_A, cudaMemcpyHostToDevice); if ( cuerr > 0 ) { printf( "cudaMemcpy: cublas error=%d\n", cuerr ); }
cuerr = cudaMemcpy( p.d_b, p.h_b, mem_size_B, cudaMemcpyHostToDevice); if ( cuerr > 0 ) { printf( "cudaMemcpy: cublas error=%d\n", cuerr ); }
// call CUBLAS V2
cublas::dgemm_call< float_t >( p );
//std::cout << p << std::endl; //debug
cudaDeviceSynchronize();
// copy result from device to host
cuerr = cudaMemcpy( p.h_c, p.d_c, mem_size_C, cudaMemcpyDeviceToHost); if ( cuerr > 0 ) { printf( "cudaMemcpy: cublas error=%d\n", cuerr ); }
// clean up memory
cudaFree( p.d_a );
cudaFree( p.d_b );
cudaFree( p.d_c );
cudaDeviceReset();
delete AA;
delete BB;
return true;
}
template< size_t M, size_t K, size_t N, typename float_t >
bool
cublas_dgemm< M, K, N, float_t >::compute(
const matrix_left_t& A_,
matrix_out_t& C_
)
{
// cublas needs non-const data
matrix_left_t* AA = new matrix_left_t( A_ );
C_.zero();
p.h_a = AA->array;
p.h_b = AA->array;
p.h_c = C_.array;
p.trans_b = CUBLAS_OP_T;
p.ldb = N;
// memory sizes of matrices
size_t mem_size_A = sizeof(float_t) * M * K;
size_t mem_size_B = sizeof(float_t) * K * N;
size_t mem_size_C = sizeof(float_t) * M * N;
// allocate device memory
cudaError_t cuerr = cudaMalloc( (void**) &p.d_a, mem_size_A ); if ( cuerr > 0 ) { printf( "cudaMalloc: cublas error=%d\n", cuerr ); }
cuerr = cudaMalloc( (void**) &p.d_b, mem_size_B ); if ( cuerr > 0 ) { printf( "cudaMalloc: cublas error=%d\n", cuerr ); }
cuerr = cudaMalloc( (void**) &p.d_c, mem_size_C ); if ( cuerr > 0 ) { printf( "cudaMalloc: cublas error=%d\n", cuerr ); }
// copy host memory to device
cuerr = cudaMemcpy( p.d_a, p.h_a, mem_size_A, cudaMemcpyHostToDevice); if ( cuerr > 0 ) { printf( "cudaMemcpy: cublas error=%d\n", cuerr ); }
cuerr = cudaMemcpy( p.d_b, p.h_b, mem_size_B, cudaMemcpyHostToDevice); if ( cuerr > 0 ) { printf( "cudaMemcpy: cublas error=%d\n", cuerr ); }
// call CUBLAS V2
cublas::dgemm_call< float_t >( p );
//std::cout << p << std::endl; //debug
cudaDeviceSynchronize();
// copy result from device to host
cuerr = cudaMemcpy( p.h_c, p.d_c, mem_size_C, cudaMemcpyDeviceToHost); if ( cuerr > 0 ) { printf( "cudaMemcpy: cublas error=%d\n", cuerr ); }
// clean up memory
cudaFree( p.d_a );
cudaFree( p.d_b );
cudaFree( p.d_c );
cudaDeviceReset();
delete AA;
return true;
}
} // namespace vmml
#endif
|
7e4365aabaed3d856d9fe2ac97658d80ec1cdae9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* GPURandomWalker.cpp
*
* Created on: 26 sie 2019
* Author: pkua
*/
#include <stdexcept>
#include <ostream>
#include "GPURandomWalker.h"
#include "utils/Assertions.h"
#include "utils/CudaCheck.h"
#include "simulation/Timer.h"
__global__
void gpu_random_walk(size_t numberOfTrajectories, RandomWalker::WalkParameters walkParameters,
MoveGenerator* moveGenerator, MoveFilter* moveFilter, Tracer *initialTracers, Point **trajectories,
size_t *acceptedSteps)
{
int i = CUDA_THREAD_IDX;
if (i >= numberOfTrajectories)
return;
Tracer tracer = initialTracers[i];
trajectories[i][0] = tracer.getPosition();
acceptedSteps[i] = 0;
Move rescaledDrift = walkParameters.drift * walkParameters.integrationStep;
for (size_t step = 1; step <= walkParameters.numberOfSteps; step++) {
Move move = moveGenerator->generateMove() + rescaledDrift;
if (moveFilter->isMoveValid(tracer, move)) {
tracer += move;
trajectories[i][step] = tracer.getPosition();
acceptedSteps[i]++;
} else {
trajectories[i][step] = tracer.getPosition();
}
}
}
__global__
void setup_move_filter(MoveFilter* moveFilter, float tracerRadius) {
moveFilter->setupForTracerRadius(tracerRadius);
}
__global__
void random_valid_tracer_vector(MoveFilter* moveFilter, Tracer *validTracersVector, size_t numberOfTrajectories) {
int i = CUDA_THREAD_IDX;
if (i >= numberOfTrajectories)
return;
validTracersVector[i] = moveFilter->randomValidTracer();
}
__global__
void delete_objects(MoveGenerator *moveGenerator, MoveFilter *moveFilter) {
if (!CUDA_IS_IT_FIRST_THREAD)
return;
delete moveGenerator;
delete moveFilter;
}
GPURandomWalker::TrajectoriesOnGPU::TrajectoriesOnGPU(std::size_t numberOfTrajectories, std::size_t numberOfSteps) :
numberOfTrajectories{numberOfTrajectories}, numberOfSteps{numberOfSteps},
cpuVectorOfGPUTrajectories(numberOfTrajectories), cpuVectorOfAcceptedSteps(numberOfTrajectories)
{
cudaCheck( hipMalloc(&this->gpuArrayOfGPUTrajectories, this->numberOfTrajectories*sizeof(Point*)) );
cudaCheck( hipMalloc(&this->gpuArrayOfAcceptedSteps, this->numberOfTrajectories*sizeof(size_t)) );
for (std::size_t i = 0; i < this->numberOfTrajectories; i++) {
// Number of steps plus ONE STEP for initial tracer
cudaCheck( hipMalloc(&(this->cpuVectorOfGPUTrajectories[i]),
(this->numberOfSteps + 1) * sizeof(Point)) );
}
cudaCheck( hipMemcpy(this->gpuArrayOfGPUTrajectories, this->cpuVectorOfGPUTrajectories.data(),
this->numberOfTrajectories*sizeof(Point*), hipMemcpyHostToDevice) );
}
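// Resulting device layout (illustrative): each trajectory gets its own Point
// buffer of numberOfSteps + 1 elements, and gpuArrayOfGPUTrajectories holds the
// device pointers to those buffers, so the kernel can address
// trajectories[i][step] through one extra indirection.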
GPURandomWalker::TrajectoriesOnGPU::~TrajectoriesOnGPU() {
cudaCheck( hipFree(this->gpuArrayOfGPUTrajectories) );
for (auto gpuTrajectory : cpuVectorOfGPUTrajectories)
cudaCheck( hipFree(gpuTrajectory) );
cudaCheck( hipFree(this->gpuArrayOfAcceptedSteps) );
}
void GPURandomWalker::TrajectoriesOnGPU::copyToCPU(std::vector<Trajectory> &trajectories) {
cudaCheck( hipMemcpy(this->cpuVectorOfAcceptedSteps.data(), this->gpuArrayOfAcceptedSteps,
this->numberOfTrajectories*sizeof(size_t), hipMemcpyDeviceToHost) );
for (std::size_t i = 0; i < this->numberOfTrajectories; i++) {
trajectories[i].copyGPUData(this->cpuVectorOfGPUTrajectories[i], this->numberOfSteps + 1,
this->cpuVectorOfAcceptedSteps[i]);
}
}
GPURandomWalker::GPURandomWalker(std::size_t numberOfTrajectories, RandomWalker::WalkParameters walkParameters,
std::size_t numberOfMoveFilterSetupThreads, MoveGenerator *moveGenerator,
MoveFilter *moveFilter, std::ostream &logger) :
numberOfTrajectories{numberOfTrajectories}, walkParameters{walkParameters},
numberOfMoveFilterSetupThreads{numberOfMoveFilterSetupThreads}, moveGenerator{moveGenerator},
moveFilter{moveFilter}, trajectoriesOnGPU(numberOfTrajectories, walkParameters.numberOfSteps)
{
Expects(numberOfTrajectories > 0);
Expects(walkParameters.numberOfSteps > 0);
this->trajectories.resize(numberOfTrajectories);
this->setupMoveFilterForTracerRadius(logger);
}
GPURandomWalker::~GPURandomWalker() {
hipLaunchKernelGGL(( delete_objects), dim3(1), dim3(32), 0, 0, this->moveGenerator, this->moveFilter);
cudaCheck( hipDeviceSynchronize() );
}
void GPURandomWalker::setupMoveFilterForTracerRadius(std::ostream& logger) {
int numberOfBlocks = (this->numberOfMoveFilterSetupThreads + blockSize - 1)
/ blockSize;
logger << "[GPURandomWalker] Setting up MoveFilter... " << std::flush;
hipLaunchKernelGGL(( setup_move_filter), dim3(numberOfBlocks), dim3(blockSize), 0, 0, this->moveFilter, this->walkParameters.tracerRadius);
cudaCheck(hipDeviceSynchronize());
logger << "completed." << std::endl;
}
void GPURandomWalker::printTimerInfo(const Timer &kernelTimer, const Timer ©Timer, std::ostream &logger) {
auto kernelTimeInMus = kernelTimer.countMicroseconds();
auto copyTimeInMus = copyTimer.countMicroseconds();
auto totalTimeInMus = kernelTimeInMus + copyTimeInMus;
auto onlyKernelSingleTrajectoryTimeInMus = kernelTimeInMus / this->numberOfTrajectories;
auto totalSingleTrajectoryTimeInMus = totalTimeInMus / this->numberOfTrajectories;
logger << "[GPURandomWalker::run] Finished after " << totalTimeInMus << " s, which gives ";
logger << onlyKernelSingleTrajectoryTimeInMus << " s per trajectory on average (";
logger << totalSingleTrajectoryTimeInMus << " s with memory fetch)." << std::endl;
}
void GPURandomWalker::run(std::ostream& logger, const std::vector<Tracer> &initialTracers) {
Tracer *gpuInitialTracers;
cudaCheck( hipMalloc(&gpuInitialTracers, this->numberOfTrajectories*sizeof(Tracer)) );
cudaCheck( hipMemcpy(gpuInitialTracers, initialTracers.data(), this->numberOfTrajectories*sizeof(Tracer),
hipMemcpyHostToDevice) );
logger << "[GPURandomWalker::run] Starting simulation... " << std::flush;
Timer kernelTimer;
kernelTimer.start();
int numberOfBlocks = (numberOfTrajectories + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( gpu_random_walk), dim3(numberOfBlocks), dim3(blockSize), 0, 0, this->numberOfTrajectories, this->walkParameters,
this->moveGenerator, this->moveFilter, gpuInitialTracers,
trajectoriesOnGPU.getTrajectoriesArray(),
trajectoriesOnGPU.getAcceptedStepsArray());
cudaCheck( hipDeviceSynchronize() );
kernelTimer.stop();
logger << "completed." << std::endl;
cudaCheck( hipFree(gpuInitialTracers) );
logger << "[GPURandomWalker::run] Fetching data from video memory... " << std::flush;
Timer copyTimer;
copyTimer.start();
trajectoriesOnGPU.copyToCPU(this->trajectories);
copyTimer.stop();
logger << "completed." << std::endl;
this->printTimerInfo(kernelTimer, copyTimer, logger);
}
std::size_t GPURandomWalker::getNumberOfTrajectories() const {
return this->numberOfTrajectories;
}
std::size_t GPURandomWalker::getNumberOfSteps() const {
return this->walkParameters.numberOfSteps;
}
std::vector<Tracer> GPURandomWalker::getRandomInitialTracersVector() {
std::vector<Tracer> cpuInitialTracers(this->numberOfTrajectories);
Tracer *gpuInitialTracers;
cudaCheck( hipMalloc(&gpuInitialTracers, this->numberOfTrajectories*sizeof(Tracer)) );
int numberOfBlocks = (numberOfTrajectories + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( random_valid_tracer_vector), dim3(numberOfBlocks), dim3(blockSize), 0, 0, this->moveFilter, gpuInitialTracers,
this->numberOfTrajectories);
cudaCheck( hipMemcpy(cpuInitialTracers.data(), gpuInitialTracers, this->numberOfTrajectories*sizeof(Tracer),
hipMemcpyDeviceToHost) );
cudaCheck( hipFree(gpuInitialTracers) );
return cpuInitialTracers;
}
const Trajectory &GPURandomWalker::getTrajectory(std::size_t index) const {
return this->trajectories[index];
}
const std::vector<Trajectory> &GPURandomWalker::getTrajectories() const {
return this->trajectories;
}
| 7e4365aabaed3d856d9fe2ac97658d80ec1cdae9.cu | /*
* GPURandomWalker.cpp
*
* Created on: 26 sie 2019
* Author: pkua
*/
#include <stdexcept>
#include <ostream>
#include "GPURandomWalker.h"
#include "utils/Assertions.h"
#include "utils/CudaCheck.h"
#include "simulation/Timer.h"
__global__
void gpu_random_walk(size_t numberOfTrajectories, RandomWalker::WalkParameters walkParameters,
MoveGenerator* moveGenerator, MoveFilter* moveFilter, Tracer *initialTracers, Point **trajectories,
size_t *acceptedSteps)
{
int i = CUDA_THREAD_IDX;
if (i >= numberOfTrajectories)
return;
Tracer tracer = initialTracers[i];
trajectories[i][0] = tracer.getPosition();
acceptedSteps[i] = 0;
Move rescaledDrift = walkParameters.drift * walkParameters.integrationStep;
for (size_t step = 1; step <= walkParameters.numberOfSteps; step++) {
Move move = moveGenerator->generateMove() + rescaledDrift;
if (moveFilter->isMoveValid(tracer, move)) {
tracer += move;
trajectories[i][step] = tracer.getPosition();
acceptedSteps[i]++;
} else {
trajectories[i][step] = tracer.getPosition();
}
}
}
__global__
void setup_move_filter(MoveFilter* moveFilter, float tracerRadius) {
moveFilter->setupForTracerRadius(tracerRadius);
}
__global__
void random_valid_tracer_vector(MoveFilter* moveFilter, Tracer *validTracersVector, size_t numberOfTrajectories) {
int i = CUDA_THREAD_IDX;
if (i >= numberOfTrajectories)
return;
validTracersVector[i] = moveFilter->randomValidTracer();
}
__global__
void delete_objects(MoveGenerator *moveGenerator, MoveFilter *moveFilter) {
if (!CUDA_IS_IT_FIRST_THREAD)
return;
delete moveGenerator;
delete moveFilter;
}
GPURandomWalker::TrajectoriesOnGPU::TrajectoriesOnGPU(std::size_t numberOfTrajectories, std::size_t numberOfSteps) :
numberOfTrajectories{numberOfTrajectories}, numberOfSteps{numberOfSteps},
cpuVectorOfGPUTrajectories(numberOfTrajectories), cpuVectorOfAcceptedSteps(numberOfTrajectories)
{
cudaCheck( cudaMalloc(&this->gpuArrayOfGPUTrajectories, this->numberOfTrajectories*sizeof(Point*)) );
cudaCheck( cudaMalloc(&this->gpuArrayOfAcceptedSteps, this->numberOfTrajectories*sizeof(size_t)) );
for (std::size_t i = 0; i < this->numberOfTrajectories; i++) {
// Number of steps plus ONE STEP for initial tracer
cudaCheck( cudaMalloc(&(this->cpuVectorOfGPUTrajectories[i]),
(this->numberOfSteps + 1) * sizeof(Point)) );
}
cudaCheck( cudaMemcpy(this->gpuArrayOfGPUTrajectories, this->cpuVectorOfGPUTrajectories.data(),
this->numberOfTrajectories*sizeof(Point*), cudaMemcpyHostToDevice) );
}
GPURandomWalker::TrajectoriesOnGPU::~TrajectoriesOnGPU() {
cudaCheck( cudaFree(this->gpuArrayOfGPUTrajectories) );
for (auto gpuTrajectory : cpuVectorOfGPUTrajectories)
cudaCheck( cudaFree(gpuTrajectory) );
cudaCheck( cudaFree(this->gpuArrayOfAcceptedSteps) );
}
void GPURandomWalker::TrajectoriesOnGPU::copyToCPU(std::vector<Trajectory> &trajectories) {
cudaCheck( cudaMemcpy(this->cpuVectorOfAcceptedSteps.data(), this->gpuArrayOfAcceptedSteps,
this->numberOfTrajectories*sizeof(size_t), cudaMemcpyDeviceToHost) );
for (std::size_t i = 0; i < this->numberOfTrajectories; i++) {
trajectories[i].copyGPUData(this->cpuVectorOfGPUTrajectories[i], this->numberOfSteps + 1,
this->cpuVectorOfAcceptedSteps[i]);
}
}
GPURandomWalker::GPURandomWalker(std::size_t numberOfTrajectories, RandomWalker::WalkParameters walkParameters,
std::size_t numberOfMoveFilterSetupThreads, MoveGenerator *moveGenerator,
MoveFilter *moveFilter, std::ostream &logger) :
numberOfTrajectories{numberOfTrajectories}, walkParameters{walkParameters},
numberOfMoveFilterSetupThreads{numberOfMoveFilterSetupThreads}, moveGenerator{moveGenerator},
moveFilter{moveFilter}, trajectoriesOnGPU(numberOfTrajectories, walkParameters.numberOfSteps)
{
Expects(numberOfTrajectories > 0);
Expects(walkParameters.numberOfSteps > 0);
this->trajectories.resize(numberOfTrajectories);
this->setupMoveFilterForTracerRadius(logger);
}
GPURandomWalker::~GPURandomWalker() {
delete_objects<<<1, 32>>>(this->moveGenerator, this->moveFilter);
cudaCheck( cudaDeviceSynchronize() );
}
void GPURandomWalker::setupMoveFilterForTracerRadius(std::ostream& logger) {
int numberOfBlocks = (this->numberOfMoveFilterSetupThreads + blockSize - 1)
/ blockSize;
logger << "[GPURandomWalker] Setting up MoveFilter... " << std::flush;
setup_move_filter<<<numberOfBlocks, blockSize>>>(this->moveFilter, this->walkParameters.tracerRadius);
cudaCheck(cudaDeviceSynchronize());
logger << "completed." << std::endl;
}
void GPURandomWalker::printTimerInfo(const Timer &kernelTimer, const Timer ©Timer, std::ostream &logger) {
auto kernelTimeInMus = kernelTimer.countMicroseconds();
auto copyTimeInMus = copyTimer.countMicroseconds();
auto totalTimeInMus = kernelTimeInMus + copyTimeInMus;
auto onlyKernelSingleTrajectoryTimeInMus = kernelTimeInMus / this->numberOfTrajectories;
auto totalSingleTrajectoryTimeInMus = totalTimeInMus / this->numberOfTrajectories;
logger << "[GPURandomWalker::run] Finished after " << totalTimeInMus << " μs, which gives ";
logger << onlyKernelSingleTrajectoryTimeInMus << " μs per trajectory on average (";
logger << totalSingleTrajectoryTimeInMus << " μs with memory fetch)." << std::endl;
}
void GPURandomWalker::run(std::ostream& logger, const std::vector<Tracer> &initialTracers) {
Tracer *gpuInitialTracers;
cudaCheck( cudaMalloc(&gpuInitialTracers, this->numberOfTrajectories*sizeof(Tracer)) );
cudaCheck( cudaMemcpy(gpuInitialTracers, initialTracers.data(), this->numberOfTrajectories*sizeof(Tracer),
cudaMemcpyHostToDevice) );
logger << "[GPURandomWalker::run] Starting simulation... " << std::flush;
Timer kernelTimer;
kernelTimer.start();
int numberOfBlocks = (numberOfTrajectories + blockSize - 1) / blockSize;
gpu_random_walk<<<numberOfBlocks, blockSize>>>(this->numberOfTrajectories, this->walkParameters,
this->moveGenerator, this->moveFilter, gpuInitialTracers,
trajectoriesOnGPU.getTrajectoriesArray(),
trajectoriesOnGPU.getAcceptedStepsArray());
cudaCheck( cudaDeviceSynchronize() );
kernelTimer.stop();
logger << "completed." << std::endl;
cudaCheck( cudaFree(gpuInitialTracers) );
logger << "[GPURandomWalker::run] Fetching data from video memory... " << std::flush;
Timer copyTimer;
copyTimer.start();
trajectoriesOnGPU.copyToCPU(this->trajectories);
copyTimer.stop();
logger << "completed." << std::endl;
this->printTimerInfo(kernelTimer, copyTimer, logger);
}
std::size_t GPURandomWalker::getNumberOfTrajectories() const {
return this->numberOfTrajectories;
}
std::size_t GPURandomWalker::getNumberOfSteps() const {
return this->walkParameters.numberOfSteps;
}
std::vector<Tracer> GPURandomWalker::getRandomInitialTracersVector() {
std::vector<Tracer> cpuInitialTracers(this->numberOfTrajectories);
Tracer *gpuInitialTracers;
cudaCheck( cudaMalloc(&gpuInitialTracers, this->numberOfTrajectories*sizeof(Tracer)) );
int numberOfBlocks = (numberOfTrajectories + blockSize - 1) / blockSize;
random_valid_tracer_vector<<<numberOfBlocks, blockSize>>>(this->moveFilter, gpuInitialTracers,
this->numberOfTrajectories);
cudaCheck( cudaMemcpy(cpuInitialTracers.data(), gpuInitialTracers, this->numberOfTrajectories*sizeof(Tracer),
cudaMemcpyDeviceToHost) );
cudaCheck( cudaFree(gpuInitialTracers) );
return cpuInitialTracers;
}
const Trajectory &GPURandomWalker::getTrajectory(std::size_t index) const {
return this->trajectories[index];
}
const std::vector<Trajectory> &GPURandomWalker::getTrajectories() const {
return this->trajectories;
}
|
e998a82876a91a7e8f742fae30cbfa3e0462cf44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <random>
#include <time.h>
#define NUM_PARTICLES 100000000
#define NUM_ITERATIONS 100
#define BLOCK_SIZE 64 //Number of threads
#define RANDOM_C 1000
#define RANDOM_V 10
#define VELOCITY_DEC 0.0001
// A particle
typedef struct {
float3 pos;
float3 velocity;
}Particle;
//Update the velocity of a particle given an particle array and an index
__device__ void updateVelocity(Particle* par, int index) {
par[index].pos.x -= VELOCITY_DEC;
par[index].pos.y -= VELOCITY_DEC;
par[index].pos.z -= VELOCITY_DEC;
}
//Update the position of a particle given an particle array and an index
__device__ void updatePos(Particle* par, int index) {
par[index].pos = make_float3(par[index].pos.x + par[index].velocity.x,
par[index].pos.y + par[index].velocity.y, par[index].pos.z + par[index].velocity.z);
}
//Kernel function
__global__ void particleSim(Particle* par, int len) {
const int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= len) return;
updateVelocity(par, id);
updatePos(par, id);
}
// Calculate simulation on CPU
void particleCPU(Particle* par, int len) {
for (int i = 0; i < len; i++) {
//update velocity
par[i].pos.x -= VELOCITY_DEC;
par[i].pos.y -= VELOCITY_DEC;
par[i].pos.z -= VELOCITY_DEC;
//update position
par[i].pos = make_float3(par[i].pos.x + par[i].velocity.x,
par[i].pos.y + par[i].velocity.y, par[i].pos.z + par[i].velocity.z);
}
}
// Evaluate if the cpu and gpu solution are the same
bool equivalent(Particle* p_cpu, Particle* p_gpu, int len){
float margin = 0.00001;
for (int i = 0; i < len; i++) {
//printf("X: %f %f, Y: %f %f Z: %f %f \n", p_gpu[i].pos.x, p_cpu[i].pos.x, p_gpu[i].pos.y,p_cpu[i].pos.y , p_gpu[i].pos.z, p_cpu[i].pos.z);
//Check position
if (fabs(p_gpu[i].pos.x - p_cpu[i].pos.x) > margin ||
fabs(p_gpu[i].pos.y - p_cpu[i].pos.y) > margin ||
fabs(p_gpu[i].pos.z - p_cpu[i].pos.z) > margin) {
return false;
}
//Check velocity
if (fabs(p_gpu[i].velocity.x - p_cpu[i].velocity.x) > margin ||
fabs(p_gpu[i].velocity.y - p_cpu[i].velocity.y) > margin ||
fabs(p_gpu[i].velocity.z - p_cpu[i].velocity.z) > margin) {
return false;
}
}
return true;
}
void runSimulation() {
//To ensure number of blocks is rounded up
dim3 numberOfBlocks((NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 numberOfThreads(BLOCK_SIZE);
Particle* particles = (Particle*)malloc(NUM_PARTICLES * sizeof(Particle));
//Store the result from gpu here
Particle* parallel_results; // = (Particle*)malloc(NUM_PARTICLES * sizeof(Particle));
//Pin host memory
if (hipHostMalloc(&parallel_results, sizeof(Particle) * NUM_PARTICLES, hipHostMallocDefault) != hipSuccess) {
printf("Error in hipHostMalloc\n");
exit(-1);
}
//Fill random values particles
srand((unsigned int)time(NULL));
for (int i = 0; i < NUM_PARTICLES; i++) {
particles[i].pos.x = ((float)rand() / (float)RAND_MAX) * RANDOM_C;
particles[i].pos.y = ((float)rand() / (float)RAND_MAX) * RANDOM_C;
particles[i].pos.z = ((float)rand() / (float)RAND_MAX) * RANDOM_C;
particles[i].velocity.x = ((float)rand() / (float)RAND_MAX) * RANDOM_V;
particles[i].velocity.y = ((float)rand() / (float)RAND_MAX) * RANDOM_V;
particles[i].velocity.z = ((float)rand() / (float)RAND_MAX) * RANDOM_V;
parallel_results[i].pos.x = particles[i].pos.x;
parallel_results[i].pos.y = particles[i].pos.y;
parallel_results[i].pos.z = particles[i].pos.z;
parallel_results[i].velocity.x = particles[i].velocity.x;
parallel_results[i].velocity.y = particles[i].velocity.y;
parallel_results[i].velocity.z = particles[i].velocity.z;
}
Particle* particles_parallel;
//Allocate gpu memory
if (hipMalloc(&particles_parallel, sizeof(Particle) * NUM_PARTICLES) != hipSuccess) {
printf("Error in cudamalloc 1 \n");
exit(-1);
}
//Each iteration: copy the particles to the GPU, advance them one step, copy the result back
for (int i = 0; i < NUM_ITERATIONS; i++) {
hipMemcpy(particles_parallel, parallel_results, sizeof(Particle) * NUM_PARTICLES, hipMemcpyHostToDevice);
particleSim << <numberOfBlocks, numberOfThreads >> > (particles_parallel, NUM_PARTICLES);
hipDeviceSynchronize();
hipMemcpy(parallel_results, particles_parallel, sizeof(Particle) * NUM_PARTICLES, hipMemcpyDeviceToHost);
}
//Run simulation on CPU
clock_t start = clock();
for (int i = 0; i < NUM_ITERATIONS; i++) {
//printf("%d\n",i);
particleCPU(particles, NUM_PARTICLES);
}
double time = (double)(clock() - start) / CLOCKS_PER_SEC;
printf("CPU done in %f seconds!\n", time);
bool res = equivalent(particles, parallel_results, NUM_PARTICLES);
// Free memory
hipFree(particles_parallel);
free(particles);
hipHostFree(parallel_results);
if (res) {
printf("Comparing the output for each implementation, Correct!\n");
}
else {
printf("Comparing the output for each implementation, Wrong \n");
}
}
int main() {
runSimulation();
} | e998a82876a91a7e8f742fae30cbfa3e0462cf44.cu | #include <stdio.h>
#include <random>
#include <time.h>
#define NUM_PARTICLES 100000000
#define NUM_ITERATIONS 100
#define BLOCK_SIZE 64 //Number of threads
#define RANDOM_C 1000
#define RANDOM_V 10
#define VELOCITY_DEC 0.0001
// A particle
typedef struct {
float3 pos;
float3 velocity;
}Particle;
//Update the velocity of a particle given an particle array and an index
__device__ void updateVelocity(Particle* par, int index) {
par[index].pos.x -= VELOCITY_DEC;
par[index].pos.y -= VELOCITY_DEC;
par[index].pos.z -= VELOCITY_DEC;
}
//Update the position of a particle given an particle array and an index
__device__ void updatePos(Particle* par, int index) {
par[index].pos = make_float3(par[index].pos.x + par[index].velocity.x,
par[index].pos.y + par[index].velocity.y, par[index].pos.z + par[index].velocity.z);
}
//Kernel function
__global__ void particleSim(Particle* par, int len) {
const int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= len) return;
updateVelocity(par, id);
updatePos(par, id);
}
// Calculate simulation on CPU
void particleCPU(Particle* par, int len) {
for (int i = 0; i < len; i++) {
//update velocity
par[i].pos.x -= VELOCITY_DEC;
par[i].pos.y -= VELOCITY_DEC;
par[i].pos.z -= VELOCITY_DEC;
//update position
par[i].pos = make_float3(par[i].pos.x + par[i].velocity.x,
par[i].pos.y + par[i].velocity.y, par[i].pos.z + par[i].velocity.z);
}
}
// Evaluate if the cpu and gpu solution are the same
bool equivalent(Particle* p_cpu, Particle* p_gpu, int len){
float margin = 0.00001;
for (int i = 0; i < len; i++) {
//printf("X: %f %f, Y: %f %f Z: %f %f \n", p_gpu[i].pos.x, p_cpu[i].pos.x, p_gpu[i].pos.y,p_cpu[i].pos.y , p_gpu[i].pos.z, p_cpu[i].pos.z);
//Check position
if (fabs(p_gpu[i].pos.x - p_cpu[i].pos.x) > margin ||
fabs(p_gpu[i].pos.y - p_cpu[i].pos.y) > margin ||
fabs(p_gpu[i].pos.z - p_cpu[i].pos.z) > margin) {
return false;
}
//Check velocity
if (fabs(p_gpu[i].velocity.x - p_cpu[i].velocity.x) > margin ||
fabs(p_gpu[i].velocity.y - p_cpu[i].velocity.y) > margin ||
fabs(p_gpu[i].velocity.z - p_cpu[i].velocity.z) > margin) {
return false;
}
}
return true;
}
void runSimulation() {
//To ensure number of blocks is rounded up
dim3 numberOfBlocks((NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 numberOfThreads(BLOCK_SIZE);
Particle* particles = (Particle*)malloc(NUM_PARTICLES * sizeof(Particle));
//Store the result from gpu here
Particle* parallel_results; // = (Particle*)malloc(NUM_PARTICLES * sizeof(Particle));
//Pin host memory
if (cudaHostAlloc(&parallel_results, sizeof(Particle) * NUM_PARTICLES, cudaHostAllocDefault) != cudaSuccess) {
printf("Error in cudaHostAlloc\n");
exit(-1);
}
//Fill random values particles
srand((unsigned int)time(NULL));
for (int i = 0; i < NUM_PARTICLES; i++) {
particles[i].pos.x = ((float)rand() / (float)RAND_MAX) * RANDOM_C;
particles[i].pos.y = ((float)rand() / (float)RAND_MAX) * RANDOM_C;
particles[i].pos.z = ((float)rand() / (float)RAND_MAX) * RANDOM_C;
particles[i].velocity.x = ((float)rand() / (float)RAND_MAX) * RANDOM_V;
particles[i].velocity.y = ((float)rand() / (float)RAND_MAX) * RANDOM_V;
particles[i].velocity.z = ((float)rand() / (float)RAND_MAX) * RANDOM_V;
parallel_results[i].pos.x = particles[i].pos.x;
parallel_results[i].pos.y = particles[i].pos.y;
parallel_results[i].pos.z = particles[i].pos.z;
parallel_results[i].velocity.x = particles[i].velocity.x;
parallel_results[i].velocity.y = particles[i].velocity.y;
parallel_results[i].velocity.z = particles[i].velocity.z;
}
Particle* particles_parallel;
//Allocate gpu memory
if (cudaMalloc(&particles_parallel, sizeof(Particle) * NUM_PARTICLES) != cudaSuccess) {
printf("Error in cudamalloc 1 \n");
exit(-1);
}
//Each iteration: copy the particles to the GPU, advance them one step, copy the result back
for (int i = 0; i < NUM_ITERATIONS; i++) {
cudaMemcpy(particles_parallel, parallel_results, sizeof(Particle) * NUM_PARTICLES, cudaMemcpyHostToDevice);
particleSim << <numberOfBlocks, numberOfThreads >> > (particles_parallel, NUM_PARTICLES);
cudaDeviceSynchronize();
cudaMemcpy(parallel_results, particles_parallel, sizeof(Particle) * NUM_PARTICLES, cudaMemcpyDeviceToHost);
}
//Run simulation on CPU
clock_t start = clock();
for (int i = 0; i < NUM_ITERATIONS; i++) {
//printf("%d\n",i);
particleCPU(particles, NUM_PARTICLES);
}
double time = (double)(clock() - start) / CLOCKS_PER_SEC;
printf("CPU done in %f seconds!\n", time);
bool res = equivalent(particles, parallel_results, NUM_PARTICLES);
// Free memory
cudaFree(particles_parallel);
free(particles);
cudaFreeHost(parallel_results);
if (res) {
printf("Comparing the output for each implementation, Correct!\n");
}
else {
printf("Comparing the output for each implementation, Wrong \n");
}
}
int main() {
runSimulation();
} |
dc16953970756173095021030fd6c99eb79169ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_hypot (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
CAST(hypot)(a[offset_a + gid_0 + gid_1 * ld_a], b[offset_b + gid_0 + gid_1 * ld_b]);
}
} | dc16953970756173095021030fd6c99eb79169ee.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
__global__ void ge_hypot (const int sd, const int fd, const REAL* a, const int offset_a, const int ld_a, const REAL* b, const int offset_b, const int ld_b, REAL* c, const int offset_c, const int ld_c) {
const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x;
const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y;
const bool valid = (gid_0 < sd) && (gid_1 < fd);
if (valid) {
c[offset_c + gid_0 + gid_1 * ld_c] =
CAST(hypot)(a[offset_a + gid_0 + gid_1 * ld_a], b[offset_b + gid_0 + gid_1 * ld_b]);
}
} |
326a561ebff9a171a0ec4fdabdb5240e125378c5.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <omp.h>
#include <stdlib.h>
#include <rocblas.h>
template <typename T>
struct Task
{
unsigned int size, id;
T *data;
T *result;
T *vector;
Task() : size(0), id(0), data(NULL), result(NULL), vector(NULL) {};
Task(unsigned int s): size(s), id(0), data(NULL), result(NULL), vector(NULL)
{
hipMallocManaged(&data, sizeof(T)*size*size);
hipMallocManaged(&result, sizeof(T)*size);
hipMallocManaged(&vector, sizeof(T)*size);
hipDeviceSynchronize();
}
~Task()
{
hipDeviceSynchronize();
hipFree(data);
hipFree(result);
hipFree(vector);
}
void allocate(const unsigned int s, const unsigned int unique_id)
{
id = unique_id;
size = s;
hipMallocManaged(&data, sizeof(T)*size*size);
hipMallocManaged(&result, sizeof(T)*size);
hipMallocManaged(&vector, sizeof(T)*size);
for (int i=0;i<size*size;i++)
{
data[i] = drand48();
}
for (int i=0;i<size;i++)
{
result[i] = 0.;
vector[i] = drand48();
}
}
};
// data is row-major and square
// matrix by vector
template <typename T>
void gemv(int m, int n, T alpha, T *A, T *x, T beta, T *result)
{
for (int i=0; i<n; i++)
{
result[i] *= beta;
for (int j=0; j<n;j++)
{
result[i] += A[i*n+j] * x[j];
}
}
}
template <typename T>
void execute(Task<T> &t, hipblasHandle_t *handle, hipStream_t *stream, int tid)
{
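// Small tasks: run the host gemv on stream 0 with the managed buffers attached to the host.
// Larger tasks: hand the matrix-vector product to hipBLAS on the calling OpenMP thread's own stream.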
if (t.size < 100)
{
printf("t.id%d,tid.t%d,t.size%d\n", (int)t.id,tid.t,t.size);
hipStreamAttachMemAsync(stream[0], t.data, 0, hipMemAttachHost);
hipStreamAttachMemAsync(stream[0], t.result, 0, hipMemAttachHost);
hipStreamAttachMemAsync(stream[0], t.vector, 0, hipMemAttachHost);
hipStreamSynchronize(stream[0]);
gemv(t.size,t.size, 1.0, t.data, t.vector, 0.0, t.result);
}
else
{
printf("t.id%d,tid.t%d,t.size%d\n",t.id,tid.t,t.size);
double one = 1.0;
double zero = 0.0;
hipblasSetStream(handle[tid+1], stream[tid+1]); // ???
hipStreamAttachMemAsync(stream[tid+1],t.data,0,hipMemAttachSingle);
hipStreamAttachMemAsync(stream[tid+1],t.vector,0,hipMemAttachSingle);
hipStreamAttachMemAsync(stream[tid+1],t.result,0,hipMemAttachSingle);
hipblasDgemv(handle[tid+1], HIPBLAS_OP_N, t.size, t.size, &one, t.data,
t.size, t.vector, 1, &zero, t.result, 1);
}
}
template <typename T>
void initialise_tasks(std::vector< Task<T> > &TaskList)
{
for (unsigned int i=0; i< TaskList.size(); i++)
{
int size;
size = std::max((int)(drand48()*1000.0), 64);
TaskList[i].allocate(size,i);
}
}
int main(void)
{
hipDeviceProp_t device_prop;
hipGetDeviceProperties(&device_prop, 1);
if (device_prop.managedMemory)
{
printf("mangedMemory is true\n");
}
else
{
printf("mangedMemory is false\n");
}
const int nthreads = 4;
omp_set_num_threads(nthreads);
hipStream_t *streams = new hipStream_t[nthreads+1];
hipblasHandle_t *handles = new hipblasHandle_t[nthreads+1];
for (int i=0; i<nthreads+1;i++)
{
hipStreamCreate(&streams[i]);
hipblasCreate(&handles[i]);
}
unsigned int N = 40;
std::vector< Task<double> > TaskList(N);
initialise_tasks(TaskList);
unsigned int i;
#pragma omp parallel for schedule(dynamic)
for (i=0; i<TaskList.size();i++)
{
hipSetDevice(0); // all OpenMP threads share the default device
int tid = omp_get_thread_num();
execute(TaskList[i], handles, streams, tid);
}
hipDeviceSynchronize();
for (int i=0; i<nthreads+1;i++)
{
hipStreamDestroy(streams[i]);
hipblasDestroy(handles[i]);
}
std::vector< Task<double> >().swap(TaskList);
hipDeviceReset();
return 0;
}
| 326a561ebff9a171a0ec4fdabdb5240e125378c5.cu | #include <cstdio>
#include <ctime>
#include <vector>
#include <algorithm>
#include <omp.h>
#include <stdlib.h>
#include <cublas_v2.h>
template <typename T>
struct Task
{
unsigned int size, id;
T *data;
T *result;
T *vector;
Task() : size(0), id(0), data(NULL), result(NULL), vector(NULL) {};
Task(unsigned int s): size(s), id(0), data(NULL), result(NULL), vector(NULL)
{
cudaMallocManaged(&data, sizeof(T)*size*size);
cudaMallocManaged(&result, sizeof(T)*size);
cudaMallocManaged(&vector, sizeof(T)*size);
cudaDeviceSynchronize();
}
~Task()
{
cudaDeviceSynchronize();
cudaFree(data);
cudaFree(result);
cudaFree(vector);
}
void allocate(const unsigned int s, const unsigned int unique_id)
{
id = unique_id;
size = s;
cudaMallocManaged(&data, sizeof(T)*size*size);
cudaMallocManaged(&result, sizeof(T)*size);
cudaMallocManaged(&vector, sizeof(T)*size);
for (int i=0;i<size*size;i++)
{
data[i] = drand48();
}
for (int i=0;i<size;i++)
{
result[i] = 0.;
vector[i] = drand48();
}
}
};
// data is row-major and square
// matrix by vector
template <typename T>
void gemv(int m, int n, T alpha, T *A, T *x, T beta, T *result)
{
for (int i=0; i<n; i++)
{
result[i] *= beta;
for (int j=0; j<n;j++)
{
result[i] += A[i*n+j] * x[j];
}
}
}
template <typename T>
void execute(Task<T> &t, cublasHandle_t *handle, cudaStream_t *stream, int tid)
{
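// Small tasks: run the host gemv on stream 0 with the managed buffers attached to the host.
// Larger tasks: hand the matrix-vector product to cuBLAS on the calling OpenMP thread's own stream.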
if (t.size < 100)
{
printf("t.id%d,tid.t%d,t.size%d\n", (int)t.id,tid.t,t.size);
cudaStreamAttachMemAsync(stream[0], t.data, 0, cudaMemAttachHost);
cudaStreamAttachMemAsync(stream[0], t.result, 0, cudaMemAttachHost);
cudaStreamAttachMemAsync(stream[0], t.vector, 0, cudaMemAttachHost);
cudaStreamSynchronize(stream[0]);
gemv(t.size,t.size, 1.0, t.data, t.vector, 0.0, t.result);
}
else
{
printf("t.id%d,tid.t%d,t.size%d\n",t.id,tid.t,t.size);
double one = 1.0;
double zero = 0.0;
cublasSetStream(handle[tid+1], stream[tid+1]); // ???
cudaStreamAttachMemAsync(stream[tid+1],t.data,0,cudaMemAttachSingle);
cudaStreamAttachMemAsync(stream[tid+1],t.vector,0,cudaMemAttachSingle);
cudaStreamAttachMemAsync(stream[tid+1],t.result,0,cudaMemAttachSingle);
cublasDgemv(handle[tid+1], CUBLAS_OP_N, t.size, t.size, &one, t.data,
t.size, t.vector, 1, &zero, t.result, 1);
}
}
template <typename T>
void initialise_tasks(std::vector< Task<T> > &TaskList)
{
for (unsigned int i=0; i< TaskList.size(); i++)
{
int size;
size = std::max((int)(drand48()*1000.0), 64);
TaskList[i].allocate(size,i);
}
}
int main(void)
{
cudaDeviceProp device_prop;
cudaGetDeviceProperties(&device_prop, 1);
if (device_prop.managedMemory)
{
printf("mangedMemory is true\n");
}
else
{
printf("mangedMemory is false\n");
}
const int nthreads = 4;
omp_set_num_threads(nthreads);
cudaStream_t *streams = new cudaStream_t[nthreads+1];
cublasHandle_t *handles = new cublasHandle_t[nthreads+1];
for (int i=0; i<nthreads+1;i++)
{
cudaStreamCreate(&streams[i]);
cublasCreate(&handles[i]);
}
unsigned int N = 40;
std::vector< Task<double> > TaskList(N);
initialise_tasks(TaskList);
unsigned int i;
#pragma omp parallel for schedule(dynamic)
for (i=0; i<TaskList.size();i++)
{
cudaSetDevice(0); // all OpenMP threads share the default device
int tid = omp_get_thread_num();
execute(TaskList[i], handles, streams, tid);
}
cudaDeviceSynchronize();
for (int i=0; i<nthreads+1;i++)
{
cudaStreamDestroy(streams[i]);
cublasDestroy(handles[i]);
}
std::vector< Task<double> >().swap(TaskList);
cudaDeviceReset();
return 0;
}
|
8421ad17bb8efda8f4e374b54aa03a0b2fd41cd3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*------------------------------------------------------------------------
Python extension for CUDA auxiliary routines used in
voxel-driven scatter modelling (VSM)
author: Pawel Markiewicz
Copyrights: 2020
------------------------------------------------------------------------*/
#include "sctaux.h"
#include <stdlib.h>
//======================================================================
// SCATTER RESULTS PROCESSING
//======================================================================
__constant__ short c_isrng[N_SRNG];
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void d_sct2sn1(float *scts1, float *srslt, size_t offtof, char *xsxu, short *offseg,
int NBIN) {
// scatter crystal index
char ics = threadIdx.x;
// scatter ring index
char irs = threadIdx.y;
// unscattered crystal index
char icu = blockIdx.x;
// unscattered crystal index
char iru = blockIdx.y;
// number of considered crystals and rings for scatter
char nscrs = gridDim.x;
char nsrng = gridDim.y;
// scatter bin index for one scatter sino/plane
short ssi = nscrs * icu + ics;
bool pos = ((2 * xsxu[ssi] - 1) * (irs - iru)) > 0;
// ring difference index used for addressing the segment offset to obtain sino index in span-1
unsigned short rd = __usad(c_isrng[irs], c_isrng[iru], 0);
unsigned short rdi = (2 * rd - 1 * pos);
unsigned short sni = offseg[rdi] + MIN(c_isrng[irs], c_isrng[iru]);
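// offseg[rdi] is the first span-1 plane of the ring-difference segment selected by rdi;
// the smaller of the two ring indices picks the plane within that segment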
atomicAdd(scts1 + sni * NBIN + ssi,
srslt[offtof + iru * nscrs * nsrng * nscrs + icu * nsrng * nscrs + irs * nscrs + ics]);
}
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void d_sct_axinterp(float *sct3d, const float *scts1, const int4 *sctaxR,
const float4 *sctaxW, const short *sn1_sn11, int NBIN, int NSN1,
int SPN, int tof_off) {
// scatter crystal index
char ics = threadIdx.x;
// unscattered crystal index (the 4s are done in the loop below)
char icu = blockIdx.x;
// span-1 sino index
short sni = blockIdx.y;
float tmp = sctaxW[sni].x * scts1[NBIN * sctaxR[sni].x + icu * blockDim.x + ics] +
sctaxW[sni].y * scts1[NBIN * sctaxR[sni].y + icu * blockDim.x + ics] +
sctaxW[sni].z * scts1[NBIN * sctaxR[sni].z + icu * blockDim.x + ics] +
sctaxW[sni].w * scts1[NBIN * sctaxR[sni].w + icu * blockDim.x + ics];
// span-1 or span-11 scatter pre-sinogram interpolation
if (SPN == 1)
sct3d[tof_off + sni * NBIN + icu * blockDim.x + ics] = tmp;
else if (SPN == 11)
if (sni < NSN1)
atomicAdd(sct3d + tof_off + sn1_sn11[sni] * NBIN + icu * blockDim.x + ics, tmp);
}
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//======================================================================
float *srslt2sino(float *d_srslt, char *d_xsxu, scrsDEF d_scrsdef, int *sctaxR, float *sctaxW,
short *offseg, short *isrng, short *sn1_rno, short *sn1_sn11, Cnst Cnt) {
// scatter pre-sino in span-1 (temporary)
float *d_scts1;
HANDLE_ERROR(
hipMalloc(&d_scts1, Cnt.NSN64 * d_scrsdef.nscrs * d_scrsdef.nscrs * sizeof(float)));
// axially interpolated scatter pre-sino; full span-1 without MRD limit or span-11 with MRD=60
float *d_sct3di;
int tbins = 0;
if (Cnt.SPN == 1) tbins = Cnt.NSN64 * d_scrsdef.nscrs * d_scrsdef.nscrs;
// scatter pre-sino, span-11
else if (Cnt.SPN == 11)
tbins = Cnt.NSN11 * d_scrsdef.nscrs * d_scrsdef.nscrs;
HANDLE_ERROR(hipMalloc(&d_sct3di, Cnt.TOFBINN * tbins * sizeof(float)));
HANDLE_ERROR(hipMemset(d_sct3di, 0, Cnt.TOFBINN * tbins * sizeof(float)));
// number of all scatter estimated values (sevn) for one TOF 3D sino
int sevn = d_scrsdef.nsrng * d_scrsdef.nscrs * d_scrsdef.nsrng * d_scrsdef.nscrs;
//---- constants
int4 *d_sctaxR;
HANDLE_ERROR(hipMalloc(&d_sctaxR, Cnt.NSN64 * sizeof(int4)));
HANDLE_ERROR(hipMemcpy(d_sctaxR, sctaxR, Cnt.NSN64 * sizeof(int4), hipMemcpyHostToDevice));
float4 *d_sctaxW;
HANDLE_ERROR(hipMalloc(&d_sctaxW, Cnt.NSN64 * sizeof(float4)));
HANDLE_ERROR(hipMemcpy(d_sctaxW, sctaxW, Cnt.NSN64 * sizeof(float4), hipMemcpyHostToDevice));
short *d_offseg;
HANDLE_ERROR(hipMalloc(&d_offseg, (Cnt.NSEG0 + 1) * sizeof(short)));
HANDLE_ERROR(
hipMemcpy(d_offseg, offseg, (Cnt.NSEG0 + 1) * sizeof(short), hipMemcpyHostToDevice));
if (N_SRNG != Cnt.NSRNG)
printf("e> Number of scatter rings is different in definitions from Python! "
"<<<<<<<<<<<<<<<<<<< error \n");
//---scatter ring indices to constant memory (GPU)
HANDLE_ERROR(hipMemcpyToSymbol(c_isrng, isrng, Cnt.NSRNG * sizeof(short)));
//---
short2 *d_sn1_rno;
HANDLE_ERROR(hipMalloc(&d_sn1_rno, Cnt.NSN1 * sizeof(short2)));
HANDLE_ERROR(hipMemcpy(d_sn1_rno, sn1_rno, Cnt.NSN1 * sizeof(short2), hipMemcpyHostToDevice));
short *d_sn1_sn11;
HANDLE_ERROR(hipMalloc(&d_sn1_sn11, Cnt.NSN1 * sizeof(short)));
HANDLE_ERROR(hipMemcpy(d_sn1_sn11, sn1_sn11, Cnt.NSN1 * sizeof(short), hipMemcpyHostToDevice));
//----
for (int i = 0; i < Cnt.TOFBINN; i++) {
// offset for given TOF bin
size_t offtof = i * sevn;
// init to zeros
HANDLE_ERROR(
hipMemset(d_scts1, 0, Cnt.NSN64 * d_scrsdef.nscrs * d_scrsdef.nscrs * sizeof(float)));
if (Cnt.LOG <= LOGDEBUG)
printf("d> 3D scatter results into span-1 pre-sino for TOF bin %d...", i);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
dim3 grid(d_scrsdef.nscrs, d_scrsdef.nsrng, 1);
dim3 block(d_scrsdef.nscrs, d_scrsdef.nsrng, 1);
hipLaunchKernelGGL(( d_sct2sn1), dim3(grid), dim3(block), 0, 0, d_scts1, d_srslt, offtof, d_xsxu, d_offseg,
(int)(d_scrsdef.nscrs * d_scrsdef.nscrs));
HANDLE_ERROR(hipGetLastError());
//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
if (Cnt.LOG <= LOGDEBUG) printf("DONE in %fs.\n", 1e-3 * elapsedTime);
if (Cnt.LOG <= LOGDEBUG) printf("d> 3D scatter axial interpolation...");
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
block.x = d_scrsdef.nscrs;
block.y = 1;
block.z = 1;
grid.x = d_scrsdef.nscrs;
grid.y = Cnt.NSN1;
grid.z = 1;
hipLaunchKernelGGL(( d_sct_axinterp), dim3(grid), dim3(block), 0, 0, d_sct3di, d_scts1, d_sctaxR, d_sctaxW, d_sn1_sn11,
(int)(d_scrsdef.nscrs * d_scrsdef.nscrs), Cnt.NSN1, Cnt.SPN,
i * tbins);
HANDLE_ERROR(hipGetLastError());
//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
if (Cnt.LOG <= LOGDEBUG) printf("DONE in %fs.\n", 1e-3 * elapsedTime);
}
hipFree(d_scts1);
return d_sct3di;
// hipFree(d_sct3di);
// return d_scts1;
}
//===================================================================
//------ CREATE MASK BASED ON THRESHOLD (SCATTER EMISSION DATA)------------
iMSK get_imskEm(IMflt imvol, float thrshld, Cnst Cnt) {
// check which device is going to be used
int dev_id;
hipGetDevice(&dev_id);
if (Cnt.LOG <= LOGDEBUG) printf("d> emission data masking using CUDA device #%d\n", dev_id);
iMSK msk;
int nvx = 0;
for (int i = 0; i < (SSE_IMX * SSE_IMY * SSE_IMZ); i++) {
if (imvol.im[i] > thrshld) nvx++;
}
//------------------------------------------------------------------
// create the mask thru indexes
int *d_i2v, *d_v2i;
#ifdef WIN32
int *h_i2v, *h_v2i;
HANDLE_ERROR(hipHostMalloc(&h_i2v, nvx * sizeof(int)));
HANDLE_ERROR(hipHostMalloc(&h_v2i, SSE_IMX * SSE_IMY * SSE_IMZ * sizeof(int)));
HANDLE_ERROR(hipMalloc(&d_i2v, nvx * sizeof(int)));
HANDLE_ERROR(hipMalloc(&d_v2i, SSE_IMX * SSE_IMY * SSE_IMZ * sizeof(int)));
nvx = 0;
for (int i = 0; i < (SSE_IMX * SSE_IMY * SSE_IMZ); i++) {
// if not in the mask then set to -1
h_v2i[i] = 0;
// image-based TFOV
if (imvol.im[i] > thrshld) {
h_i2v[nvx] = i;
h_v2i[i] = nvx;
nvx++;
}
}
HANDLE_ERROR(hipMemcpy(d_i2v, h_i2v, nvx * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(
hipMemcpy(d_v2i, h_v2i, SSE_IMX * SSE_IMY * SSE_IMZ * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipHostFree(h_i2v));
HANDLE_ERROR(hipHostFree(h_v2i));
#else
// printf(">>>>> NVX:%d, THRESHOLD:%f\n", nvx, thrshld);
HANDLE_ERROR(hipMallocManaged(&d_i2v, nvx * sizeof(int)));
HANDLE_ERROR(hipMallocManaged(&d_v2i, SSE_IMX * SSE_IMY * SSE_IMZ * sizeof(int)));
nvx = 0;
for (int i = 0; i < (SSE_IMX * SSE_IMY * SSE_IMZ); i++) {
// if not in the mask then set to -1
d_v2i[i] = 0;
// image-based TFOV
if (imvol.im[i] > thrshld) {
d_i2v[nvx] = i;
d_v2i[i] = nvx;
nvx++;
}
}
#endif
if (Cnt.LOG <= LOGDEBUG)
printf("d> number of voxel values greater than %3.2f is %d out of %d (ratio: %3.2f)\n",
thrshld, nvx, SSE_IMX * SSE_IMY * SSE_IMZ, nvx / (float)(SSE_IMX * SSE_IMY * SSE_IMZ));
msk.nvx = nvx;
msk.i2v = d_i2v;
msk.v2i = d_v2i;
return msk;
}
//===================================================================
//===================================================================
//----------- CREATE MASK BASED ON MASK PROVIDED ----------------
iMSK get_imskMu(IMflt imvol, char *msk, Cnst Cnt) {
// check which device is going to be used
int dev_id;
hipGetDevice(&dev_id);
if (Cnt.LOG <= LOGDEBUG) printf("d> masking using CUDA device #%d\n", dev_id);
int nvx = 0;
for (int i = 0; i < (SS_IMX * SS_IMY * SS_IMZ); i++) {
if (msk[i] > 0) nvx++;
}
//------------------------------------------------------------------
// create the mask through indices
int *d_i2v, *d_v2i;
#ifdef WIN32
int *h_i2v, *h_v2i;
HANDLE_ERROR(hipHostMalloc(&h_i2v, nvx * sizeof(int)));
HANDLE_ERROR(hipHostMalloc(&h_v2i, SS_IMX * SS_IMY * SS_IMZ * sizeof(int)));
HANDLE_ERROR(hipMalloc(&d_i2v, nvx * sizeof(int)));
HANDLE_ERROR(hipMalloc(&d_v2i, SS_IMX * SS_IMY * SS_IMZ * sizeof(int)));
nvx = 0;
for (int i = 0; i < (SS_IMX * SS_IMY * SS_IMZ); i++) {
// if not in the mask then set to -1
h_v2i[i] = -1;
// image-based TFOV
if (msk[i] > 0) {
h_i2v[nvx] = i;
h_v2i[i] = nvx;
nvx++;
}
}
HANDLE_ERROR(hipMemcpy(d_i2v, h_i2v, nvx * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(
hipMemcpy(d_v2i, h_v2i, SS_IMX * SS_IMY * SS_IMZ * sizeof(int), hipMemcpyHostToDevice));
HANDLE_ERROR(hipHostFree(h_i2v));
HANDLE_ERROR(hipHostFree(h_v2i));
#else
HANDLE_ERROR(hipMallocManaged(&d_i2v, nvx * sizeof(int)));
HANDLE_ERROR(hipMallocManaged(&d_v2i, SS_IMX * SS_IMY * SS_IMZ * sizeof(int)));
nvx = 0;
for (int i = 0; i < (SS_IMX * SS_IMY * SS_IMZ); i++) {
// if not in the mask then set to -1
d_v2i[i] = -1;
// image-based TFOV
if (msk[i] > 0) {
d_i2v[nvx] = i;
d_v2i[i] = nvx;
nvx++;
}
}
#endif
if (Cnt.LOG <= LOGDEBUG)
printf("d> number of voxels within the mu-mask is %d out of %d (ratio: %3.2f)\n", nvx,
SS_IMX * SS_IMY * SS_IMZ, nvx / (float)(SS_IMX * SS_IMY * SS_IMZ));
iMSK mlut;
mlut.nvx = nvx;
mlut.i2v = d_i2v;
mlut.v2i = d_v2i;
return mlut;
}
| 8421ad17bb8efda8f4e374b54aa03a0b2fd41cd3.cu | /*------------------------------------------------------------------------
Python extension for CUDA auxiliary routines used in
voxel-driven scatter modelling (VSM)
author: Pawel Markiewicz
Copyrights: 2020
------------------------------------------------------------------------*/
#include "sctaux.h"
#include <stdlib.h>
//======================================================================
// SCATTER RESULTS PROCESSING
//======================================================================
__constant__ short c_isrng[N_SRNG];
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void d_sct2sn1(float *scts1, float *srslt, size_t offtof, char *xsxu, short *offseg,
int NBIN) {
// scatter crystal index
char ics = threadIdx.x;
// scatter ring index
char irs = threadIdx.y;
// unscattered crystal index
char icu = blockIdx.x;
// unscattered crystal index
char iru = blockIdx.y;
// number of considered crystals and rings for scatter
char nscrs = gridDim.x;
char nsrng = gridDim.y;
// scatter bin index for one scatter sino/plane
short ssi = nscrs * icu + ics;
bool pos = ((2 * xsxu[ssi] - 1) * (irs - iru)) > 0;
// ring difference index used for addressing the segment offset to obtain sino index in span-1
unsigned short rd = __usad(c_isrng[irs], c_isrng[iru], 0);
unsigned short rdi = (2 * rd - 1 * pos);
unsigned short sni = offseg[rdi] + MIN(c_isrng[irs], c_isrng[iru]);
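// offseg[rdi] is the first span-1 plane of the ring-difference segment selected by rdi;
// the smaller of the two ring indices picks the plane within that segment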
atomicAdd(scts1 + sni * NBIN + ssi,
srslt[offtof + iru * nscrs * nsrng * nscrs + icu * nsrng * nscrs + irs * nscrs + ics]);
}
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
__global__ void d_sct_axinterp(float *sct3d, const float *scts1, const int4 *sctaxR,
const float4 *sctaxW, const short *sn1_sn11, int NBIN, int NSN1,
int SPN, int tof_off) {
// scatter crystal index
char ics = threadIdx.x;
// unscattered crystal index (the 4s are done in the loop below)
char icu = blockIdx.x;
// span-1 sino index
short sni = blockIdx.y;
float tmp = sctaxW[sni].x * scts1[NBIN * sctaxR[sni].x + icu * blockDim.x + ics] +
sctaxW[sni].y * scts1[NBIN * sctaxR[sni].y + icu * blockDim.x + ics] +
sctaxW[sni].z * scts1[NBIN * sctaxR[sni].z + icu * blockDim.x + ics] +
sctaxW[sni].w * scts1[NBIN * sctaxR[sni].w + icu * blockDim.x + ics];
// span-1 or span-11 scatter pre-sinogram interpolation
if (SPN == 1)
sct3d[tof_off + sni * NBIN + icu * blockDim.x + ics] = tmp;
else if (SPN == 11)
if (sni < NSN1)
atomicAdd(sct3d + tof_off + sn1_sn11[sni] * NBIN + icu * blockDim.x + ics, tmp);
}
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//======================================================================
float *srslt2sino(float *d_srslt, char *d_xsxu, scrsDEF d_scrsdef, int *sctaxR, float *sctaxW,
short *offseg, short *isrng, short *sn1_rno, short *sn1_sn11, Cnst Cnt) {
// scatter pre-sino in span-1 (temporary)
float *d_scts1;
HANDLE_ERROR(
cudaMalloc(&d_scts1, Cnt.NSN64 * d_scrsdef.nscrs * d_scrsdef.nscrs * sizeof(float)));
// axially interpolated scatter pre-sino; full span-1 without MRD limit or span-11 with MRD=60
float *d_sct3di;
int tbins = 0;
if (Cnt.SPN == 1) tbins = Cnt.NSN64 * d_scrsdef.nscrs * d_scrsdef.nscrs;
// scatter pre-sino, span-11
else if (Cnt.SPN == 11)
tbins = Cnt.NSN11 * d_scrsdef.nscrs * d_scrsdef.nscrs;
HANDLE_ERROR(cudaMalloc(&d_sct3di, Cnt.TOFBINN * tbins * sizeof(float)));
HANDLE_ERROR(cudaMemset(d_sct3di, 0, Cnt.TOFBINN * tbins * sizeof(float)));
// number of all scatter estimated values (sevn) for one TOF 3D sino
int sevn = d_scrsdef.nsrng * d_scrsdef.nscrs * d_scrsdef.nsrng * d_scrsdef.nscrs;
//---- constants
int4 *d_sctaxR;
HANDLE_ERROR(cudaMalloc(&d_sctaxR, Cnt.NSN64 * sizeof(int4)));
HANDLE_ERROR(cudaMemcpy(d_sctaxR, sctaxR, Cnt.NSN64 * sizeof(int4), cudaMemcpyHostToDevice));
float4 *d_sctaxW;
HANDLE_ERROR(cudaMalloc(&d_sctaxW, Cnt.NSN64 * sizeof(float4)));
HANDLE_ERROR(cudaMemcpy(d_sctaxW, sctaxW, Cnt.NSN64 * sizeof(float4), cudaMemcpyHostToDevice));
short *d_offseg;
HANDLE_ERROR(cudaMalloc(&d_offseg, (Cnt.NSEG0 + 1) * sizeof(short)));
HANDLE_ERROR(
cudaMemcpy(d_offseg, offseg, (Cnt.NSEG0 + 1) * sizeof(short), cudaMemcpyHostToDevice));
if (N_SRNG != Cnt.NSRNG)
printf("e> Number of scatter rings is different in definitions from Python! "
"<<<<<<<<<<<<<<<<<<< error \n");
//---scatter ring indices to constant memory (GPU)
HANDLE_ERROR(cudaMemcpyToSymbol(c_isrng, isrng, Cnt.NSRNG * sizeof(short)));
//---
short2 *d_sn1_rno;
HANDLE_ERROR(cudaMalloc(&d_sn1_rno, Cnt.NSN1 * sizeof(short2)));
HANDLE_ERROR(cudaMemcpy(d_sn1_rno, sn1_rno, Cnt.NSN1 * sizeof(short2), cudaMemcpyHostToDevice));
short *d_sn1_sn11;
HANDLE_ERROR(cudaMalloc(&d_sn1_sn11, Cnt.NSN1 * sizeof(short)));
HANDLE_ERROR(cudaMemcpy(d_sn1_sn11, sn1_sn11, Cnt.NSN1 * sizeof(short), cudaMemcpyHostToDevice));
//----
for (int i = 0; i < Cnt.TOFBINN; i++) {
// offset for given TOF bin
size_t offtof = i * sevn;
// init to zeros
HANDLE_ERROR(
cudaMemset(d_scts1, 0, Cnt.NSN64 * d_scrsdef.nscrs * d_scrsdef.nscrs * sizeof(float)));
if (Cnt.LOG <= LOGDEBUG)
printf("d> 3D scatter results into span-1 pre-sino for TOF bin %d...", i);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
dim3 grid(d_scrsdef.nscrs, d_scrsdef.nsrng, 1);
dim3 block(d_scrsdef.nscrs, d_scrsdef.nsrng, 1);
d_sct2sn1<<<grid, block>>>(d_scts1, d_srslt, offtof, d_xsxu, d_offseg,
(int)(d_scrsdef.nscrs * d_scrsdef.nscrs));
HANDLE_ERROR(cudaGetLastError());
//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
if (Cnt.LOG <= LOGDEBUG) printf("DONE in %fs.\n", 1e-3 * elapsedTime);
if (Cnt.LOG <= LOGDEBUG) printf("d> 3D scatter axial interpolation...");
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
block.x = d_scrsdef.nscrs;
block.y = 1;
block.z = 1;
grid.x = d_scrsdef.nscrs;
grid.y = Cnt.NSN1;
grid.z = 1;
d_sct_axinterp<<<grid, block>>>(d_sct3di, d_scts1, d_sctaxR, d_sctaxW, d_sn1_sn11,
(int)(d_scrsdef.nscrs * d_scrsdef.nscrs), Cnt.NSN1, Cnt.SPN,
i * tbins);
HANDLE_ERROR(cudaGetLastError());
//<><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
if (Cnt.LOG <= LOGDEBUG) printf("DONE in %fs.\n", 1e-3 * elapsedTime);
}
cudaFree(d_scts1);
return d_sct3di;
// cudaFree(d_sct3di);
// return d_scts1;
}
//===================================================================
//------ CREATE MASK BASED ON THRESHOLD (SCATTER EMISSION DATA)------------
iMSK get_imskEm(IMflt imvol, float thrshld, Cnst Cnt) {
// check which device is going to be used
int dev_id;
cudaGetDevice(&dev_id);
if (Cnt.LOG <= LOGDEBUG) printf("d> emission data masking using CUDA device #%d\n", dev_id);
iMSK msk;
int nvx = 0;
for (int i = 0; i < (SSE_IMX * SSE_IMY * SSE_IMZ); i++) {
if (imvol.im[i] > thrshld) nvx++;
}
//------------------------------------------------------------------
// create the mask thru indexes
int *d_i2v, *d_v2i;
#ifdef WIN32
int *h_i2v, *h_v2i;
HANDLE_ERROR(cudaMallocHost(&h_i2v, nvx * sizeof(int)));
HANDLE_ERROR(cudaMallocHost(&h_v2i, SSE_IMX * SSE_IMY * SSE_IMZ * sizeof(int)));
HANDLE_ERROR(cudaMalloc(&d_i2v, nvx * sizeof(int)));
HANDLE_ERROR(cudaMalloc(&d_v2i, SSE_IMX * SSE_IMY * SSE_IMZ * sizeof(int)));
nvx = 0;
for (int i = 0; i < (SSE_IMX * SSE_IMY * SSE_IMZ); i++) {
// if not in the mask then set to -1
h_v2i[i] = 0;
// image-based TFOV
if (imvol.im[i] > thrshld) {
h_i2v[nvx] = i;
h_v2i[i] = nvx;
nvx++;
}
}
HANDLE_ERROR(cudaMemcpy(d_i2v, h_i2v, nvx * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(
cudaMemcpy(d_v2i, h_v2i, SSE_IMX * SSE_IMY * SSE_IMZ * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaFreeHost(h_i2v));
HANDLE_ERROR(cudaFreeHost(h_v2i));
#else
// printf(">>>>> NVX:%d, THRESHOLD:%f\n", nvx, thrshld);
HANDLE_ERROR(cudaMallocManaged(&d_i2v, nvx * sizeof(int)));
HANDLE_ERROR(cudaMallocManaged(&d_v2i, SSE_IMX * SSE_IMY * SSE_IMZ * sizeof(int)));
nvx = 0;
for (int i = 0; i < (SSE_IMX * SSE_IMY * SSE_IMZ); i++) {
// if not in the mask then set to -1
d_v2i[i] = 0;
// image-based TFOV
if (imvol.im[i] > thrshld) {
d_i2v[nvx] = i;
d_v2i[i] = nvx;
nvx++;
}
}
#endif
if (Cnt.LOG <= LOGDEBUG)
printf("d> number of voxel values greater than %3.2f is %d out of %d (ratio: %3.2f)\n",
thrshld, nvx, SSE_IMX * SSE_IMY * SSE_IMZ, nvx / (float)(SSE_IMX * SSE_IMY * SSE_IMZ));
msk.nvx = nvx;
msk.i2v = d_i2v;
msk.v2i = d_v2i;
return msk;
}
//===================================================================
//===================================================================
//----------- CREATE MASK BASED ON MASK PROVIDED ----------------
iMSK get_imskMu(IMflt imvol, char *msk, Cnst Cnt) {
// check which device is going to be used
int dev_id;
cudaGetDevice(&dev_id);
if (Cnt.LOG <= LOGDEBUG) printf("d> masking using CUDA device #%d\n", dev_id);
int nvx = 0;
for (int i = 0; i < (SS_IMX * SS_IMY * SS_IMZ); i++) {
if (msk[i] > 0) nvx++;
}
//------------------------------------------------------------------
// create the mask through indices
int *d_i2v, *d_v2i;
#ifdef WIN32
int *h_i2v, *h_v2i;
HANDLE_ERROR(cudaMallocHost(&h_i2v, nvx * sizeof(int)));
HANDLE_ERROR(cudaMallocHost(&h_v2i, SS_IMX * SS_IMY * SS_IMZ * sizeof(int)));
HANDLE_ERROR(cudaMalloc(&d_i2v, nvx * sizeof(int)));
HANDLE_ERROR(cudaMalloc(&d_v2i, SS_IMX * SS_IMY * SS_IMZ * sizeof(int)));
nvx = 0;
for (int i = 0; i < (SS_IMX * SS_IMY * SS_IMZ); i++) {
// if not in the mask then set to -1
h_v2i[i] = -1;
// image-based TFOV
if (msk[i] > 0) {
h_i2v[nvx] = i;
h_v2i[i] = nvx;
nvx++;
}
}
HANDLE_ERROR(cudaMemcpy(d_i2v, h_i2v, nvx * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(
cudaMemcpy(d_v2i, h_v2i, SS_IMX * SS_IMY * SS_IMZ * sizeof(int), cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaFreeHost(h_i2v));
HANDLE_ERROR(cudaFreeHost(h_v2i));
#else
HANDLE_ERROR(cudaMallocManaged(&d_i2v, nvx * sizeof(int)));
HANDLE_ERROR(cudaMallocManaged(&d_v2i, SS_IMX * SS_IMY * SS_IMZ * sizeof(int)));
nvx = 0;
for (int i = 0; i < (SS_IMX * SS_IMY * SS_IMZ); i++) {
// if not in the mask then set to -1
d_v2i[i] = -1;
// image-based TFOV
if (msk[i] > 0) {
d_i2v[nvx] = i;
d_v2i[i] = nvx;
nvx++;
}
}
#endif
if (Cnt.LOG <= LOGDEBUG)
printf("d> number of voxels within the mu-mask is %d out of %d (ratio: %3.2f)\n", nvx,
SS_IMX * SS_IMY * SS_IMZ, nvx / (float)(SS_IMX * SS_IMY * SS_IMZ));
iMSK mlut;
mlut.nvx = nvx;
mlut.i2v = d_i2v;
mlut.v2i = d_v2i;
return mlut;
}
|
f92c66d6107f7e3f791aaa606d2cc0570d524f32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define NOISE_PARAMETER 0.01
#define GET_RAND ((double)rand()/(10.0 * (double)RAND_MAX)) - 0.05
__device__ struct XY {
double x;
double y;
} XY;
__device__ double d_f_pred;
__device__ int n;
__device__ double sum;
__device__ int count;
// Code to get the number of cores in a SM
int _ConvertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
typedef struct
{
int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ 0x52, 128}, // Maxwell Generation (SM 5.2) GM20x class
{ 0x53, 128}, // Maxwell Generation (SM 5.3) GM20x class
{ 0x60, 64 }, // Pascal Generation (SM 6.0) GP100 class
{ 0x61, 128}, // Pascal Generation (SM 6.1) GP10x class
{ 0x62, 128}, // Pascal Generation (SM 6.2) GP10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the values, we default use the previous one to run properly
printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index-1].Cores);
return nGpuArchCoresPerSM[index-1].Cores;
}
__device__ void get_total_sum(double *partial_sum, int dummy) {
if(threadIdx.x == 0) {
count = dummy;
if(count %2 != 0) {
count++;
partial_sum[count-1] = 0;
}
}
__syncthreads();
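// pairwise tree reduction over partial_sum[0..count): each pass folds the upper half onto
// the lower half; the odd-length fix-up in thread 0 adds the element that would otherwise
// be dropped when the range is halved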
for(int i = count/2; i > 0; i = i/2) {
if(threadIdx.x < i)
partial_sum[threadIdx.x] += partial_sum[threadIdx.x + i];
__syncthreads();
if(threadIdx.x == 0) {
if(i%2 != 0 && i != 1) {
partial_sum[0] += partial_sum[--i];
}
}
__syncthreads();
}
__syncthreads();
return;
}
//-------------------------------------------------------------------------------
// Kernel function to compute the K' matrix
__global__ void gpr_get_K(int N, int m, double *K, struct XY *xy)
{
// xy[r] store the x and y coordinates of the rth point
n = m * m;
double d[2];
// Allocate and initialze K
for(int i = threadIdx.x; i < n; i += N) {
for(int j = 0; j < n; j++) {
d[0] = pow(xy[i].x - xy[j].x, 2);
d[1] = pow(xy[i].y - xy[j].y, 2);
if(i == j)
K[ i*n + j] = exp(-1 * (d[0] + d[1])) + 0.01;
else
K[ i*n + j] = exp(-1 * (d[0] + d[1]));
}
}
}
// Kernel function to calculate the Cholesky factors
__global__ void gpr_cholesky(int N, double *K, double *L) {
// Cholesky factorization of K into the triangular factor L
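// Standard Cholesky update written with this kernel's row-major indexing
// (entry [j*n+i] with j <= i holds the factor element used below):
// diagonal: L[k*n+k] = sqrt( K[k*n+k] - sum_{j<k} L[j*n+k]^2 )
// column i>k: L[k*n+i] = ( K[k*n+i] - sum_{j<k} L[j*n+i]*L[j*n+k] ) / L[k*n+k]
// Only the diagonal sum is reduced across threads, via get_total_sum().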
extern __shared__ double partial_sum[];
for(int k = 0; k < n; k++) {
partial_sum[threadIdx.x] = 0;
for(int j = threadIdx.x; j < k; j += N) {
partial_sum[threadIdx.x] = partial_sum[threadIdx.x] + (L[j * n + k] * L[j * n +k]);
}
__syncthreads();
get_total_sum(partial_sum, (N<k)?N:k);
if(threadIdx.x == 0) {
L[k * n + k] = sqrt(K[k * n + k] - partial_sum[0]);
}
__syncthreads();
for(int i = k + threadIdx.x + 1; i < n; i+=N) { //Removing zeroing
partial_sum[threadIdx.x] = 0;
for(int j = 0; j < k; j++) {
partial_sum[threadIdx.x] = partial_sum[threadIdx.x] + L[j * n + i] * L[j * n + k];
}
L[k * n + i] = (K[k * n + i] - partial_sum[threadIdx.x]) / L[k * n + k];
}
__syncthreads();
}
}
// Kernel code to solve for z
__global__ void gpr_solver(int N, double *Y, double *z, double *L, double *f)
{
extern __shared__ double partial_sum[];
// Solving K'z = f => LUz = F => LY = F
// Solving for Y
for(int i = 0; i < n; i++) {
partial_sum[threadIdx.x] = 0;
for(int j = threadIdx.x; j < i; j += N) {
partial_sum[threadIdx.x] += (L[j * n + i] * Y[j]);
}
__syncthreads();
get_total_sum(partial_sum, (N<i)?N:i);
if(threadIdx.x == 0) {
Y[i] = (f[i] - partial_sum[0]) / L[i * n + i];
}
__syncthreads();
}
__syncthreads();
// Solving for z
for(int i = n-1; i >= 0; i--) {
partial_sum[threadIdx.x] = 0;
for(int j = n-1-threadIdx.x; j > i; j -= N) {
partial_sum[threadIdx.x] += (L[i * n + j] * z[j]); // U component is nothing but L'
}
__syncthreads();
get_total_sum(partial_sum, (N < (n - 1 - i))?N:(n-1-i));
if(threadIdx.x == 0) {
z[i] = (Y[i] - partial_sum[0]) / L[i * n + i];
}
__syncthreads();
}
return;
}
//Kernel code to run the final prediction
__global__ void gpr_predict(int N, int m, double a, double b, double *k, double *z, struct XY *xy)
{
// Computing the f(predicted) value at rstar
double rstar[2] = {a, b};
extern __shared__ double partial_sum[];
// Initializing k
double d[2];
for(int i = threadIdx.x; i < n; i += N) {
d[0] = pow(xy[i].x - rstar[0], 2);
d[1] = pow(xy[i].y - rstar[1], 2);
k[i] = exp(-1 * (d[0] + d[1]));
}
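// GP posterior mean at rstar: with the precomputed weights z = K'^{-1} f, the prediction
// is the dot product k . z, accumulated below with strided partial sums and the shared
// tree reduction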
partial_sum[threadIdx.x] = 0.0;
for(int i = threadIdx.x; i < n; i += N) {
partial_sum[threadIdx.x] += k[i] * z[i];
}
__syncthreads();
get_total_sum(partial_sum, (N<n)?N:n);
if(threadIdx.x == 0) {
d_f_pred = partial_sum[0];
}
return;
}
//Main function to take in the parameters and call the GPU kernel to calculate the predicted values
int main(int argc, char* argv[]) {
int m;
int num_threads = 0;
double rstar[2];
if(argc != 4) {
printf("Aborting! Invalid number of input arguements. Please execute the binary as ./a.out m xstar ystar\n");
return 0;
} else {
m = atoi(argv[1]);
rstar[0] = atof(argv[2]);
rstar[1] = atof(argv[3]);
printf("Selected m value : %d \n", m);
printf("The required Rstar value : %f, %f\n", rstar[0], rstar[1]);
}
/* Validate the input parameters */
if(rstar[0] < 0 || rstar[0] >= m || rstar[1] < 0 || rstar[1] >= m ) {
printf("Aborting! Rstar selected out of Bound! \n");
return 0;
}
//Get the cores in a SM
int deviceCount;
hipGetDeviceCount(&deviceCount);
int device;
for ( device = 0; device < deviceCount; ++device ) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
int temp = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
if(temp > num_threads)
num_threads = temp;
}
//num_threads = 1;
printf("Input: N = %d, threads_per_block = %d\n", num_threads, num_threads);
double *f, *k, *Y, *z, *K, *L;
struct XY *xy;
//Allocating data structures for GPU
hipMalloc(&f, (m * m * sizeof(double)));
hipMalloc(&k, (m * m * sizeof(double)));
hipMalloc(&Y, (m * m * sizeof(double)));
hipMalloc(&z, (m * m * sizeof(double)));
hipMalloc(&xy, (m * m * sizeof(struct XY)));
int n = m*m;
hipMalloc(&K, (n * n * sizeof(double)));
hipMalloc(&L, (n * n * sizeof(double)));
// Initializing the grid and f
// xy[r] store the x and y coordinates of the rth point
n = m * m;
struct XY *h_xy = (struct XY *) malloc( n * sizeof(struct XY));
double h = 1.0 / (double)(m + 1);
int idx = 0;
for(int i = 0; i < m; i++) {
for(int j = 0; j < m; j++) {
h_xy[idx].x = (i + 1) * h;
h_xy[idx].y = (j +1) * h;
idx++;
}
}
//Exporting to the GPU
hipMemcpy(xy, h_xy, n*sizeof(struct XY), hipMemcpyHostToDevice);
// Allocate and initialize observed data vector f
double* h_f = (double*) malloc(n * sizeof(double));
for(idx = 0; idx < n; idx++) {
h_f[idx] = 1 - (((h_xy[idx].x - 0.5) * (h_xy[idx].x - 0.5)) +
((h_xy[idx].y - 0.5) * (h_xy[idx].y - 0.5))) + GET_RAND;
}
// Exporting to GPU
hipMemcpy(f, h_f, n*sizeof(double), hipMemcpyHostToDevice);
// Initialize timing events
hipEvent_t start_kernel, stop_kernel, start_cholesky, stop_cholesky, start_solver, stop_solver;
hipEventCreate(&start_kernel);
hipEventCreate(&stop_kernel);
hipEventCreate(&start_cholesky);
hipEventCreate(&stop_cholesky);
hipEventCreate(&start_solver);
hipEventCreate(&stop_solver);
//Configuring the shared memory banks for double precision
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
// Record timing event - start
hipEventRecord(start_kernel, 0);
hipLaunchKernelGGL(( gpr_get_K), dim3(1),dim3(num_threads), 0, 0, num_threads, m, K, xy);
hipEventRecord(start_cholesky, 0);
hipLaunchKernelGGL(( gpr_cholesky), dim3(1),dim3(num_threads), num_threads * sizeof(double), 0, num_threads, K, L);
hipEventRecord(stop_cholesky, 0);
hipEventSynchronize(stop_cholesky);
hipEventRecord(start_solver, 0);
hipLaunchKernelGGL(( gpr_solver), dim3(1),dim3(num_threads), num_threads * sizeof(double), 0, num_threads, Y, z, L, f);
hipEventRecord(stop_solver, 0);
hipEventSynchronize(stop_solver);
hipLaunchKernelGGL(( gpr_predict), dim3(1),dim3(num_threads), num_threads * sizeof(double), 0, num_threads, m, rstar[0], rstar[1], k, z, xy);
// Record timing event - stop
hipEventRecord(stop_kernel, 0);
hipEventSynchronize(stop_kernel);
double f_pred;
hipMemcpyFromSymbol(&f_pred, HIP_SYMBOL(d_f_pred), sizeof(f_pred), 0, hipMemcpyDeviceToHost);
printf("The predicted value of f at r_star : %f\n", f_pred);
// Compute elapsed time
float elapsedTime_cholesky;
hipEventElapsedTime(&elapsedTime_cholesky, start_cholesky, stop_cholesky);
printf("Elapsed time: Cholesky = %f ms\n", elapsedTime_cholesky);
float elapsedTime_solver;
hipEventElapsedTime(&elapsedTime_solver, start_solver, stop_solver);
printf("Elapsed time: Solver = %f ms\n", elapsedTime_solver);
float elapsedTime_kernel;
hipEventElapsedTime(&elapsedTime_kernel, start_kernel, stop_kernel);
printf("Elapsed time: Kernel = %f ms\n", elapsedTime_kernel);
long flops_cholesky = 0;
long flops_solver = 0;
for(int i = 0; i < n; i++) {
flops_solver += (2*i + num_threads + 2);
}
flops_solver *= 2;
for(int i = 0; i < n; i++) {
flops_cholesky += (2 * i + num_threads + 2) * (n - i);
}
printf("Floating point operations Cholesky Factorization: %ld\n", flops_cholesky); //Update needed
printf("Floating point operations per second (FLOPS) Cholesky : %f Gflops\n", (flops_cholesky)/(elapsedTime_cholesky/1000.0)/(1024.0*1024*1024)); //Update Needed
printf("Floating point operations Solver: %ld\n", flops_solver); //Update needed
printf("Floating point operations per second (FLOPS) Solver: %f Gflops\n", (flops_solver)/(elapsedTime_solver/1000.0)/(1024.0*1024*1024)); //Update Needed
//for(int i = 0; i < m*m; i++)
// printf("%f \n", h_f[i]);
hipFree(f);
hipFree(k);
hipFree(Y);
hipFree(z);
hipFree(xy);
hipFree(K);
hipFree(L);
free(h_xy);
free(h_f);
// Delete timing events
hipEventDestroy(start_kernel);
hipEventDestroy(stop_kernel);
hipEventDestroy(start_cholesky);
hipEventDestroy(stop_cholesky);
hipEventDestroy(start_solver);
hipEventDestroy(stop_solver);
}
| f92c66d6107f7e3f791aaa606d2cc0570d524f32.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define NOISE_PARAMETER 0.01
#define GET_RAND ((double)rand()/(10.0 * (double)RAND_MAX)) - 0.05
__device__ struct XY {
double x;
double y;
} XY;
__device__ double d_f_pred;
__device__ int n;
__device__ double sum;
__device__ int count;
// Code to get the number of cores in a SM
int _ConvertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
typedef struct
{
int SM; // 0xMm (hexidecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192}, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192}, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192}, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192}, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128}, // Maxwell Generation (SM 5.0) GM10x class
{ 0x52, 128}, // Maxwell Generation (SM 5.2) GM20x class
{ 0x53, 128}, // Maxwell Generation (SM 5.3) GM20x class
{ 0x60, 64 }, // Pascal Generation (SM 6.0) GP100 class
{ 0x61, 128}, // Pascal Generation (SM 6.1) GP10x class
{ 0x62, 128}, // Pascal Generation (SM 6.2) GP10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the values, we default use the previous one to run properly
printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index-1].Cores);
return nGpuArchCoresPerSM[index-1].Cores;
}
__device__ void get_total_sum(double *partial_sum, int dummy) {
if(threadIdx.x == 0) {
count = dummy;
if(count %2 != 0) {
count++;
partial_sum[count-1] = 0;
}
}
__syncthreads();
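// pairwise tree reduction over partial_sum[0..count): each pass folds the upper half onto
// the lower half; the odd-length fix-up in thread 0 adds the element that would otherwise
// be dropped when the range is halved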
for(int i = count/2; i > 0; i = i/2) {
if(threadIdx.x < i)
partial_sum[threadIdx.x] += partial_sum[threadIdx.x + i];
__syncthreads();
if(threadIdx.x == 0) {
if(i%2 != 0 && i != 1) {
partial_sum[0] += partial_sum[--i];
}
}
__syncthreads();
}
__syncthreads();
return;
}
//-------------------------------------------------------------------------------
// Kernel function to compute the K' matrix
__global__ void gpr_get_K(int N, int m, double *K, struct XY *xy)
{
// xy[r] store the x and y coordinates of the rth point
n = m * m;
double d[2];
// Allocate and initialze K
for(int i = threadIdx.x; i < n; i += N) {
for(int j = 0; j < n; j++) {
d[0] = pow(xy[i].x - xy[j].x, 2);
d[1] = pow(xy[i].y - xy[j].y, 2);
if(i == j)
K[ i*n + j] = exp(-1 * (d[0] + d[1])) + 0.01;
else
K[ i*n + j] = exp(-1 * (d[0] + d[1]));
}
}
}
// Kernel function to calculate the Cholesky factors
__global__ void gpr_cholesky(int N, double *K, double *L) {
// Cholesky factorization of K into the triangular factor L
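// Standard Cholesky update written with this kernel's row-major indexing
// (entry [j*n+i] with j <= i holds the factor element used below):
// diagonal: L[k*n+k] = sqrt( K[k*n+k] - sum_{j<k} L[j*n+k]^2 )
// column i>k: L[k*n+i] = ( K[k*n+i] - sum_{j<k} L[j*n+i]*L[j*n+k] ) / L[k*n+k]
// Only the diagonal sum is reduced across threads, via get_total_sum().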
extern __shared__ double partial_sum[];
for(int k = 0; k < n; k++) {
partial_sum[threadIdx.x] = 0;
for(int j = threadIdx.x; j < k; j += N) {
partial_sum[threadIdx.x] = partial_sum[threadIdx.x] + (L[j * n + k] * L[j * n +k]);
}
__syncthreads();
get_total_sum(partial_sum, (N<k)?N:k);
if(threadIdx.x == 0) {
L[k * n + k] = sqrt(K[k * n + k] - partial_sum[0]);
}
__syncthreads();
for(int i = k + threadIdx.x + 1; i < n; i+=N) { //Removing zeroing
partial_sum[threadIdx.x] = 0;
for(int j = 0; j < k; j++) {
partial_sum[threadIdx.x] = partial_sum[threadIdx.x] + L[j * n + i] * L[j * n + k];
}
L[k * n + i] = (K[k * n + i] - partial_sum[threadIdx.x]) / L[k * n + k];
}
__syncthreads();
}
}
// Kernel code to solve for z
__global__ void gpr_solver(int N, double *Y, double *z, double *L, double *f)
{
extern __shared__ double partial_sum[];
// Solving K'z = f => LUz = F => LY = F
// Solving for Y
for(int i = 0; i < n; i++) {
partial_sum[threadIdx.x] = 0;
for(int j = threadIdx.x; j < i; j += N) {
partial_sum[threadIdx.x] += (L[j * n + i] * Y[j]);
}
__syncthreads();
get_total_sum(partial_sum, (N<i)?N:i);
if(threadIdx.x == 0) {
Y[i] = (f[i] - partial_sum[0]) / L[i * n + i];
}
__syncthreads();
}
__syncthreads();
// Solving for z
for(int i = n-1; i >= 0; i--) {
partial_sum[threadIdx.x] = 0;
for(int j = n-1-threadIdx.x; j > i; j -= N) {
partial_sum[threadIdx.x] += (L[i * n + j] * z[j]); // U component is nothing but L'
}
__syncthreads();
get_total_sum(partial_sum, (N < (n - 1 - i))?N:(n-1-i));
if(threadIdx.x == 0) {
z[i] = (Y[i] - partial_sum[0]) / L[i * n + i];
}
__syncthreads();
}
return;
}
//Kernel code to run the final prediction
__global__ void gpr_predict(int N, int m, double a, double b, double *k, double *z, struct XY *xy)
{
// Computing the f(predicted) value at rstar
double rstar[2] = {a, b};
extern __shared__ double partial_sum[];
// Initializing k
double d[2];
for(int i = threadIdx.x; i < n; i += N) {
d[0] = pow(xy[i].x - rstar[0], 2);
d[1] = pow(xy[i].y - rstar[1], 2);
k[i] = exp(-1 * (d[0] + d[1]));
}
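// GP posterior mean at rstar: with the precomputed weights z = K'^{-1} f, the prediction
// is the dot product k . z, accumulated below with strided partial sums and the shared
// tree reduction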
partial_sum[threadIdx.x] = 0.0;
for(int i = threadIdx.x; i < n; i += N) {
partial_sum[threadIdx.x] += k[i] * z[i];
}
__syncthreads();
get_total_sum(partial_sum, (N<n)?N:n);
if(threadIdx.x == 0) {
d_f_pred = partial_sum[0];
}
return;
}
//Main function to take in the parameters and call the GPU kernel to calculate the predicted values
int main(int argc, char* argv[]) {
int m;
int num_threads = 0;
double rstar[2];
if(argc != 4) {
printf("Aborting! Invalid number of input arguements. Please execute the binary as ./a.out m xstar ystar\n");
return 0;
} else {
m = atoi(argv[1]);
rstar[0] = atof(argv[2]);
rstar[1] = atof(argv[3]);
printf("Selected m value : %d \n", m);
printf("The required Rstar value : %f, %f\n", rstar[0], rstar[1]);
}
/* Validate the input parameters */
if(rstar[0] < 0 || rstar[0] >= m || rstar[1] < 0 || rstar[1] >= m ) {
printf("Aborting! Rstar selected out of Bound! \n");
return 0;
}
//Get the cores in a SM
int deviceCount;
cudaGetDeviceCount(&deviceCount);
int device;
for ( device = 0; device < deviceCount; ++device ) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
int temp = _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
if(temp > num_threads)
num_threads = temp;
}
//num_threads = 1;
printf("Input: N = %d, threads_per_block = %d\n", num_threads, num_threads);
double *f, *k, *Y, *z, *K, *L;
struct XY *xy;
//Allocating data structures for GPU
cudaMalloc(&f, (m * m * sizeof(double)));
cudaMalloc(&k, (m * m * sizeof(double)));
cudaMalloc(&Y, (m * m * sizeof(double)));
cudaMalloc(&z, (m * m * sizeof(double)));
cudaMalloc(&xy, (m * m * sizeof(struct XY)));
int n = m*m;
cudaMalloc(&K, (n * n * sizeof(double)));
cudaMalloc(&L, (n * n * sizeof(double)));
// Initializing the grid and f
// xy[r] store the x and y coordinates of the rth point
n = m * m;
struct XY *h_xy = (struct XY *) malloc( n * sizeof(struct XY));
double h = 1.0 / (double)(m + 1);
int idx = 0;
for(int i = 0; i < m; i++) {
for(int j = 0; j < m; j++) {
h_xy[idx].x = (i + 1) * h;
h_xy[idx].y = (j +1) * h;
idx++;
}
}
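// Grid points are laid out row-major: point (i, j) sits at index i*m + j with coordinates
// ((i+1)*h, (j+1)*h), h = 1/(m+1), i.e. strictly inside the unit square.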
//Exporting to the GPU
cudaMemcpy(xy, h_xy, n*sizeof(struct XY), cudaMemcpyHostToDevice);
// Allocate and initialize observed data vector f
double* h_f = (double*) malloc(n * sizeof(double));
for(idx = 0; idx < n; idx++) {
h_f[idx] = 1 - (((h_xy[idx].x - 0.5) * (h_xy[idx].x - 0.5)) +
((h_xy[idx].y - 0.5) * (h_xy[idx].y - 0.5))) + GET_RAND;
}
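// Observed data: f(r) = 1 - ||r - (0.5, 0.5)||^2 plus a noise term GET_RAND (a macro assumed
// to be defined earlier in this file, outside this excerpt).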
// Exporting to GPU
cudaMemcpy(f, h_f, n*sizeof(double), cudaMemcpyHostToDevice);
// Initialize timing events
cudaEvent_t start_kernel, stop_kernel, start_cholesky, stop_cholesky, start_solver, stop_solver;
cudaEventCreate(&start_kernel);
cudaEventCreate(&stop_kernel);
cudaEventCreate(&start_cholesky);
cudaEventCreate(&stop_cholesky);
cudaEventCreate(&start_solver);
cudaEventCreate(&stop_solver);
// Configuring the shared memory banks for double precision
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
// Record timing event - start
cudaEventRecord(start_kernel, 0);
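// Every kernel is launched as a single block of num_threads threads; where dynamic shared
// memory is requested it holds one double per thread for the partial-sum reductions.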
gpr_get_K<<<1,num_threads>>>(num_threads, m, K, xy);
cudaEventRecord(start_cholesky, 0);
gpr_cholesky<<<1,num_threads, num_threads * sizeof(double)>>>(num_threads, K, L);
cudaEventRecord(stop_cholesky, 0);
cudaEventSynchronize(stop_cholesky);
cudaEventRecord(start_solver, 0);
gpr_solver<<<1,num_threads, num_threads * sizeof(double)>>>(num_threads, Y, z, L, f);
cudaEventRecord(stop_solver, 0);
cudaEventSynchronize(stop_solver);
gpr_predict<<<1,num_threads, num_threads * sizeof(double)>>>(num_threads, m, rstar[0], rstar[1], k, z, xy);
// Record timing event - stop
cudaEventRecord(stop_kernel, 0);
cudaEventSynchronize(stop_kernel);
typeof(d_f_pred) f_pred;
cudaMemcpyFromSymbol(&f_pred, d_f_pred, sizeof(d_f_pred), 0, cudaMemcpyDeviceToHost);
printf("The predicted value of f at r_star : %f\n", f_pred);
// Compute elapsed time
float elapsedTime_cholesky;
cudaEventElapsedTime(&elapsedTime_cholesky, start_cholesky, stop_cholesky);
printf("Elapsed time: Cholesky = %f ms\n", elapsedTime_cholesky);
float elapsedTime_solver;
cudaEventElapsedTime(&elapsedTime_solver, start_solver, stop_solver);
printf("Elapsed time: Solver = %f ms\n", elapsedTime_solver);
float elapsedTime_kernel;
cudaEventElapsedTime(&elapsedTime_kernel, start_kernel, stop_kernel);
printf("Elapsed time: Kernel = %f ms\n", elapsedTime_kernel);
long flops_cholesky = 0;
long flops_solver = 0;
for(int i = 0; i < n; i++) {
flops_solver += (2*i + num_threads + 2);
}
flops_solver *= 2;
for(int i = 0; i < n; i++) {
flops_cholesky += (2 * i + num_threads + 2) * (n - i);
}
printf("Floating point operations Cholesky Factorization: %ld\n", flops_cholesky); //Update needed
printf("Floating point operations per second (FLOPS) Cholesky : %f Gflops\n", (flops_cholesky)/(elapsedTime_cholesky/1000.0)/(1024.0*1024*1024)); //Update Needed
printf("Floating point operations Solver: %ld\n", flops_solver); //Update needed
printf("Floating point operations per second (FLOPS) Solver: %f Gflops\n", (flops_solver)/(elapsedTime_solver/1000.0)/(1024.0*1024*1024)); //Update Needed
//for(int i = 0; i < m*m; i++)
// printf("%f \n", h_f[i]);
cudaFree(f);
cudaFree(k);
cudaFree(Y);
cudaFree(z);
cudaFree(xy);
cudaFree(K);
cudaFree(L);
free(h_xy);
free(h_f);
// Delete timing events
cudaEventDestroy(start_kernel);
cudaEventDestroy(stop_kernel);
cudaEventDestroy(start_cholesky);
cudaEventDestroy(stop_cholesky);
cudaEventDestroy(start_solver);
cudaEventDestroy(stop_solver);
}
|
487bd1a6a076befbb2e074ee05c36ee7bcde68b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=32 --gridDim=2
#include "../common.h"
__global__ void partial_get_kernel3d(int Ntotal, int *g_index, float *g_partQ){
int n = blockIdx.x * blockDim.x + threadIdx.x;
if(n<Ntotal)
g_partQ[n] = tex1Dfetch(t_Q, g_index[n]);
}
| 487bd1a6a076befbb2e074ee05c36ee7bcde68b7.cu | //pass
//--blockDim=32 --gridDim=2
#include "../common.h"
__global__ void partial_get_kernel3d(int Ntotal, int *g_index, float *g_partQ){
int n = blockIdx.x * blockDim.x + threadIdx.x;
if(n<Ntotal)
g_partQ[n] = tex1Dfetch(t_Q, g_index[n]);
}
|
e629ae052ccf36481b8c1ff134ca05186da233f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaTriangles.h"
// __global__ void triangleSum(int *rowIndex_dev, int *colIndex_dev, pair *pairs_cm_dev, pair *pairs_rm_dev, int nze, int N, int *triangle_sum){
// __global__ void triangleSum(int **row_arr_pointer, pair *pairs_rm_dev, int nze, int N, int *triangle_sum){
__global__ void triangleSum(int *allRowsArray_dev, int *nzeCummus_dev, pair *pairs_rm_dev, int nze, int N, int *triangle_sum){
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
int sum_i;
// printf("yo\n");
struct pair pair_i;
int col, row;
// for(int i = 0 ;i<N;i++){
// row_arr = row_arr_pointer[i]
// }
// int minBlocks = ceilf((float) N / (float) blockDim.x);
int minBlocks = ceilf((float) nze / (float) blockDim.x);
// printf("tid = %d, i = %d, stride = %d, N = %d, minBlocks = %d, blockDim = %d, minBlocks*blockDim = %d \n", tid,i,stride, N, minBlocks, blockDim.x, minBlocks*blockDim.x);
// if(i<nze){
// if(i==0){
// printf("minBlocks = %d")
// }
for(int index=i; index<minBlocks*blockDim.x;index+=stride){
pair_i = pairs_rm_dev[index];
col = pair_i.col;
row = pair_i.row;
// printf("---%d . row, col = %d, %d \n",index, row, col);
if(tid ==0){
// printf(" ---- tid = %d, i = %d, stride = %d, N = %d, rowIndex_dev[0] = %d \n", tid,i,stride, N, rowIndex_dev[0] );
}
// if(index==8){
if(index<nze){
// sum_i = 1;
// sum_i = sumForPair(rowIndex_dev, colIndex_dev, pairs_cm_dev, pairs_rm_dev, nze, N, index);
// sum_i = sumForPair(row_arr_pointer[row], row_arr_pointer[col], nze, N, index);
// sum_i = commonElementCount(row_arr_pointer[row], row_arr_pointer[row][0],row_arr_pointer[col], row_arr_pointer[col][0], row, col);
sum_i = commonElementCount(&allRowsArray_dev[nzeCummus_dev[row-1]] , allRowsArray_dev[nzeCummus_dev[row-1]] , &allRowsArray_dev[nzeCummus_dev[col-1]] , allRowsArray_dev[nzeCummus_dev[col-1]] , row , col);
// printf(" (%d) X (%d) = %d \n", col, row, sum_i);
}else{
sum_i = 0;
}
// printf("tid = %d, i = %d, stride = %d, nze = %d, minBlocks = %d, blockDim = %d, index = %d, sum_i = %d \n", tid,i,stride, nze, minBlocks, blockDim.x, index, sum_i);
// }
// map reduce the sums of each pair
// sdata[tid] = rowIndex_dev[index];
// printf(" <<>> tid = %d, i = %d, stride = %d, N = %d \n", tid,i,stride, N );
sdata[tid] = sum_i;
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0){
triangle_sum[blockIdx.x] += sdata[0];
// printf("TriangleSum[%d] = %d \n\n",blockIdx.x,triangle_sum[blockIdx.x]);
}
}
}
//returns the final result of matrix A*A.*A for position (pair[index].row , pair[index].col)
// sumForPair(row_arr_pointer[row], row_arr_pointer[col], nze, N, index);
// __device__ int sumForPair(int *row1_arr, int *row2_arr, int nze, int N, int index){
// int row = pairs_rm_dev[index].row-1;
// int col = pairs_rm_dev[index].col-1;
// // printf(" XyXyX -- row = %d , col = %d \n",row,col);
// int *row_arr;
// int *col_arr;
// // int *row_arr = row_arr_p[row];
// // int *col_arr = row_arr_p[col];
// int rowNzeCount = 0;// = row_arr[0];
// int colNzeCount = 0; //= col_arr[0];
// // int rowNzeCount = row_arr[0];
// // int colNzeCount = col_arr[0];
// // printf("rowNzeCount & col = %d, %d \n", rowNzeCount, colNzeCount);
// allRowNze(row, &row_arr, &rowNzeCount, rowIndex_dev, colIndex_dev, pairs_cm_dev, pairs_rm_dev, nze, N);
// allRowNze(col, &col_arr, &colNzeCount, rowIndex_dev, colIndex_dev, pairs_cm_dev, pairs_rm_dev, nze, N);
// // allRowNze(col, &col_arr, &colNzeCount, rowIndex_dev, colIndex_dev, pairs_cm_dev, pairs_rm_dev, nze, N);
// int pairResult = commonElementCount(row_arr, rowNzeCount, col_arr,colNzeCount, row, col);
// free(row_arr);
// free(col_arr);
// // = (int*)malloc(sizeof(int)*10);
// // printf("<---> sum for pair (%d, %d) = %d \n", col,row,pairResult);
// return pairResult;
// }
// assign to *row_arr matrix all non-zero-elements of A's "row" row.
__host__ __device__ void allRowNze(int row, int **row_arr,int *rowNzeCount, int *rowIndex_dev, int *colIndex_dev, pair *pairs_cm_dev, pair *pairs_rm_dev, int nze, int N){
int colElems = colIndex_dev[row+1]-colIndex_dev[row];
int rowElems = rowIndex_dev[row+1]-rowIndex_dev[row];
// printf("colElems = %d, rowElems = %d \n",colElems, rowElems);
// to avoid extreme situations for out of bounds behavior...
int lastRow = N;
if(row==lastRow-1){
// printf("")
colElems = nze-colIndex_dev[row];
rowElems = nze-rowIndex_dev[row];
}
int staticCol = colElems;
int staticrow = rowElems;
// printf("row = %d, colElems = %d, rowElems = %d \n", row, colElems, rowElems);
//total elements = col elems + row elems + 1 for size
(*row_arr) = (int *)malloc(sizeof(int)*(colElems+rowElems+1));
(*row_arr)[0] = colElems + rowElems;
(*rowNzeCount) = (*row_arr)[0];
// need 2 pairs to calculate distance between them
struct pair prevElem;
prevElem.row = 1;
prevElem.col = row; // looks odd but is correct: the running distance starts from the reference element (row = 1, col = row)
struct pair nextElem;
int count = 0;
int dist = 0;
int totalDist = 0;
while(colElems>0){
nextElem = pairs_cm_dev[colIndex_dev[row]+count]; // get from 'row'-th column the 'count'-th nz element
dist = (nextElem.row - prevElem.row) + (nextElem.col - prevElem.col);
totalDist += dist;
(*row_arr)[count+1] = totalDist;
count ++;
prevElem = nextElem;
colElems--;
}
while(rowElems>0){
nextElem = pairs_rm_dev[rowIndex_dev[row] + count - staticCol]; // get from the 'row'-th row the ('count' - colElems)-th nz element
dist = (nextElem.row - prevElem.row) + (nextElem.col - prevElem.col);
totalDist += dist;
(*row_arr)[count+1] = totalDist;
count ++;
prevElem = nextElem;
rowElems--;
}
// if(count == (colElems+rowElems)){
// printf("- - - YES: row = %d, rowNzeCount = %d, colElems = %d, rowElems = %d , count = %d\n",row,(*rowNzeCount),(staticCol),(staticrow),count);
// }else{
// printf("^ ^ ^ NO: row = %d, rowNzeCount = %d, colElems = %d, rowElems = %d , count = %d\n",row,(*rowNzeCount),(staticCol),(staticrow),count);
// printf("nooooo\n");
// }
// *row_arr = (int *)malloc(sizeof(int)*10);
// (*row_arr)[i] = 5;
}
__device__ int commonElementCount(int *row_arr, int rowNzeCount, int *col_arr,int colNzeCount, int row, int col){
int rowCount = 0;
int colCount = 0;
int commonElements = 0;
int intex = threadIdx.x;
while(rowCount<rowNzeCount && colCount<colNzeCount){
if(row_arr[rowCount+1]==col_arr[colCount+1]){
commonElements++;
rowCount++;
colCount++;
}else if(row_arr[rowCount+1]>col_arr[colCount+1]){
colCount++;
}else if(row_arr[rowCount+1]<col_arr[colCount+1]){
rowCount++;
}
}
// int rowCount = rowNzeCount;
// int colCount = colNzeCount;
// printf(">>>Row %d : elems = %d [", row, row_arr[0]);
// for(int i=1;i<=rowNzeCount;i++){
// printf(" %d",row_arr[i]);
// // if(intex ==0){
// // }
// }
// printf("\n");
// // printf(" ]\n");
// printf(">>>Col %d : elems = %d [", col, col_arr[0]);
// for(int i=1;i<=colNzeCount;i++){
// printf("%d ",col_arr[i]);
// }
// printf("\n");
// printf(">>> (%d X %d) common: %d \n", col+1, row+1, commonElements );
// printf("")
return commonElements;
} | e629ae052ccf36481b8c1ff134ca05186da233f7.cu | #include "cudaTriangles.h"
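// Triangle counting over a sparse symmetric adjacency matrix: for every stored nonzero
// (row, col) in pairs_rm_dev the kernel counts the common neighbours of row and col using the
// per-row lists packed in allRowsArray_dev (per-row offsets in nzeCummus_dev, first entry of
// each list is its length), then block-reduces the per-pair counts into triangle_sum.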
// __global__ void triangleSum(int *rowIndex_dev, int *colIndex_dev, pair *pairs_cm_dev, pair *pairs_rm_dev, int nze, int N, int *triangle_sum){
// __global__ void triangleSum(int **row_arr_pointer, pair *pairs_rm_dev, int nze, int N, int *triangle_sum){
__global__ void triangleSum(int *allRowsArray_dev, int *nzeCummus_dev, pair *pairs_rm_dev, int nze, int N, int *triangle_sum){
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
int sum_i;
// printf("yo\n");
struct pair pair_i;
int col, row;
// for(int i = 0 ;i<N;i++){
// row_arr = row_arr_pointer[i]
// }
// int minBlocks = ceilf((float) N / (float) blockDim.x);
int minBlocks = ceilf((float) nze / (float) blockDim.x);
// printf("tid = %d, i = %d, stride = %d, N = %d, minBlocks = %d, blockDim = %d, minBlocks*blockDim = %d \n", tid,i,stride, N, minBlocks, blockDim.x, minBlocks*blockDim.x);
// if(i<nze){
// if(i==0){
// printf("minBlocks = %d")
// }
for(int index=i; index<minBlocks*blockDim.x;index+=stride){
pair_i = pairs_rm_dev[index];
col = pair_i.col;
row = pair_i.row;
// printf("---%d . row, col = %d, %d \n",index, row, col);
if(tid ==0){
// printf(" ---- tid = %d, i = %d, stride = %d, N = %d, rowIndex_dev[0] = %d \n", tid,i,stride, N, rowIndex_dev[0] );
}
// if(index==8){
if(index<nze){
// sum_i = 1;
// sum_i = sumForPair(rowIndex_dev, colIndex_dev, pairs_cm_dev, pairs_rm_dev, nze, N, index);
// sum_i = sumForPair(row_arr_pointer[row], row_arr_pointer[col], nze, N, index);
// sum_i = commonElementCount(row_arr_pointer[row], row_arr_pointer[row][0],row_arr_pointer[col], row_arr_pointer[col][0], row, col);
sum_i = commonElementCount(&allRowsArray_dev[nzeCummus_dev[row-1]] , allRowsArray_dev[nzeCummus_dev[row-1]] , &allRowsArray_dev[nzeCummus_dev[col-1]] , allRowsArray_dev[nzeCummus_dev[col-1]] , row , col);
// printf(" (%d) X (%d) = %d \n", col, row, sum_i);
}else{
sum_i = 0;
}
// printf("tid = %d, i = %d, stride = %d, nze = %d, minBlocks = %d, blockDim = %d, index = %d, sum_i = %d \n", tid,i,stride, nze, minBlocks, blockDim.x, index, sum_i);
// }
// map reduce the sums of each pair
// sdata[tid] = rowIndex_dev[index];
// printf(" <<>> tid = %d, i = %d, stride = %d, N = %d \n", tid,i,stride, N );
sdata[tid] = sum_i;
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (tid < s) {
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0){
triangle_sum[blockIdx.x] += sdata[0];
// printf("TriangleSum[%d] = %d \n\n",blockIdx.x,triangle_sum[blockIdx.x]);
}
}
}
//returns the final result of matrix A*A.*A for position (pair[index].row , pair[index].col)
// sumForPair(row_arr_pointer[row], row_arr_pointer[col], nze, N, index);
// __device__ int sumForPair(int *row1_arr, int *row2_arr, int nze, int N, int index){
// int row = pairs_rm_dev[index].row-1;
// int col = pairs_rm_dev[index].col-1;
// // printf(" XyXyX -- row = %d , col = %d \n",row,col);
// int *row_arr;
// int *col_arr;
// // int *row_arr = row_arr_p[row];
// // int *col_arr = row_arr_p[col];
// int rowNzeCount = 0;// = row_arr[0];
// int colNzeCount = 0; //= col_arr[0];
// // int rowNzeCount = row_arr[0];
// // int colNzeCount = col_arr[0];
// // printf("rowNzeCount & col = %d, %d \n", rowNzeCount, colNzeCount);
// allRowNze(row, &row_arr, &rowNzeCount, rowIndex_dev, colIndex_dev, pairs_cm_dev, pairs_rm_dev, nze, N);
// allRowNze(col, &col_arr, &colNzeCount, rowIndex_dev, colIndex_dev, pairs_cm_dev, pairs_rm_dev, nze, N);
// // allRowNze(col, &col_arr, &colNzeCount, rowIndex_dev, colIndex_dev, pairs_cm_dev, pairs_rm_dev, nze, N);
// int pairResult = commonElementCount(row_arr, rowNzeCount, col_arr,colNzeCount, row, col);
// free(row_arr);
// free(col_arr);
// // = (int*)malloc(sizeof(int)*10);
// // printf("<---> sum for pair (%d, %d) = %d \n", col,row,pairResult);
// return pairResult;
// }
// assign to *row_arr matrix all non-zero-elements of A's "row" row.
__host__ __device__ void allRowNze(int row, int **row_arr,int *rowNzeCount, int *rowIndex_dev, int *colIndex_dev, pair *pairs_cm_dev, pair *pairs_rm_dev, int nze, int N){
int colElems = colIndex_dev[row+1]-colIndex_dev[row];
int rowElems = rowIndex_dev[row+1]-rowIndex_dev[row];
// printf("colElems = %d, rowElems = %d \n",colElems, rowElems);
// to avoid extreme situations for out of bounds behavior...
int lastRow = N;
if(row==lastRow-1){
// printf("")
colElems = nze-colIndex_dev[row];
rowElems = nze-rowIndex_dev[row];
}
int staticCol = colElems;
int staticrow = rowElems;
// printf("row = %d, colElems = %d, rowElems = %d \n", row, colElems, rowElems);
//total elements = col elems + row elems + 1 for size
(*row_arr) = (int *)malloc(sizeof(int)*(colElems+rowElems+1));
(*row_arr)[0] = colElems + rowElems;
(*rowNzeCount) = (*row_arr)[0];
// need 2 pairs to calculate distance between them
struct pair prevElem;
prevElem.row = 1;
prevElem.col = row; // looks odd but is correct: the running distance starts from the reference element (row = 1, col = row)
struct pair nextElem;
int count = 0;
int dist = 0;
int totalDist = 0;
while(colElems>0){
nextElem = pairs_cm_dev[colIndex_dev[row]+count]; // get from 'row'-th column the 'count'-th nz element
dist = (nextElem.row - prevElem.row) + (nextElem.col - prevElem.col);
totalDist += dist;
(*row_arr)[count+1] = totalDist;
count ++;
prevElem = nextElem;
colElems--;
}
while(rowElems>0){
nextElem = pairs_rm_dev[rowIndex_dev[row] + count - staticCol]; // get from the 'row'-th row the ('count' - colElems)-th nz element
dist = (nextElem.row - prevElem.row) + (nextElem.col - prevElem.col);
totalDist += dist;
(*row_arr)[count+1] = totalDist;
count ++;
prevElem = nextElem;
rowElems--;
}
// if(count == (colElems+rowElems)){
// printf("- - - YES: row = %d, rowNzeCount = %d, colElems = %d, rowElems = %d , count = %d\n",row,(*rowNzeCount),(staticCol),(staticrow),count);
// }else{
// printf("^ ^ ^ NO: row = %d, rowNzeCount = %d, colElems = %d, rowElems = %d , count = %d\n",row,(*rowNzeCount),(staticCol),(staticrow),count);
// printf("nooooo\n");
// }
// *row_arr = (int *)malloc(sizeof(int)*10);
// (*row_arr)[i] = 5;
}
__device__ int commonElementCount(int *row_arr, int rowNzeCount, int *col_arr,int colNzeCount, int row, int col){
int rowCount = 0;
int colCount = 0;
int commonElements = 0;
int intex = threadIdx.x;
while(rowCount<rowNzeCount && colCount<colNzeCount){
if(row_arr[rowCount+1]==col_arr[colCount+1]){
commonElements++;
rowCount++;
colCount++;
}else if(row_arr[rowCount+1]>col_arr[colCount+1]){
colCount++;
}else if(row_arr[rowCount+1]<col_arr[colCount+1]){
rowCount++;
}
}
// int rowCount = rowNzeCount;
// int colCount = colNzeCount;
// printf(">>>Row %d : elems = %d [", row, row_arr[0]);
// for(int i=1;i<=rowNzeCount;i++){
// printf(" %d",row_arr[i]);
// // if(intex ==0){
// // }
// }
// printf("\n");
// // printf(" ]\n");
// printf(">>>Col %d : elems = %d [", col, col_arr[0]);
// for(int i=1;i<=colNzeCount;i++){
// printf("%d ",col_arr[i]);
// }
// printf("\n");
// printf(">>> (%d X %d) common: %d \n", col+1, row+1, commonElements );
// printf("")
return commonElements;
} |
84d9047d38413410fd23b67ce286f7fc9c5463a7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "linear_sampler_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "../linear_sampler_layer.h"
#include "../neural_network_exception.h"
#include "util_cuda.h"
namespace nnforge
{
namespace cuda
{
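// Forward pass of a 2D sampler: for every output pixel the grid supplies normalized
// (x, y) sampling coordinates, which are denormalized and bilinearly interpolated from
// the input feature maps; samples falling outside the input are treated as zero.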
__global__ void linear_sampler_2d_upd_kernel(
float * __restrict output,
const float * __restrict grid,
const float * __restrict input_data,
int output_width,
int output_height,
int entry_count,
int input_width,
int input_height,
int input_feature_map_count,
float denormalize_scale_x,
float denormalize_scale_y,
int output_elem_count_per_feature_map,
int input_elem_count_per_feature_map,
int output_elem_count_per_entry,
int input_elem_count_per_entry)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < output_width) && (y < output_height) && (entry_id < entry_count))
{
int grid_x_offset = entry_id * output_elem_count_per_feature_map * 2 + y * output_width + x;
int grid_y_offset = grid_x_offset + output_elem_count_per_feature_map;
float normalized_x_pos = __load_nc(grid + grid_x_offset);
float normalized_y_pos = __load_nc(grid + grid_y_offset);
float absolute_x_pos = normalized_x_pos * denormalize_scale_x;
float absolute_y_pos = normalized_y_pos * denormalize_scale_y;
int left_x = __float2int_rd(absolute_x_pos);
int top_y = __float2int_rd(absolute_y_pos);
int right_x = left_x + 1;
int bottom_y = top_y + 1;
float right_weight = absolute_x_pos - (float)left_x;
float left_weight = 1.0F - right_weight;
float bottom_weight = absolute_y_pos - (float)top_y;
float top_weight = 1.0F - bottom_weight;
float top_left_weight = top_weight * left_weight;
float top_right_weight = top_weight * right_weight;
float bottom_left_weight = bottom_weight * left_weight;
float bottom_right_weight = bottom_weight * right_weight;
bool left_in_bounds = (unsigned int)left_x < (unsigned int)input_width;
bool right_in_bounds = (unsigned int)right_x < (unsigned int)input_width;
bool top_in_bounds = (unsigned int)top_y < (unsigned int)input_height;
bool bottom_in_bounds = (unsigned int)bottom_y < (unsigned int)input_height;
bool top_left_in_bounds = left_in_bounds && top_in_bounds;
bool top_right_in_bounds = right_in_bounds && top_in_bounds;
bool bottom_left_in_bounds = left_in_bounds && bottom_in_bounds;
bool bottom_right_in_bounds = right_in_bounds && bottom_in_bounds;
const float * current_input_data = input_data + entry_id * input_elem_count_per_entry + top_y * input_width + left_x;
float * current_output = output + entry_id * output_elem_count_per_entry + y * output_width + x;
for(int input_feature_map_id = 0; input_feature_map_id < input_feature_map_count; ++input_feature_map_id)
{
float top_left_val = top_left_in_bounds ? __load_nc(current_input_data) : 0.0F;
float top_right_val = top_right_in_bounds ? __load_nc(current_input_data + 1) : 0.0F;
float bottom_left_val = bottom_left_in_bounds ? __load_nc(current_input_data + input_width) : 0.0F;
float bottom_right_val = bottom_right_in_bounds ? __load_nc(current_input_data + input_width + 1) : 0.0F;
float weighted_sum = top_left_weight * top_left_val + top_right_weight * top_right_val + bottom_left_weight * bottom_left_val + bottom_right_weight * bottom_right_val;
*current_output = weighted_sum;
current_input_data += input_elem_count_per_feature_map;
current_output += output_elem_count_per_feature_map;
}
}
}
__global__ void linear_sampler_2d_backprop_upd_kernel(
float * __restrict input_grid_errors,
const float * __restrict grid,
const float * __restrict input_data,
const float * __restrict output_errors,
int output_width,
int output_height,
int entry_count,
int input_width,
int input_height,
int input_feature_map_count,
float denormalize_scale_x,
float denormalize_scale_y,
int output_elem_count_per_feature_map,
int input_elem_count_per_feature_map,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
bool add_update_to_destination)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < output_width) && (y < output_height) && (entry_id < entry_count))
{
int grid_x_offset = entry_id * output_elem_count_per_feature_map * 2 + y * output_width + x;
int grid_y_offset = grid_x_offset + output_elem_count_per_feature_map;
float normalized_x_pos = __load_nc(grid + grid_x_offset);
float normalized_y_pos = __load_nc(grid + grid_y_offset);
float old_input_err_x = 0.0F;
float old_input_err_y = 0.0F;
if (add_update_to_destination)
{
old_input_err_x = __load_nc(input_grid_errors + grid_x_offset);
old_input_err_y = __load_nc(input_grid_errors + grid_y_offset);
}
float absolute_x_pos = normalized_x_pos * denormalize_scale_x;
float absolute_y_pos = normalized_y_pos * denormalize_scale_y;
int left_x = __float2int_rd(absolute_x_pos);
int top_y = __float2int_rd(absolute_y_pos);
int right_x = left_x + 1;
int bottom_y = top_y + 1;
bool left_in_bounds = (unsigned int)left_x < (unsigned int)input_width;
bool right_in_bounds = (unsigned int)right_x < (unsigned int)input_width;
bool top_in_bounds = (unsigned int)top_y < (unsigned int)input_height;
bool bottom_in_bounds = (unsigned int)bottom_y < (unsigned int)input_height;
bool top_left_in_bounds = left_in_bounds && top_in_bounds;
bool top_right_in_bounds = right_in_bounds && top_in_bounds;
bool bottom_left_in_bounds = left_in_bounds && bottom_in_bounds;
bool bottom_right_in_bounds = right_in_bounds && bottom_in_bounds;
const float * current_input_data = input_data + entry_id * input_elem_count_per_entry + top_y * input_width + left_x;
const float * current_output_errors = output_errors + entry_id * output_elem_count_per_entry + y * output_width + x;
float top_left_sum = 0.0F;
float top_right_sum = 0.0F;
float bottom_left_sum = 0.0F;
float bottom_right_sum = 0.0F;
for(int input_feature_map_id = 0; input_feature_map_id < input_feature_map_count; ++input_feature_map_id)
{
float output_error = __load_nc(current_output_errors);
float top_left_val = top_left_in_bounds ? __load_nc(current_input_data) : 0.0F;
float top_right_val = top_right_in_bounds ? __load_nc(current_input_data + 1) : 0.0F;
float bottom_left_val = bottom_left_in_bounds ? __load_nc(current_input_data + input_width) : 0.0F;
float bottom_right_val = bottom_right_in_bounds ? __load_nc(current_input_data + input_width + 1) : 0.0F;
top_left_sum += top_left_val * output_error;
top_right_sum += top_right_val * output_error;
bottom_left_sum += bottom_left_val * output_error;
bottom_right_sum += bottom_right_val * output_error;
current_input_data += input_elem_count_per_feature_map;
current_output_errors += output_elem_count_per_feature_map;
}
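// Gradient of bilinear interpolation w.r.t. the sampling position: the x-derivative is the
// top/bottom-weighted difference between right and left samples, the y-derivative the
// left/right-weighted difference between bottom and top; the denormalize scales apply the
// chain rule back to the normalized grid coordinates.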
float right_weight = absolute_x_pos - (float)left_x;
float left_weight = 1.0F - right_weight;
float bottom_weight = absolute_y_pos - (float)top_y;
float top_weight = 1.0F - bottom_weight;
float input_err_x = (top_weight * (top_right_sum - top_left_sum) + bottom_weight * (bottom_right_sum - bottom_left_sum)) * denormalize_scale_x;
float input_err_y = (left_weight * (bottom_left_sum - top_left_sum) + right_weight * (bottom_right_sum - top_right_sum)) * denormalize_scale_y;
input_grid_errors[grid_x_offset] = old_input_err_x + input_err_x;
input_grid_errors[grid_y_offset] = old_input_err_y + input_err_y;
}
}
void linear_sampler_layer_updater_cuda::enqueue_forward_propagation(
hipStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
hipLaunchKernelGGL(( linear_sampler_2d_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*output_buffer,
*input_buffers[0],
*input_buffers[1],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count,
input_configuration_specific_list[1].dimension_sizes[0],
input_configuration_specific_list[1].dimension_sizes[1],
input_configuration_specific_list[1].feature_map_count,
static_cast<float>(input_configuration_specific_list[1].dimension_sizes[0] - 1),
static_cast<float>(input_configuration_specific_list[1].dimension_sizes[1] - 1),
output_elem_count_per_feature_map,
input_elem_count_per_feature_map_list[1],
output_elem_count_per_entry,
input_elem_count_per_entry_list[1]);
}
void linear_sampler_layer_updater_cuda::enqueue_backward_data_propagation(
hipStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
hipLaunchKernelGGL(( linear_sampler_2d_backprop_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
*output_errors_buffer,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count,
input_configuration_specific_list[1].dimension_sizes[0],
input_configuration_specific_list[1].dimension_sizes[1],
input_configuration_specific_list[1].feature_map_count,
static_cast<float>(input_configuration_specific_list[1].dimension_sizes[0] - 1),
static_cast<float>(input_configuration_specific_list[1].dimension_sizes[1] - 1),
output_elem_count_per_feature_map,
input_elem_count_per_feature_map_list[1],
output_elem_count_per_entry,
input_elem_count_per_entry_list[1],
add_update_to_destination);
}
void linear_sampler_layer_updater_cuda::updater_configured()
{
if (actions.find(layer_action(layer_action::backward_data, 1)) != actions.end())
throw neural_network_exception("linear_sampler_layer_updater_cuda cannot do backward propagation for input neurons");
}
bool linear_sampler_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
return (action_input_index == 0);
}
bool linear_sampler_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
}
}
| 84d9047d38413410fd23b67ce286f7fc9c5463a7.cu | /*
* Copyright 2011-2016 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "linear_sampler_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "../linear_sampler_layer.h"
#include "../neural_network_exception.h"
#include "util_cuda.h"
namespace nnforge
{
namespace cuda
{
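// Forward pass of a 2D sampler: for every output pixel the grid supplies normalized
// (x, y) sampling coordinates, which are denormalized and bilinearly interpolated from
// the input feature maps; samples falling outside the input are treated as zero.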
__global__ void linear_sampler_2d_upd_kernel(
float * __restrict output,
const float * __restrict grid,
const float * __restrict input_data,
int output_width,
int output_height,
int entry_count,
int input_width,
int input_height,
int input_feature_map_count,
float denormalize_scale_x,
float denormalize_scale_y,
int output_elem_count_per_feature_map,
int input_elem_count_per_feature_map,
int output_elem_count_per_entry,
int input_elem_count_per_entry)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < output_width) && (y < output_height) && (entry_id < entry_count))
{
int grid_x_offset = entry_id * output_elem_count_per_feature_map * 2 + y * output_width + x;
int grid_y_offset = grid_x_offset + output_elem_count_per_feature_map;
float normalized_x_pos = __load_nc(grid + grid_x_offset);
float normalized_y_pos = __load_nc(grid + grid_y_offset);
float absolute_x_pos = normalized_x_pos * denormalize_scale_x;
float absolute_y_pos = normalized_y_pos * denormalize_scale_y;
int left_x = __float2int_rd(absolute_x_pos);
int top_y = __float2int_rd(absolute_y_pos);
int right_x = left_x + 1;
int bottom_y = top_y + 1;
float right_weight = absolute_x_pos - (float)left_x;
float left_weight = 1.0F - right_weight;
float bottom_weight = absolute_y_pos - (float)top_y;
float top_weight = 1.0F - bottom_weight;
float top_left_weight = top_weight * left_weight;
float top_right_weight = top_weight * right_weight;
float bottom_left_weight = bottom_weight * left_weight;
float bottom_right_weight = bottom_weight * right_weight;
bool left_in_bounds = (unsigned int)left_x < (unsigned int)input_width;
bool right_in_bounds = (unsigned int)right_x < (unsigned int)input_width;
bool top_in_bounds = (unsigned int)top_y < (unsigned int)input_height;
bool bottom_in_bounds = (unsigned int)bottom_y < (unsigned int)input_height;
bool top_left_in_bounds = left_in_bounds && top_in_bounds;
bool top_right_in_bounds = right_in_bounds && top_in_bounds;
bool bottom_left_in_bounds = left_in_bounds && bottom_in_bounds;
bool bottom_right_in_bounds = right_in_bounds && bottom_in_bounds;
const float * current_input_data = input_data + entry_id * input_elem_count_per_entry + top_y * input_width + left_x;
float * current_output = output + entry_id * output_elem_count_per_entry + y * output_width + x;
for(int input_feature_map_id = 0; input_feature_map_id < input_feature_map_count; ++input_feature_map_id)
{
float top_left_val = top_left_in_bounds ? __load_nc(current_input_data) : 0.0F;
float top_right_val = top_right_in_bounds ? __load_nc(current_input_data + 1) : 0.0F;
float bottom_left_val = bottom_left_in_bounds ? __load_nc(current_input_data + input_width) : 0.0F;
float bottom_right_val = bottom_right_in_bounds ? __load_nc(current_input_data + input_width + 1) : 0.0F;
float weighted_sum = top_left_weight * top_left_val + top_right_weight * top_right_val + bottom_left_weight * bottom_left_val + bottom_right_weight * bottom_right_val;
*current_output = weighted_sum;
current_input_data += input_elem_count_per_feature_map;
current_output += output_elem_count_per_feature_map;
}
}
}
__global__ void linear_sampler_2d_backprop_upd_kernel(
float * __restrict input_grid_errors,
const float * __restrict grid,
const float * __restrict input_data,
const float * __restrict output_errors,
int output_width,
int output_height,
int entry_count,
int input_width,
int input_height,
int input_feature_map_count,
float denormalize_scale_x,
float denormalize_scale_y,
int output_elem_count_per_feature_map,
int input_elem_count_per_feature_map,
int output_elem_count_per_entry,
int input_elem_count_per_entry,
bool add_update_to_destination)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int entry_id = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < output_width) && (y < output_height) && (entry_id < entry_count))
{
int grid_x_offset = entry_id * output_elem_count_per_feature_map * 2 + y * output_width + x;
int grid_y_offset = grid_x_offset + output_elem_count_per_feature_map;
float normalized_x_pos = __load_nc(grid + grid_x_offset);
float normalized_y_pos = __load_nc(grid + grid_y_offset);
float old_input_err_x = 0.0F;
float old_input_err_y = 0.0F;
if (add_update_to_destination)
{
old_input_err_x = __load_nc(input_grid_errors + grid_x_offset);
old_input_err_y = __load_nc(input_grid_errors + grid_y_offset);
}
float absolute_x_pos = normalized_x_pos * denormalize_scale_x;
float absolute_y_pos = normalized_y_pos * denormalize_scale_y;
int left_x = __float2int_rd(absolute_x_pos);
int top_y = __float2int_rd(absolute_y_pos);
int right_x = left_x + 1;
int bottom_y = top_y + 1;
bool left_in_bounds = (unsigned int)left_x < (unsigned int)input_width;
bool right_in_bounds = (unsigned int)right_x < (unsigned int)input_width;
bool top_in_bounds = (unsigned int)top_y < (unsigned int)input_height;
bool bottom_in_bounds = (unsigned int)bottom_y < (unsigned int)input_height;
bool top_left_in_bounds = left_in_bounds && top_in_bounds;
bool top_right_in_bounds = right_in_bounds && top_in_bounds;
bool bottom_left_in_bounds = left_in_bounds && bottom_in_bounds;
bool bottom_right_in_bounds = right_in_bounds && bottom_in_bounds;
const float * current_input_data = input_data + entry_id * input_elem_count_per_entry + top_y * input_width + left_x;
const float * current_output_errors = output_errors + entry_id * output_elem_count_per_entry + y * output_width + x;
float top_left_sum = 0.0F;
float top_right_sum = 0.0F;
float bottom_left_sum = 0.0F;
float bottom_right_sum = 0.0F;
for(int input_feature_map_id = 0; input_feature_map_id < input_feature_map_count; ++input_feature_map_id)
{
float output_error = __load_nc(current_output_errors);
float top_left_val = top_left_in_bounds ? __load_nc(current_input_data) : 0.0F;
float top_right_val = top_right_in_bounds ? __load_nc(current_input_data + 1) : 0.0F;
float bottom_left_val = bottom_left_in_bounds ? __load_nc(current_input_data + input_width) : 0.0F;
float bottom_right_val = bottom_right_in_bounds ? __load_nc(current_input_data + input_width + 1) : 0.0F;
top_left_sum += top_left_val * output_error;
top_right_sum += top_right_val * output_error;
bottom_left_sum += bottom_left_val * output_error;
bottom_right_sum += bottom_right_val * output_error;
current_input_data += input_elem_count_per_feature_map;
current_output_errors += output_elem_count_per_feature_map;
}
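// Gradient of bilinear interpolation w.r.t. the sampling position: the x-derivative is the
// top/bottom-weighted difference between right and left samples, the y-derivative the
// left/right-weighted difference between bottom and top; the denormalize scales apply the
// chain rule back to the normalized grid coordinates.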
float right_weight = absolute_x_pos - (float)left_x;
float left_weight = 1.0F - right_weight;
float bottom_weight = absolute_y_pos - (float)top_y;
float top_weight = 1.0F - bottom_weight;
float input_err_x = (top_weight * (top_right_sum - top_left_sum) + bottom_weight * (bottom_right_sum - bottom_left_sum)) * denormalize_scale_x;
float input_err_y = (left_weight * (bottom_left_sum - top_left_sum) + right_weight * (bottom_right_sum - top_right_sum)) * denormalize_scale_y;
input_grid_errors[grid_x_offset] = old_input_err_x + input_err_x;
input_grid_errors[grid_y_offset] = old_input_err_y + input_err_y;
}
}
void linear_sampler_layer_updater_cuda::enqueue_forward_propagation(
cudaStream_t stream_id,
cuda_linear_buffer_device::ptr output_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::ptr temporary_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_per_entry_buffer,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
linear_sampler_2d_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*output_buffer,
*input_buffers[0],
*input_buffers[1],
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count,
input_configuration_specific_list[1].dimension_sizes[0],
input_configuration_specific_list[1].dimension_sizes[1],
input_configuration_specific_list[1].feature_map_count,
static_cast<float>(input_configuration_specific_list[1].dimension_sizes[0] - 1),
static_cast<float>(input_configuration_specific_list[1].dimension_sizes[1] - 1),
output_elem_count_per_feature_map,
input_elem_count_per_feature_map_list[1],
output_elem_count_per_entry,
input_elem_count_per_entry_list[1]);
}
void linear_sampler_layer_updater_cuda::enqueue_backward_data_propagation(
cudaStream_t stream_id,
unsigned int input_index,
cuda_linear_buffer_device::ptr input_errors_buffer,
cuda_linear_buffer_device::const_ptr output_errors_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data,
const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom,
const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers,
cuda_linear_buffer_device::const_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data,
cuda_linear_buffer_device::ptr temporary_working_fixed_buffer,
cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer,
cuda_linear_buffer_device::const_ptr temporary_fixed_buffer,
cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer,
bool add_update_to_destination,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_2d_access(
*cuda_config,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count);
linear_sampler_2d_backprop_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*input_errors_buffer,
*input_neurons_buffers[0],
*input_neurons_buffers[1],
*output_errors_buffer,
output_configuration_specific.dimension_sizes[0],
output_configuration_specific.dimension_sizes[1],
entry_count,
input_configuration_specific_list[1].dimension_sizes[0],
input_configuration_specific_list[1].dimension_sizes[1],
input_configuration_specific_list[1].feature_map_count,
static_cast<float>(input_configuration_specific_list[1].dimension_sizes[0] - 1),
static_cast<float>(input_configuration_specific_list[1].dimension_sizes[1] - 1),
output_elem_count_per_feature_map,
input_elem_count_per_feature_map_list[1],
output_elem_count_per_entry,
input_elem_count_per_entry_list[1],
add_update_to_destination);
}
void linear_sampler_layer_updater_cuda::updater_configured()
{
if (actions.find(layer_action(layer_action::backward_data, 1)) != actions.end())
throw neural_network_exception("linear_sampler_layer_updater_cuda cannot do backward propagation for input neurons");
}
bool linear_sampler_layer_updater_cuda::is_backward_data_dependent_on_input_buffer(unsigned int action_input_index, unsigned int data_input_index) const
{
return (action_input_index == 0);
}
bool linear_sampler_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const
{
return false;
}
}
}
|
a9b47a3365c5aa8b135f44ea045616e67df1486c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "profileSubphaseTruncateP_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
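// For every matrix size, sweep all 20 block shapes: pad the problem size up to a multiple of
// the block, warm up with 10 launches, then time 1000 launches and print [usecs,(block),(size)].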
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
profileSubphaseTruncateP_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
profileSubphaseTruncateP_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
profileSubphaseTruncateP_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, );
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a9b47a3365c5aa8b135f44ea045616e67df1486c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "profileSubphaseTruncateP_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
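// For every matrix size, sweep all 20 block shapes: pad the problem size up to a multiple of
// the block, warm up with 10 launches, then time 1000 launches and print [usecs,(block),(size)].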
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
profileSubphaseTruncateP_kernel<<<gridBlock,threadBlock>>>();
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
profileSubphaseTruncateP_kernel<<<gridBlock,threadBlock>>>();
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
profileSubphaseTruncateP_kernel<<<gridBlock,threadBlock>>>();
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b34f78be88e645ee5749f57183ba99c78b343e39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void subtract_and_square(float *dest, float *a, float *b, int n)
{
// const int index = threadIdx.x * (threadIdx.y + 1);
// dest[index] = ( a[index] - b[index] ) * ( a[index] - b[index] );
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < n)
dest[index] = ( a[index] - b[index] ) * ( a[index] - b[index] );
} | b34f78be88e645ee5749f57183ba99c78b343e39.cu | __global__ void subtract_and_square(float *dest, float *a, float *b, int n)
{
// const int index = threadIdx.x * (threadIdx.y + 1);
// dest[index] = ( a[index] - b[index] ) * ( a[index] - b[index] );
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index < n)
dest[index] = ( a[index] - b[index] ) * ( a[index] - b[index] );
} |
9dd8bf0204bf6eeeb1b4089bd3526e80e2e8c3f8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <fstream>
#include <ctime>
#include <string>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <thrust/host_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#define MAX 10
#define MIN 0
#define THREADS_PER_BLOCK 256
#define BITS_IN_BYTE 8
#define FILE_NAME "input1000.txt"
#define K 2
// Different versions for the parallel algorithm
#define RADIX_SORT 0
#define THRUST_SORT 1
#define SORT THRUST_SORT
#define DISTANCE_GATHER 0
#define DISTANCE_MAPREDUCE 1
#define DISTANCE DISTANCE_GATHER
using namespace std;
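// Min-max normalization: each attribute column is rescaled to [0, 1] with the per-attribute
// min and max read from the input file, so no single attribute dominates the distance metric.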
__global__ void normalize(float * d_input, float *d_max, float *d_min, unsigned int numAttributes,
unsigned int numElems) {
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int attributeIdx = tid % numAttributes;
if(tid < numElems*numAttributes) {
d_input[tid] = (d_input[tid] - d_min[attributeIdx]) / (d_max[attributeIdx] - d_min[attributeIdx]);
}
}
__global__ void findDistanceMap(float *d_inputAttributes, float *d_inputSample, float *d_output, unsigned int numAttributes,
unsigned int numSamples) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < numAttributes * numSamples) {
d_output[tid] = pow(d_inputAttributes[tid] - d_inputSample[tid % numAttributes], 2);
}
}
__global__ void findDistance(float *d_inputAttributes, float *d_inputSample, float *d_output, unsigned int numAttributes,
unsigned int numElems) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float distance = 0;
if (tid < numElems) {
for (int i = 0; i < numAttributes; i++) {
distance += pow(d_inputAttributes[numAttributes*tid + i] - d_inputSample[i], 2);
}
// OPTIMIZATION: we skip the square root; comparing squared distances
// preserves the ordering, so there's no point in wasting cycles on sqrt
d_output[tid] = distance;
}
}
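// Host-side dispatcher for the distance computation: DISTANCE_GATHER launches one thread per
// known sample that loops over its attributes, while DISTANCE_MAPREDUCE maps the squared
// differences into a temporary buffer and reduces each row with thrust::reduce.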
void distances(float *d_knowns, float* d_unknownSample, float *d_distance,
int numAttributes, int numKnownSamples)
{
if (DISTANCE == DISTANCE_GATHER) {
int threadsPerBlock = THREADS_PER_BLOCK;
int numBlocks = numKnownSamples / threadsPerBlock + 1;
hipLaunchKernelGGL(( findDistance), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_knowns, d_unknownSample, d_distance,
numAttributes, numKnownSamples);
} else if (DISTANCE == DISTANCE_MAPREDUCE) {
// Find the distances between the
float *d_distanceMap;
hipMalloc(&d_distanceMap, sizeof(float) * numKnownSamples * numAttributes);
float *h_distance = (float*) malloc(sizeof(float) * numKnownSamples);
int threadsPerBlock = THREADS_PER_BLOCK;
int numBlocks = numAttributes * numKnownSamples / threadsPerBlock + 1;
hipLaunchKernelGGL(( findDistanceMap), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_knowns, d_unknownSample, d_distanceMap,
numAttributes, numKnownSamples);
thrust::device_ptr<float> t_distanceMap = thrust::device_pointer_cast(d_distanceMap);
for (int i = 0; i < numKnownSamples; i++) {
h_distance[i] = thrust::reduce(t_distanceMap+(i*numAttributes), t_distanceMap+(i+1)*numAttributes, 0.0);
}
hipMemcpy(d_distance, h_distance, sizeof(float) * numKnownSamples, hipMemcpyHostToDevice);
hipFree(d_distanceMap);
free(h_distance);
}
}
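// The sort below is an LSD binary radix sort over 32-bit keys: for each bit the elements are
// partitioned into "zero" and "one" buckets via exclusive scans of the bit flags, and the
// classification labels are reordered alongside the keys as the payload.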
// RADIX Sort helper function
// Map Ones and Zeros
__global__
void mapOnesZeros(unsigned int* const d_ones, unsigned int* const d_zeros, const unsigned int* const d_inputVals,
const unsigned int mask, const size_t numElems) {
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// Check if we're outside the bounds of the array
if (myId < numElems) {
if ((d_inputVals[myId] & mask) == 0) {
d_zeros[myId] = 1;
d_ones[myId] = 0;
} else {
d_zeros[myId] = 0;
d_ones[myId] = 1;
}
}
}
// Reorder elements based on their generated positions
__global__
void reorderElements(unsigned int* const d_outputVals, unsigned int* const d_outputClassification,
const unsigned int* const d_inputVals, const unsigned int* const d_inputClassification, const unsigned int* const d_positions_zeros,
const unsigned int* const d_positions_ones, const unsigned int mask, const size_t numElems) {
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// Check if we're outside the bounds of the array
if (myId < numElems) {
// Based on if the digit is zero or one depends on which position values
if ((d_inputVals[myId] & mask) == 0) {
d_outputVals[d_positions_zeros[myId]] = d_inputVals[myId];
d_outputClassification[d_positions_zeros[myId]] = d_inputClassification[myId];
} else {
d_outputVals[d_positions_ones[myId]] = d_inputVals[myId];
d_outputClassification[d_positions_ones[myId]] = d_inputClassification[myId];
}
}
}
void radixSort(unsigned int* const d_inputVals,
unsigned int* const d_inputClassification,
unsigned int* const d_outputVals,
unsigned int* const d_outputClassification,
const size_t numElems)
{
// Set the proper grid size and block size for this problem.
int blockSize = THREADS_PER_BLOCK;
int gridSize = numElems / blockSize + 1;
// Iterate over the number of bits in the unsigned int.
for (unsigned int i = 0; i < (sizeof(unsigned int) * BITS_IN_BYTE); i++) {
unsigned int *d_zeros;
unsigned int *d_ones;
hipMalloc(&d_zeros, sizeof(unsigned int) * numElems);
hipMalloc(&d_ones, sizeof(unsigned int) * numElems);
// Choose which digit to check currently for our radix
unsigned int mask = 1U << i;
// Find out which digits end in 0, and which digits end in 1
hipLaunchKernelGGL(( mapOnesZeros), dim3(gridSize), dim3(blockSize), 0, 0, d_ones, d_zeros, d_inputVals, mask, numElems);
// Thrust requires us to copy the memory from Cuda to the host for
// processing.
unsigned int *h_zeros = (unsigned int *) malloc(sizeof(unsigned int) * numElems);
unsigned int *h_ones = (unsigned int *) malloc(sizeof(unsigned int) * numElems);
unsigned int *h_positions_zeros = (unsigned int *) malloc(sizeof(unsigned int) * numElems);
unsigned int *h_positions_ones = (unsigned int *) malloc(sizeof(unsigned int) * numElems);
hipMemcpy(h_zeros, d_zeros, sizeof(unsigned int) * numElems,
hipMemcpyDeviceToHost);
hipMemcpy(h_ones, d_ones, sizeof(unsigned int) * numElems,
hipMemcpyDeviceToHost);
// Perform an exclusive scan on zeros to determine the position of elements with zero
thrust::exclusive_scan(h_zeros, h_zeros + numElems, h_positions_zeros, 0);
// Determine the position offset to shift the ones positions by
// If the last element's digit is a zero, then it's the last element of d_positions_zeros
// Otherwise, it's just the (last element of the d_positions_zeros array + 1)
unsigned int offset;
if (h_zeros[numElems - 1] == 1) {
offset = h_positions_zeros[numElems - 1] + 1;
} else {
offset = h_positions_zeros[numElems - 1];
}
// Perform an exclusive scan on the ones (with offset) to position elements with one
thrust::exclusive_scan(h_ones, h_ones + numElems, h_positions_ones, offset);
// Copy position elements to the device memory
unsigned int *d_positions_ones;
unsigned int *d_positions_zeros;
hipMalloc(&d_positions_ones, sizeof(unsigned int) * numElems);
hipMalloc(&d_positions_zeros, sizeof(unsigned int) * numElems);
hipMemcpy(d_positions_zeros, h_positions_zeros, sizeof(unsigned int) * numElems,
hipMemcpyHostToDevice);
hipMemcpy(d_positions_ones, h_positions_ones, sizeof(unsigned int) * numElems,
hipMemcpyHostToDevice);
// Now reorder the elements in cuda, based on our position items
hipLaunchKernelGGL(( reorderElements), dim3(gridSize), dim3(blockSize), 0, 0, d_outputVals, d_outputClassification, d_inputVals, d_inputClassification,
d_positions_zeros, d_positions_ones, mask, numElems);
hipMemcpy(d_inputVals, d_outputVals, sizeof(unsigned int) * numElems,
hipMemcpyDeviceToDevice);
hipMemcpy(d_inputClassification, d_outputClassification, sizeof(unsigned int) * numElems,
hipMemcpyDeviceToDevice);
// Clear all of our allocated memory
hipFree(d_positions_ones);
hipFree(d_positions_zeros);
hipFree(d_ones);
hipFree(d_zeros);
free(h_zeros);
free(h_ones);
free(h_positions_ones);
free(h_positions_zeros);
}
}
void sort(unsigned int* const d_inputVals,
unsigned int* const d_inputClassification,
unsigned int* const d_outputVals,
unsigned int* const d_outputClassification,
const size_t numElems)
{
if (SORT == RADIX_SORT) {
radixSort(d_inputVals, d_inputClassification,
d_outputVals,d_outputClassification,
numElems);
} else if (SORT == THRUST_SORT) {
thrust::device_ptr<unsigned int> t_outputClassification = thrust::device_pointer_cast(d_inputClassification);
thrust::device_ptr<unsigned int> t_outputVals = thrust::device_pointer_cast(d_inputVals);
thrust::sort_by_key(t_outputVals, t_outputVals + numElems, t_outputClassification);
hipMemcpy(d_outputClassification, d_inputClassification, sizeof(unsigned int) * numElems, hipMemcpyDeviceToDevice);
hipMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int) * numElems, hipMemcpyDeviceToDevice);
}
}
int chooseMajority(int* d_outputClassification, unsigned int length, int numClass) {
int *histogram = new int[numClass];
// Initialize the histogram
for (int i = 0; i < numClass; i++) {
histogram[i] = 0;
}
// Count the values.
for (int i = 0; i < K; i++) {
// Make sure we're not above array bounds
if (i < length) {
histogram[d_outputClassification[i]]++;
}
}
// Find the element of the majority
int maxClass = distance(histogram, max_element(histogram, histogram + numClass));
delete[] histogram;
return maxClass;
}
/*__global__ void block_sum(float *input, float *results, size_t n)
{
extern __shared__ float sdata[];
int i = threadIdx.x + blockDim.x*blockIdx.x;
int tx = threadIdx.x;
// load input into __shared__ memory
float x = 0;
if(i < n) {
x = input[i];
}
sdata[tx] = x;
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
{
if(tx < offset)
{
// add a partial sum upstream to our own
sdata[tx] += sdata[tx + offset];
}
__syncthreads();
}
if(tx == 0) {
results[blockIdx.x] =
}
}*/
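// Reads the input file: a header (numKnownSamples numAttributes numClasses numUnknowns),
// one min/max pair per attribute, then each known sample as "class attr0 ... attrN-1",
// and finally each unknown sample as "name attr0 ... attrN-1".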
void parse(int* numAttributes, int* numKnownSamples, int* numClass, int *numUnknowns,
float ** min, float ** max, float ** knowns, int ** classifications,
float ** unknowns, string** unknownNames)
{
ifstream myfile(FILE_NAME, ios::in); // declare and open
int numAttrib, numKnownSamp, numCla, numUn;
myfile >> numKnownSamp >> numAttrib >> numCla >> numUn;
*numAttributes = numAttrib;
*numKnownSamples = numKnownSamp;
*numClass = numCla;
*numUnknowns = numUn;
// Populate all of the mins and maxes
*min = (float*) malloc(sizeof(float) * numAttrib);
*max = (float*) malloc(sizeof(float) * numAttrib);
for (int i = 0; i < numAttrib; i++) {
float currentMax, currentMin;
myfile >> currentMin >> currentMax;
(*min)[i] = currentMin;
(*max)[i] = currentMax;
}
// Populate the known object types
*classifications =(int*) malloc(sizeof(int) * numKnownSamp);
*knowns = (float*) malloc(sizeof(float) * numKnownSamp * numAttrib);
for (int i = 0; i < numKnownSamp; i++) {
int currentClass;
myfile >> currentClass;
(*classifications)[i] = currentClass;
for (int j = 0; j < numAttrib; j++) {
float currentAttrib;
myfile >> currentAttrib;
(*knowns)[i*numAttrib + j] = currentAttrib;
}
}
// Populate the unknown object types
*unknownNames = new string[numUn];
*unknowns = (float*) malloc(sizeof(float) * numUn * numAttrib);
for (int i = 0; i < numUn; i++) {
string currentName;
myfile >> currentName;
(*unknownNames)[i] = currentName;
for (int j = 0; j < numAttrib; j++) {
float currentAttrib;
myfile >> currentAttrib;
(*unknowns)[i*numAttrib + j] = currentAttrib;
}
}
myfile.close();
}
int main() {
unsigned int threadsPerBlock = THREADS_PER_BLOCK;
int numBlocks;
// Metadata about our learning algorithm data
int numAttributes, numKnownSamples, numClass, numUnknowns;
// Data that needs to be sent to the device.
float *h_min, *h_max;
float *h_knowns;
int *h_classifications;
float *h_unknowns;
// Device data
float *d_min, *d_max;
float *d_knowns;
int *d_classifications;
float *d_unknowns;
string *unknownNames;
// Needed for the profiling
std::clock_t start;
std::clock_t kStart;
float totalDuration;
float normalDuration = 0;
float distanceDuration = 0;
float sortDuration = 0;
float majorityDuration = 0;
parse(&numAttributes, &numKnownSamples, &numClass, &numUnknowns,
&h_min, &h_max, &h_knowns, &h_classifications, &h_unknowns, &unknownNames);
start = std::clock();
// Start mallocing the data to the kernel
hipMalloc(&d_min, sizeof(float) * numAttributes);
hipMalloc(&d_max, sizeof(float) * numAttributes);
hipMalloc(&d_knowns, sizeof(float) * numKnownSamples * numAttributes);
hipMalloc(&d_unknowns, sizeof(float) * numUnknowns * numAttributes);
hipMalloc(&d_classifications, sizeof(int) * numKnownSamples);
// Copy the data from the host to the kernel
hipMemcpy(d_min, h_min, sizeof(float) * numAttributes, hipMemcpyHostToDevice);
hipMemcpy(d_max, h_max, sizeof(float) * numAttributes, hipMemcpyHostToDevice);
hipMemcpy(d_knowns, h_knowns, sizeof(float) * numKnownSamples * numAttributes, hipMemcpyHostToDevice);
hipMemcpy(d_unknowns, h_unknowns, sizeof(float) * numUnknowns * numAttributes, hipMemcpyHostToDevice);
hipMemcpy(d_classifications, h_classifications, sizeof(int) * numKnownSamples, hipMemcpyHostToDevice);
kStart = std::clock();
// Normalize the known values
threadsPerBlock = 256;
numBlocks = numAttributes * numKnownSamples / threadsPerBlock + 1;
hipLaunchKernelGGL(( normalize), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_knowns, d_max, d_min,
numAttributes, numKnownSamples);
// Normalize the unknown values
threadsPerBlock = 256;
numBlocks = numAttributes * numUnknowns / threadsPerBlock + 1;
hipLaunchKernelGGL(( normalize), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_unknowns, d_max, d_min,
numAttributes, numUnknowns);
normalDuration = ( std::clock() - kStart ) / (float) CLOCKS_PER_SEC;
for (int cUn = 0; cUn < numUnknowns; cUn++) {
// Find the distances between the
kStart = std::clock();
float *d_distance;
hipMalloc(&d_distance, sizeof(float) * numKnownSamples);
distances(d_knowns, d_unknowns+cUn*numAttributes, d_distance, numAttributes, numKnownSamples);
distanceDuration += ( std::clock() - kStart ) / (float) CLOCKS_PER_SEC;
/*float *h_distance = (float*) malloc(sizeof(float) * numKnownSamples);
hipMemcpy(h_distance, d_distance, sizeof(float) * numKnownSamples, hipMemcpyDeviceToHost);
for (int i = 0; i < numKnownSamples; i++) {
printf("%f ", h_distance[i]);
}
printf("\n");*/
kStart = std::clock();
int *d_outputClassification;
float *d_outputDistances;
// Perform the sort
hipMalloc(&d_outputClassification, sizeof(int) * numKnownSamples);
hipMalloc(&d_outputDistances, sizeof(float) * numKnownSamples);
sort((unsigned int*) d_distance,
(unsigned int*) d_classifications,
(unsigned int*) d_outputDistances,
(unsigned int*) d_outputClassification,
numKnownSamples);
sortDuration += ( std::clock() - kStart ) / (float) CLOCKS_PER_SEC;
kStart = std::clock();
int *h_outputClassifications = (int*) malloc(sizeof(int) * numKnownSamples);
hipMemcpy(h_outputClassifications, d_outputClassification, sizeof(int) * numKnownSamples, hipMemcpyDeviceToHost);
/*
float *h_outputDistances = (float*) malloc(sizeof(float) * numKnownSamples);
hipMemcpy(h_outputDistances, d_outputDistances, sizeof(float) * numKnownSamples, hipMemcpyDeviceToHost);
for (int i = 0; i < numKnownSamples; i++) {
cout << h_outputClassifications[i] << " " << h_outputDistances[i] << endl;
}*/
//int *h_outputClassifications = (int*) malloc(sizeof(int) * numKnownSamples);
int majority = chooseMajority(h_outputClassifications, numKnownSamples, numClass);
majorityDuration += ( std::clock() - kStart ) / (float) CLOCKS_PER_SEC;
hipMemcpy(h_outputClassifications, d_outputClassification, sizeof(int) * numKnownSamples, hipMemcpyDeviceToHost);
//cout << unknownNames[0] << " " << majority << endl;
hipFree(d_distance);
hipFree(d_outputClassification);
hipFree(d_outputDistances);
free(h_outputClassifications);
}
totalDuration = ( std::clock() - start ) / (float) CLOCKS_PER_SEC;
std::cout<<"total Duration: "<< totalDuration <<'\n';
cout << "normal duraton: " << normalDuration << endl;
cout << "distance duration: " << distanceDuration << endl;
cout << "sort duration: " << sortDuration << endl;
cout << "majority duration: " << majorityDuration << endl ;
}
| 9dd8bf0204bf6eeeb1b4089bd3526e80e2e8c3f8.cu | #include <stdio.h>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <fstream>
#include <ctime>
#include <string>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <thrust/host_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#define MAX 10
#define MIN 0
#define THREADS_PER_BLOCK 256
#define BITS_IN_BYTE 8
#define FILE_NAME "input1000.txt"
#define K 2
// Different versions for the parallel algorithm
#define RADIX_SORT 0
#define THRUST_SORT 1
#define SORT THRUST_SORT
#define DISTANCE_GATHER 0
#define DISTANCE_MAPREDUCE 1
#define DISTANCE DISTANCE_GATHER
using namespace std;
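// Min-max normalization: rescales every attribute value to [0, 1] using that attribute's min and max.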
__global__ void normalize(float * d_input, float *d_max, float *d_min, unsigned int numAttributes,
unsigned int numElems) {
unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int attributeIdx = tid % numAttributes;
if(tid < numElems*numAttributes) {
d_input[tid] = (d_input[tid] - d_min[attributeIdx]) / (d_max[attributeIdx] - d_min[attributeIdx]);
}
}
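// Map step of the map/reduce distance variant: one squared per-attribute difference per thread,
// later summed per sample with thrust::reduce.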
__global__ void findDistanceMap(float *d_inputAttributes, float *d_inputSample, float *d_output, unsigned int numAttributes,
unsigned int numSamples) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid < numAttributes * numSamples) {
d_output[tid] = pow(d_inputAttributes[tid] - d_inputSample[tid % numAttributes], 2);
}
}
__global__ void findDistance(float *d_inputAttributes, float *d_inputSample, float *d_output, unsigned int numAttributes,
unsigned int numElems) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float distance = 0;
if (tid < numElems) {
for (int i = 0; i < numAttributes; i++) {
distance += pow(d_inputAttributes[numAttributes*tid + i] - d_inputSample[i], 2);
}
// OPTIMIZATION: We don't have to square root, because if
// there's no point in wasting all of the distance values are squares
d_output[tid] = distance;
}
}
void distances(float *d_knowns, float* d_unknownSample, float *d_distance,
int numAttributes, int numKnownSamples)
{
if (DISTANCE == DISTANCE_GATHER) {
int threadsPerBlock = THREADS_PER_BLOCK;
int numBlocks = numKnownSamples / threadsPerBlock + 1;
findDistance<<<numBlocks, threadsPerBlock>>>(d_knowns, d_unknownSample, d_distance,
numAttributes, numKnownSamples);
} else if (DISTANCE == DISTANCE_MAPREDUCE) {
// Find the distances between the
float *d_distanceMap;
cudaMalloc(&d_distanceMap, sizeof(float) * numKnownSamples * numAttributes);
float *h_distance = (float*) malloc(sizeof(float) * numKnownSamples);
int threadsPerBlock = THREADS_PER_BLOCK;
int numBlocks = numAttributes * numKnownSamples / threadsPerBlock + 1;
findDistanceMap<<<numBlocks, threadsPerBlock>>>(d_knowns, d_unknownSample, d_distanceMap,
numAttributes, numKnownSamples);
thrust::device_ptr<float> t_distanceMap = thrust::device_pointer_cast(d_distanceMap);
for (int i = 0; i < numKnownSamples; i++) {
h_distance[i] = thrust::reduce(t_distanceMap+(i*numAttributes), t_distanceMap+(i+1)*numAttributes, 0.0);
}
cudaMemcpy(d_distance, h_distance, sizeof(float) * numKnownSamples, cudaMemcpyHostToDevice);
cudaFree(d_distanceMap);
free(h_distance);
}
}
// RADIX Sort helper function
// Map Ones and Zeros
__global__
void mapOnesZeros(unsigned int* const d_ones, unsigned int* const d_zeros, const unsigned int* const d_inputVals,
const unsigned int mask, const size_t numElems) {
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// Check if we're outside the bounds of the array
if (myId < numElems) {
if ((d_inputVals[myId] & mask) == 0) {
d_zeros[myId] = 1;
d_ones[myId] = 0;
} else {
d_zeros[myId] = 0;
d_ones[myId] = 1;
}
}
}
// Reorder elements based on their generated positions
__global__
void reorderElements(unsigned int* const d_outputVals, unsigned int* const d_outputClassification,
const unsigned int* const d_inputVals, const unsigned int* const d_inputClassification, const unsigned int* const d_positions_zeros,
const unsigned int* const d_positions_ones, const unsigned int mask, const size_t numElems) {
int myId = threadIdx.x + blockDim.x * blockIdx.x;
// Check if we're outside the bounds of the array
if (myId < numElems) {
// Based on if the digit is zero or one depends on which position values
if ((d_inputVals[myId] & mask) == 0) {
d_outputVals[d_positions_zeros[myId]] = d_inputVals[myId];
d_outputClassification[d_positions_zeros[myId]] = d_inputClassification[myId];
} else {
d_outputVals[d_positions_ones[myId]] = d_inputVals[myId];
d_outputClassification[d_positions_ones[myId]] = d_inputClassification[myId];
}
}
}
void radixSort(unsigned int* const d_inputVals,
unsigned int* const d_inputClassification,
unsigned int* const d_outputVals,
unsigned int* const d_outputClassification,
const size_t numElems)
{
// Set the proper grid size and block size for this problem.
int blockSize = THREADS_PER_BLOCK;
int gridSize = numElems / blockSize + 1;
// Iterate over the number of bits in the unsigned int.
for (unsigned int i = 0; i < (sizeof(unsigned int) * BITS_IN_BYTE); i++) {
unsigned int *d_zeros;
unsigned int *d_ones;
cudaMalloc(&d_zeros, sizeof(unsigned int) * numElems);
cudaMalloc(&d_ones, sizeof(unsigned int) * numElems);
// Choose which digit to check currently for our radix
unsigned int mask = 1U << i;
// Find out which digits end in 0, and which digits end in 1
mapOnesZeros<<<gridSize, blockSize>>>(d_ones, d_zeros, d_inputVals, mask, numElems);
// Thrust requires us to copy the memory from Cuda to the host for
// processing.
unsigned int *h_zeros = (unsigned int *) malloc(sizeof(unsigned int) * numElems);
unsigned int *h_ones = (unsigned int *) malloc(sizeof(unsigned int) * numElems);
unsigned int *h_positions_zeros = (unsigned int *) malloc(sizeof(unsigned int) * numElems);
unsigned int *h_positions_ones = (unsigned int *) malloc(sizeof(unsigned int) * numElems);
cudaMemcpy(h_zeros, d_zeros, sizeof(unsigned int) * numElems,
cudaMemcpyDeviceToHost);
cudaMemcpy(h_ones, d_ones, sizeof(unsigned int) * numElems,
cudaMemcpyDeviceToHost);
// Perform an exclusive scan on zeros to determine the position of elements with zero
thrust::exclusive_scan(h_zeros, h_zeros + numElems, h_positions_zeros, 0);
// Determine the position offset to shift the ones positions by
// If the last element's digit is a zero, then it's the last element of d_positions_zeros
// Otherwise, it's just the (last element of the d_positions_zeros array + 1)
unsigned int offset;
if (h_zeros[numElems - 1] == 1) {
offset = h_positions_zeros[numElems - 1] + 1;
} else {
offset = h_positions_zeros[numElems - 1];
}
// Perform an exclusive scan on the ones (with offset) to position elements with one
thrust::exclusive_scan(h_ones, h_ones + numElems, h_positions_ones, offset);
// Copy position elements to the device memory
unsigned int *d_positions_ones;
unsigned int *d_positions_zeros;
cudaMalloc(&d_positions_ones, sizeof(unsigned int) * numElems);
cudaMalloc(&d_positions_zeros, sizeof(unsigned int) * numElems);
cudaMemcpy(d_positions_zeros, h_positions_zeros, sizeof(unsigned int) * numElems,
cudaMemcpyHostToDevice);
cudaMemcpy(d_positions_ones, h_positions_ones, sizeof(unsigned int) * numElems,
cudaMemcpyHostToDevice);
// Now reorder the elements in cuda, based on our position items
reorderElements<<<gridSize, blockSize>>>(d_outputVals, d_outputClassification, d_inputVals, d_inputClassification,
d_positions_zeros, d_positions_ones, mask, numElems);
cudaMemcpy(d_inputVals, d_outputVals, sizeof(unsigned int) * numElems,
cudaMemcpyDeviceToDevice);
cudaMemcpy(d_inputClassification, d_outputClassification, sizeof(unsigned int) * numElems,
cudaMemcpyDeviceToDevice);
// Clear all of our allocated memory
cudaFree(d_positions_ones);
cudaFree(d_positions_zeros);
cudaFree(d_ones);
cudaFree(d_zeros);
free(h_zeros);
free(h_ones);
free(h_positions_ones);
free(h_positions_zeros);
}
}
void sort(unsigned int* const d_inputVals,
unsigned int* const d_inputClassification,
unsigned int* const d_outputVals,
unsigned int* const d_outputClassification,
const size_t numElems)
{
if (SORT == RADIX_SORT) {
radixSort(d_inputVals, d_inputClassification,
d_outputVals,d_outputClassification,
numElems);
} else if (SORT == THRUST_SORT) {
thrust::device_ptr<unsigned int> t_outputClassification = thrust::device_pointer_cast(d_inputClassification);
thrust::device_ptr<unsigned int> t_outputVals = thrust::device_pointer_cast(d_inputVals);
thrust::sort_by_key(t_outputVals, t_outputVals + numElems, t_outputClassification);
cudaMemcpy(d_outputClassification, d_inputClassification, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_outputVals, d_inputVals, sizeof(unsigned int) * numElems, cudaMemcpyDeviceToDevice);
}
}
int chooseMajority(int* d_outputClassification, unsigned int length, int numClass) {
int *histogram = new int[numClass];
// Initialize the histogram
for (int i = 0; i < numClass; i++) {
histogram[i] = 0;
}
// Count the values.
for (int i = 0; i < K; i++) {
// Make sure we're not above array bounds
if (i < length) {
histogram[d_outputClassification[i]]++;
}
}
// Find the element of the majority
int maxClass = distance(histogram, max_element(histogram, histogram + numClass));
delete[] histogram;
return maxClass;
}
/*__global__ void block_sum(float *input, float *results, size_t n)
{
extern __shared__ float sdata[];
int i = threadIdx.x + blockDim.x*blockIdx.x;
int tx = threadIdx.x;
// load input into __shared__ memory
float x = 0;
if(i < n) {
x = input[i];
}
sdata[tx] = x;
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
{
if(tx < offset)
{
// add a partial sum upstream to our own
sdata[tx] += sdata[tx + offset];
}
__syncthreads();
}
if(tx == 0) {
results[blockIdx.x] =
}
}*/
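// Reads the input file: a header (numKnownSamples numAttributes numClasses numUnknowns),
// one min/max pair per attribute, then each known sample as "class attr0 ... attrN-1",
// and finally each unknown sample as "name attr0 ... attrN-1".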
void parse(int* numAttributes, int* numKnownSamples, int* numClass, int *numUnknowns,
float ** min, float ** max, float ** knowns, int ** classifications,
float ** unknowns, string** unknownNames)
{
ifstream myfile(FILE_NAME, ios::in); // declare and open
int numAttrib, numKnownSamp, numCla, numUn;
myfile >> numKnownSamp >> numAttrib >> numCla >> numUn;
*numAttributes = numAttrib;
*numKnownSamples = numKnownSamp;
*numClass = numCla;
*numUnknowns = numUn;
// Populate all of the mins and maxes
*min = (float*) malloc(sizeof(float) * numAttrib);
*max = (float*) malloc(sizeof(float) * numAttrib);
for (int i = 0; i < numAttrib; i++) {
float currentMax, currentMin;
myfile >> currentMin >> currentMax;
(*min)[i] = currentMin;
(*max)[i] = currentMax;
}
// Populate the known object types
*classifications =(int*) malloc(sizeof(int) * numKnownSamp);
*knowns = (float*) malloc(sizeof(float) * numKnownSamp * numAttrib);
for (int i = 0; i < numKnownSamp; i++) {
int currentClass;
myfile >> currentClass;
(*classifications)[i] = currentClass;
for (int j = 0; j < numAttrib; j++) {
float currentAttrib;
myfile >> currentAttrib;
(*knowns)[i*numAttrib + j] = currentAttrib;
}
}
// Populate the unknown object types
*unknownNames = new string[numUn];
*unknowns = (float*) malloc(sizeof(float) * numUn * numAttrib);
for (int i = 0; i < numUn; i++) {
string currentName;
myfile >> currentName;
(*unknownNames)[i] = currentName;
for (int j = 0; j < numAttrib; j++) {
float currentAttrib;
myfile >> currentAttrib;
(*unknowns)[i*numAttrib + j] = currentAttrib;
}
}
myfile.close();
}
int main() {
unsigned int threadsPerBlock = THREADS_PER_BLOCK;
int numBlocks;
// Metadata about our learning algorithm data
int numAttributes, numKnownSamples, numClass, numUnknowns;
// Data that needs to be sent to the device.
float *h_min, *h_max;
float *h_knowns;
int *h_classifications;
float *h_unknowns;
// Device data
float *d_min, *d_max;
float *d_knowns;
int *d_classifications;
float *d_unknowns;
string *unknownNames;
// Needed for the profiling
std::clock_t start;
std::clock_t kStart;
float totalDuration;
float normalDuration = 0;
float distanceDuration = 0;
float sortDuration = 0;
float majorityDuration = 0;
parse(&numAttributes, &numKnownSamples, &numClass, &numUnknowns,
&h_min, &h_max, &h_knowns, &h_classifications, &h_unknowns, &unknownNames);
start = std::clock();
// Start mallocing the data to the kernel
cudaMalloc(&d_min, sizeof(float) * numAttributes);
cudaMalloc(&d_max, sizeof(float) * numAttributes);
cudaMalloc(&d_knowns, sizeof(float) * numKnownSamples * numAttributes);
cudaMalloc(&d_unknowns, sizeof(float) * numUnknowns * numAttributes);
cudaMalloc(&d_classifications, sizeof(int) * numKnownSamples);
// Copy the data from the host to the kernel
cudaMemcpy(d_min, h_min, sizeof(float) * numAttributes, cudaMemcpyHostToDevice);
cudaMemcpy(d_max, h_max, sizeof(float) * numAttributes, cudaMemcpyHostToDevice);
cudaMemcpy(d_knowns, h_knowns, sizeof(float) * numKnownSamples * numAttributes, cudaMemcpyHostToDevice);
cudaMemcpy(d_unknowns, h_unknowns, sizeof(float) * numUnknowns * numAttributes, cudaMemcpyHostToDevice);
cudaMemcpy(d_classifications, h_classifications, sizeof(int) * numKnownSamples, cudaMemcpyHostToDevice);
kStart = std::clock();
// Normalize the known values
threadsPerBlock = 256;
numBlocks = numAttributes * numKnownSamples / threadsPerBlock + 1;
normalize<<<numBlocks, threadsPerBlock>>>(d_knowns, d_max, d_min,
numAttributes, numKnownSamples);
// Normalize the unknown values
threadsPerBlock = 256;
numBlocks = numAttributes * numUnknowns / threadsPerBlock + 1;
normalize<<<numBlocks, threadsPerBlock>>>(d_unknowns, d_max, d_min,
numAttributes, numUnknowns);
normalDuration = ( std::clock() - kStart ) / (float) CLOCKS_PER_SEC;
for (int cUn = 0; cUn < numUnknowns; cUn++) {
// Find the distances between the
kStart = std::clock();
float *d_distance;
cudaMalloc(&d_distance, sizeof(float) * numKnownSamples);
distances(d_knowns, d_unknowns+cUn*numAttributes, d_distance, numAttributes, numKnownSamples);
distanceDuration += ( std::clock() - kStart ) / (float) CLOCKS_PER_SEC;
/*float *h_distance = (float*) malloc(sizeof(float) * numKnownSamples);
cudaMemcpy(h_distance, d_distance, sizeof(float) * numKnownSamples, cudaMemcpyDeviceToHost);
for (int i = 0; i < numKnownSamples; i++) {
printf("%f ", h_distance[i]);
}
printf("\n");*/
kStart = std::clock();
int *d_outputClassification;
float *d_outputDistances;
// Perform the sort
cudaMalloc(&d_outputClassification, sizeof(int) * numKnownSamples);
cudaMalloc(&d_outputDistances, sizeof(float) * numKnownSamples);
sort((unsigned int*) d_distance,
(unsigned int*) d_classifications,
(unsigned int*) d_outputDistances,
(unsigned int*) d_outputClassification,
numKnownSamples);
sortDuration += ( std::clock() - kStart ) / (float) CLOCKS_PER_SEC;
kStart = std::clock();
int *h_outputClassifications = (int*) malloc(sizeof(int) * numKnownSamples);
cudaMemcpy(h_outputClassifications, d_outputClassification, sizeof(int) * numKnownSamples, cudaMemcpyDeviceToHost);
/*
float *h_outputDistances = (float*) malloc(sizeof(float) * numKnownSamples);
cudaMemcpy(h_outputDistances, d_outputDistances, sizeof(float) * numKnownSamples, cudaMemcpyDeviceToHost);
for (int i = 0; i < numKnownSamples; i++) {
cout << h_outputClassifications[i] << " " << h_outputDistances[i] << endl;
}*/
//int *h_outputClassifications = (int*) malloc(sizeof(int) * numKnownSamples);
int majority = chooseMajority(h_outputClassifications, numKnownSamples, numClass);
majorityDuration += ( std::clock() - kStart ) / (float) CLOCKS_PER_SEC;
cudaMemcpy(h_outputClassifications, d_outputClassification, sizeof(int) * numKnownSamples, cudaMemcpyDeviceToHost);
//cout << unknownNames[0] << " " << majority << endl;
cudaFree(d_distance);
cudaFree(d_outputClassification);
cudaFree(d_outputDistances);
free(h_outputClassifications);
}
totalDuration = ( std::clock() - start ) / (float) CLOCKS_PER_SEC;
std::cout<<"total Duration: "<< totalDuration <<'\n';
cout << "normal duraton: " << normalDuration << endl;
cout << "distance duration: " << distanceDuration << endl;
cout << "sort duration: " << sortDuration << endl;
cout << "majority duration: " << majorityDuration << endl ;
}
|
ef063e4efe1ebdbdb682ba1ac2415ca2ee74206d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <inttypes.h>
#include <chrono>
#include <string.h>
#include <fstream>
#include <iostream>
using namespace std;
#define RANGE 4
#define ITER 10000
#define dr 1
#define dt 0.2
//DEVICE COPIES TO BE ALLOCATED DYNAMICALLY
void printDevProp(hipDeviceProp_t devProp)
{
printf("%s\n", devProp.name);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Total global memory: %zu", devProp.totalGlobalMem);
printf("bytes\n");
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Total amount of shared memory per block: %zu\n",devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %zu\n", devProp.memPitch);
printf("Total amount of constant memory: %zu\n", devProp.totalConstMem);
}
void device_list()
{
//get Devices
int nDevices;
/*Hard Set n
int nDevices = 1;
*/
hipGetDeviceCount(&nDevices);
//Device list and properties
for (int i = 0; i < nDevices; i++)
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printDevProp(prop);
printf("Device Number: %d\n", i);
printf("Device name: %s\n", prop.name);
printf("Memory Clock Rate (KHz): %d\n",prop.memoryClockRate);
printf("Memory Bus Width (bits): %d\n",prop.memoryBusWidth);
printf("Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
void display(double* field)
{
for(int i=0;i<RANGE;i++)
{
for(int j=0;j<RANGE;j++)
{
for(int k=0;k<RANGE;k++)
{
std::cout<<*(field+i+RANGE*j+RANGE*RANGE*k)<<" ";
}
std::cout<<"\n";
}
std::cout<<"\n\n";
}
}
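// DDX/DDY/DDZ: three-point second-difference stencils along x, y and z for interior points,
// written into R (the boundary layer is left untouched).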
void DDX(double *R, double *C,int X,int Y,int Z,double dx)
{
for(int j=1;j<Y-1;j++)
{
for(int k=1;k<Z-1;k++)
{
for(int i=1;i<X-1;i++)
{
*(R + i + j*X + k*X*Y) = (*(C + 1 + i + j*X + k*X*Y) + *(C-1 + i + j*X + k*X*Y) - 2* *(C + i + j*X + k*X*Y))/(dx);
}
}
}
}
void DDY(double* R, double* C,int X,int Y,int Z, double dy)
{
for(int j=1;j<Y-1;j++)
{
for(int k=1;k<Z-1;k++)
{
for(int i=1;i<X-1;i++)
{
*(R+i*X+j*X+k*X*Y) = (*(C+X*(i+1)+j*X+k*X*Y) + *(C+X*(i-1)+j*X+k*X*Y) - 2* *(C+i*X+j*X+k*X*Y))/(dy);
}
}
}
}
void DDZ(double* R, double* C,int X,int Y,int Z, double dz)
{
for(int j=1;j<Y-1;j++)
{
for(int k=1;k<Z-1;k++)
{
for(int i=1;i<X-1;i++)
{
*(R+i*X*Y+j*X+k*X*Y) = (*(C+X*Y*(i+1)+j*X+k*X*Y) + *(C+X*Y*(i-1)+j*X+k*X*Y) - 2* *(C+i*X*Y+j*X+k*X*Y))/(dz);
}
}
}
}
void ADD(double* field,double* temp,int X,int Y,int Z,double dtt)
{
for(int i=1;i<X-1;i++)
{
for(int j=1;j<Y-1;j++)
{
for(int k=1;k<Z-1;k++)
{
*(field + i + Y*j + Y*Z*k) += *(temp + i + Y*j + Y*Z*k) * dtt;
}
}
}
}
void set(double* field)
{
for(int i=0;i<RANGE;i++)
{
for(int j=0;j<RANGE;j++)
{
for(int k=0;k<RANGE;k++)
{
*(field+i+RANGE*j+RANGE*RANGE*k) = i+j+k;
}
}
}
}
//Linear cpu time
int main()
{
unsigned long size = sizeof(double)*RANGE*RANGE*RANGE;
//ALL HOST TESTING COPIES COPIUM
double tol = 0.002;
std::cout<<size<<"\n";
std::cout<<"Part 1\n";
double *field = (double*)malloc(size);
double *temp_field = (double*)malloc(size);
set(field);
set(temp_field);
//Second Derivatives
double *ddx = (double*)malloc(size);
double *ddy = (double*)malloc(size);
double *ddz = (double*)malloc(size);
std::cout<<"Part 2\n";
double err_max = 10.0;
double cur_err_max = 0.0;
int loopctr = 0;
auto hst_st = std::chrono::high_resolution_clock::now();
std::cout<<"Part 3\n";
display(field);
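// Iterate: accumulate the three second differences into temp_field and stop once the
// largest interior difference between temp_field and field drops below tol.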
while(err_max>tol)
{
cur_err_max = 0.;
loopctr++;
DDX(ddx,field,RANGE,RANGE,RANGE,4.);
DDY(ddy,field,RANGE,RANGE,RANGE,4.);
DDZ(ddz,field,RANGE,RANGE,RANGE,4.);
std::cout<<"Part 4\n";
ADD(temp_field,ddx,RANGE,RANGE,RANGE,0.4);
ADD(temp_field,ddy,RANGE,RANGE,RANGE,0.4);
ADD(temp_field,ddz,RANGE,RANGE,RANGE,0.4);
display(field);
std::cout<<"Part 5\n";
for(int i=1;i<RANGE-1;i++)
{
for(int j=1;j<RANGE-1;j++)
{
for(int k=1;k<RANGE-1;k++)
{
if(cur_err_max<abs(*(temp_field+i+RANGE*j+RANGE*RANGE*k) - *(field+i+RANGE*j+RANGE*RANGE*k)))
{
cur_err_max = abs(*(temp_field+i+RANGE*j+RANGE*RANGE*k) - *(field+i+RANGE*j+RANGE*RANGE*k));
*(temp_field+i+RANGE*j+RANGE*RANGE*k) = *(field+i+RANGE*j+RANGE*RANGE*k);
}
}
}
}
std::cout<<loopctr<<" "<<cur_err_max<<"\n";
err_max = cur_err_max;
}
free(temp_field);
free(field);
free(ddx);
free(ddy);
free(ddz);
auto hst_en = std::chrono::high_resolution_clock::now();
std::chrono::duration<float> duration = hst_en-hst_st;
std::cout<<"Duration: "<<duration.count()<<"\n";
std::cout<<"With "<<loopctr<<" loops\n\n";
std::cout<<"Error: "<<err_max<<"\n";
device_list();
//display(field);
return 0;
} | ef063e4efe1ebdbdb682ba1ac2415ca2ee74206d.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <inttypes.h>
#include <chrono>
#include <string.h>
#include <fstream>
#include <iostream>
using namespace std;
#define RANGE 4
#define ITER 10000
#define dr 1
#define dt 0.2
//DEVICE COPIES TO BE ALLOCATED DYNAMICALLY
void printDevProp(cudaDeviceProp devProp)
{
printf("%s\n", devProp.name);
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Total global memory: %zu", devProp.totalGlobalMem);
printf("bytes\n");
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Total amount of shared memory per block: %zu\n",devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %zu\n", devProp.memPitch);
printf("Total amount of constant memory: %zu\n", devProp.totalConstMem);
}
void device_list()
{
//get Devices
int nDevices;
/*Hard Set n
int nDevices = 1;
*/
cudaGetDeviceCount(&nDevices);
//Device list and properties
for (int i = 0; i < nDevices; i++)
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printDevProp(prop);
printf("Device Number: %d\n", i);
printf("Device name: %s\n", prop.name);
printf("Memory Clock Rate (KHz): %d\n",prop.memoryClockRate);
printf("Memory Bus Width (bits): %d\n",prop.memoryBusWidth);
printf("Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
void display(double* field)
{
for(int i=0;i<RANGE;i++)
{
for(int j=0;j<RANGE;j++)
{
for(int k=0;k<RANGE;k++)
{
std::cout<<*(field+i+RANGE*j+RANGE*RANGE*k)<<" ";
}
std::cout<<"\n";
}
std::cout<<"\n\n";
}
}
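// DDX/DDY/DDZ: three-point second-difference stencils along x, y and z for interior points,
// written into R (the boundary layer is left untouched).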
void DDX(double *R, double *C,int X,int Y,int Z,double dx)
{
for(int j=1;j<Y-1;j++)
{
for(int k=1;k<Z-1;k++)
{
for(int i=1;i<X-1;i++)
{
*(R + i + j*X + k*X*Y) = (*(C + 1 + i + j*X + k*X*Y) + *(C-1 + i + j*X + k*X*Y) - 2* *(C + i + j*X + k*X*Y))/(dx);
}
}
}
}
void DDY(double* R, double* C,int X,int Y,int Z, double dy)
{
for(int j=1;j<Y-1;j++)
{
for(int k=1;k<Z-1;k++)
{
for(int i=1;i<X-1;i++)
{
*(R+i*X+j*X+k*X*Y) = (*(C+X*(i+1)+j*X+k*X*Y) + *(C+X*(i-1)+j*X+k*X*Y) - 2* *(C+i*X+j*X+k*X*Y))/(dy);
}
}
}
}
void DDZ(double* R, double* C,int X,int Y,int Z, double dz)
{
for(int j=1;j<Y-1;j++)
{
for(int k=1;k<Z-1;k++)
{
for(int i=1;i<X-1;i++)
{
*(R+i*X*Y+j*X+k*X*Y) = (*(C+X*Y*(i+1)+j*X+k*X*Y) + *(C+X*Y*(i-1)+j*X+k*X*Y) - 2* *(C+i*X*Y+j*X+k*X*Y))/(dz);
}
}
}
}
void ADD(double* field,double* temp,int X,int Y,int Z,double dtt)
{
for(int i=1;i<X-1;i++)
{
for(int j=1;j<Y-1;j++)
{
for(int k=1;k<Z-1;k++)
{
*(field + i + Y*j + Y*Z*k) += *(temp + i + Y*j + Y*Z*k) * dtt;
}
}
}
}
void set(double* field)
{
for(int i=0;i<RANGE;i++)
{
for(int j=0;j<RANGE;j++)
{
for(int k=0;k<RANGE;k++)
{
*(field+i+RANGE*j+RANGE*RANGE*k) = i+j+k;
}
}
}
}
//Linear cpu time
int main()
{
unsigned long size = sizeof(double)*RANGE*RANGE*RANGE;
//ALL HOST TESTING COPIES COPIUM
double tol = 0.002;
std::cout<<size<<"\n";
std::cout<<"Part 1\n";
double *field = (double*)malloc(size);
double *temp_field = (double*)malloc(size);
set(field);
set(temp_field);
//Second Derivatives
double *ddx = (double*)malloc(size);
double *ddy = (double*)malloc(size);
double *ddz = (double*)malloc(size);
std::cout<<"Part 2\n";
double err_max = 10.0;
double cur_err_max = 0.0;
int loopctr = 0;
auto hst_st = std::chrono::high_resolution_clock::now();
std::cout<<"Part 3\n";
display(field);
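// Iterate: accumulate the three second differences into temp_field and stop once the
// largest interior difference between temp_field and field drops below tol.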
while(err_max>tol)
{
cur_err_max = 0.;
loopctr++;
DDX(ddx,field,RANGE,RANGE,RANGE,4.);
DDY(ddy,field,RANGE,RANGE,RANGE,4.);
DDZ(ddz,field,RANGE,RANGE,RANGE,4.);
std::cout<<"Part 4\n";
ADD(temp_field,ddx,RANGE,RANGE,RANGE,0.4);
ADD(temp_field,ddy,RANGE,RANGE,RANGE,0.4);
ADD(temp_field,ddz,RANGE,RANGE,RANGE,0.4);
display(field);
std::cout<<"Part 5\n";
for(int i=1;i<RANGE-1;i++)
{
for(int j=1;j<RANGE-1;j++)
{
for(int k=1;k<RANGE-1;k++)
{
if(cur_err_max<abs(*(temp_field+i+RANGE*j+RANGE*RANGE*k) - *(field+i+RANGE*j+RANGE*RANGE*k)))
{
cur_err_max = abs(*(temp_field+i+RANGE*j+RANGE*RANGE*k) - *(field+i+RANGE*j+RANGE*RANGE*k));
*(temp_field+i+RANGE*j+RANGE*RANGE*k) = *(field+i+RANGE*j+RANGE*RANGE*k);
}
}
}
}
std::cout<<loopctr<<" "<<cur_err_max<<"\n";
err_max = cur_err_max;
}
free(temp_field);
free(field);
free(ddx);
free(ddy);
free(ddz);
auto hst_en = std::chrono::high_resolution_clock::now();
std::chrono::duration<float> duration = hst_en-hst_st;
std::cout<<"Duration: "<<duration.count()<<"\n";
std::cout<<"With "<<loopctr<<" loops\n\n";
std::cout<<"Error: "<<err_max<<"\n";
device_list();
//display(field);
return 0;
} |
e0deea8cad92b1b4cd05e93834afded876c530a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Alyxandra Spikerman
* High Perfomance Computing
* Homework 5 - Question 2
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// CUDA kernel
__global__ void getCount(int* values, int* histogram, int N, int CLASSES) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// we're working with a data range of 1000, so to get the class size, we need to divide the range by
// the number of classes
double CLASS_SIZE = 1000 / (double)CLASSES;
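// Grid-stride loop: each thread steps by the total thread count, so all N values are covered
// regardless of how many blocks were launched.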
for (int i = id; i < N; i += stride) {
atomicAdd(&histogram[(int)ceil(values[i] / CLASS_SIZE) - 1], 1); // atomically add a value to the right class
}
}
int main(int argc, char* argv[] ) {
int N, CLASSES;
if (argc == 3) {
N = atoi(argv[1]);
CLASSES = atoi(argv[2]);
printf("N = %d\nClasses= %d\n", N, CLASSES);
} else {
printf("Error: must input 2 arguments, <N> <# of classes>\n");
return 1;
}
// how to do hipMalloc, hipMemcpy supplied by provided Oak Ridge National Labs code
size_t total_bytes = CLASSES * sizeof(int);
// create the histogram
int* h_histogram = (int*)malloc(total_bytes);
int* d_histogram;
hipMalloc(&d_histogram, total_bytes);
for (int i = 0; i < CLASSES; i++){
h_histogram[i] = 0; // initalize the host histogram values
}
size_t N_bytes = N * sizeof(int);
srand(150);
int* h_values = (int*)malloc(N_bytes);
int* d_values;
hipMalloc(&d_values, N_bytes);
for (int i = 0; i < N; i++) {
h_values[i] = (rand() % 1000) + 1; // calculate the values
}
hipMemcpy(d_histogram, h_histogram, total_bytes, hipMemcpyHostToDevice); // copy histogram to device
hipMemcpy(d_values, h_values, N_bytes, hipMemcpyHostToDevice); // copy values to device
time_t begin = time(NULL);
printf("\nStart kernel\n\n");
hipLaunchKernelGGL(( getCount), dim3((N + 127) / 128), dim3(128) , 0, 0, d_values, d_histogram, N, CLASSES); // Execute the kernel (round up so small N still gets a block)
hipDeviceSynchronize(); // wait for everything to finish before accessing
time_t end = time(NULL);
hipMemcpy(h_histogram, d_histogram, total_bytes, hipMemcpyDeviceToHost); // Copy histogram to host
printf("Total histogram values for %d classes\n", CLASSES);
for (int i = 0; i < CLASSES; i++) {
printf("Class %d: %d \n", i, h_histogram[i]);
}
printf("Parallel Time = %f\n", end-begin);
// free allocated memory
hipFree(d_values);
hipFree(d_histogram);
free(h_values);
free(h_histogram);
return 0;
}
| e0deea8cad92b1b4cd05e93834afded876c530a7.cu | /*
* Alyxandra Spikerman
* High Perfomance Computing
* Homework 5 - Question 2
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// CUDA kernel
__global__ void getCount(int* values, int* histogram, int N, int CLASSES) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// we're working with a data range of 1000, so to get the class size, we need to divide the range by
// the number of classes
double CLASS_SIZE = 1000 / (double)CLASSES;
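// Grid-stride loop: each thread steps by the total thread count, so all N values are covered
// regardless of how many blocks were launched.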
for (int i = id; i < N; i += stride) {
atomicAdd(&histogram[(int)ceil(values[i] / CLASS_SIZE) - 1], 1); // atomically add a value to the right class
}
}
int main(int argc, char* argv[] ) {
int N, CLASSES;
if (argc == 3) {
N = atoi(argv[1]);
CLASSES = atoi(argv[2]);
printf("N = %d\nClasses= %d\n", N, CLASSES);
} else {
printf("Error: must input 2 arguments, <N> <# of classes>\n");
return 1;
}
// how to do cudaMalloc, cudaMemcpy supplied by provided Oak Ridge National Labs code
size_t total_bytes = CLASSES * sizeof(int);
// create the histogram
int* h_histogram = (int*)malloc(total_bytes);
int* d_histogram;
cudaMalloc(&d_histogram, total_bytes);
for (int i = 0; i < CLASSES; i++){
h_histogram[i] = 0; // initalize the host histogram values
}
size_t N_bytes = N * sizeof(int);
srand(150);
int* h_values = (int*)malloc(N_bytes);
int* d_values;
cudaMalloc(&d_values, N_bytes);
for (int i = 0; i < N; i++) {
h_values[i] = (rand() % 1000) + 1; // calculate the values
}
cudaMemcpy(d_histogram, h_histogram, total_bytes, cudaMemcpyHostToDevice); // copy histogram to device
cudaMemcpy(d_values, h_values, N_bytes, cudaMemcpyHostToDevice); // copy values to device
time_t begin = time(NULL);
printf("\nStart kernel\n\n");
getCount<<< (N + 127) / 128, 128 >>>(d_values, d_histogram, N, CLASSES); // Execute the kernel (round up so small N still gets a block)
cudaDeviceSynchronize(); // wait for everything to finish before accessing
time_t end = time(NULL);
cudaMemcpy(h_histogram, d_histogram, total_bytes, cudaMemcpyDeviceToHost); // Copy histogram to host
printf("Total histogram values for %d classes\n", CLASSES);
for (int i = 0; i < CLASSES; i++) {
printf("Class %d: %d \n", i, h_histogram[i]);
}
printf("Parallel Time = %f\n", end-begin);
// free allocated memory
cudaFree(d_values);
cudaFree(d_histogram);
free(h_values);
free(h_histogram);
return 0;
}
|
bb126e9da165e27214df9f2c6726f56f91fd3cc1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION &
AFFILIATES. All rights reserved. SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/group_norm_op_plugin.h"
#include "paddle/phi/kernels/group_norm_kernel.h"
#include <hipcub/hipcub.hpp>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
using DataLayout = phi::DataLayout;
static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; }
static inline __device__ __host__ float sigmoid(float x) {
return 1.F / (1.F + expf(-x));
}
struct GroupSums {
// Is it the 1st element of the group?
int32_t flag;
// The sum.
float sum;
// The sum of squares.
float sumSq;
};
struct GroupSumsOp {
inline __device__ GroupSums operator()(GroupSums const &a,
GroupSums const &b) {
GroupSums dst;
dst.sum = b.flag ? b.sum : (a.sum + b.sum);
dst.sumSq = b.flag ? b.sumSq : (a.sumSq + b.sumSq);
dst.flag = a.flag + b.flag;
return dst;
}
};
static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) {
int32_t maxDivisor = -1;
for (int32_t i = 1; i <= std::sqrt(n); i++) {
if (n % i == 0) {
int32_t divisor1 = n / i;
int32_t divisor2 = i;
if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) {
maxDivisor = divisor1;
}
if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) {
maxDivisor = divisor2;
}
}
}
return maxDivisor;
}
template <int tTHREADS_PER_BLOCK>
__global__ void groupNormNHWCSumKernel(const GroupNormNHWCParams params) {
// The object in charge of doing the sums for the different blocks.
typedef hipcub::BlockScan<GroupSums, tTHREADS_PER_BLOCK> BlockScan;
// Allocate shared memory for BlockScan.
__shared__ typename BlockScan::TempStorage tempStorage;
// Allocate shared memory for the groups. We could reduce the amount of shared
// memory reserved.
__shared__ float2 smem[tTHREADS_PER_BLOCK];
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for F16x2).
int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2;
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// The sums.
float sum = 0.F;
float sumSq = 0.F;
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The offset.
int64_t offset = static_cast<int64_t>(ni) * params.hwc +
static_cast<int64_t>(hwi) * params.c + ci;
// Fetch two channels per thread.
__half2 h2(0, 0);
if (ci < params.c) {
h2 = *reinterpret_cast<__half2 const *>(¶ms.srcX[offset]);
}
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Update the sum.
sum += f2.x + f2.y;
// Update the sum of squares.
sumSq += f2.x * f2.x + f2.y * f2.y;
}
// The group that thread works on and the channel in the group (modulus).
int32_t gi = threadIdx.x * 2 / params.cPerGroup;
int32_t cj = threadIdx.x * 2 - params.cPerGroup * gi;
// The data for the summations.
GroupSums inp{cj == 0 ? 1 : 0, sum, sumSq};
// Do the segmented scan.
GroupSums out;
BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp());
// Store the results for the groups in shared memory (to produce coalesced
// stores later).
// 2 channels per thread
if (cj == params.cPerGroup - 2) {
smem[gi] = make_float2(out.sum, out.sumSq);
}
// Make sure the data is in shared memory.
__syncthreads();
// The global group index.
int32_t gj = blockIdx.x * params.groupsPerBlock + threadIdx.x;
// Threads that have nothing left to do, exit.
if (threadIdx.x >= params.groupsPerBlock || gj >= params.groups) {
return;
}
// The first threads (those storing to global memory, load the values).
float2 sums = smem[threadIdx.x];
// Store to global memory.
atomicAdd(¶ms.redBuffer[(2 * ni + 0) * params.groups + gj], sums.x);
atomicAdd(¶ms.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y);
}
void groupNormNHWCSum(const GroupNormNHWCParams ¶ms, hipStream_t stream) {
dim3 grid;
// The number of blocks to compute all the channels.
grid.x = params.c / params.cPerBlock;
// The number of blocks to compute all the activations in a given instance.
grid.y = divUp(params.hw, params.hwPerBlock);
// The number of instances.
grid.z = params.n;
switch (params.cPerBlock) {
case 320:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<160>), dim3(grid), dim3(160), 0, stream, params);
break;
case 480:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<256>), dim3(grid), dim3(256), 0, stream, params);
break;
case 256:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<128>), dim3(grid), dim3(128), 0, stream, params);
break;
case 128:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<64>), dim3(grid), dim3(64), 0, stream, params);
break;
case 8:
hipLaunchKernelGGL(( groupNormNHWCSumKernel<4>), dim3(grid), dim3(4), 0, stream, params);
break;
default:
PADDLE_THROW(platform::errors::Fatal(
"The function groupNormNHWCSum of GroupNormPlugin TRT Plugin "
"encounter error"));
}
}
template <int tTHREADS_PER_BLOCK>
__global__ void groupNormNHWCScaleKernel(const GroupNormNHWCParams params) {
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for F16x2).
int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2;
// The group that thread works on and the channel in the group (modulus).
int32_t gi = ci / params.cPerGroup;
// Load the sum and sum of squares for the group.
float sum = 0.F, sumSq = 0.F;
if (gi < params.groups) {
sum = params.redBuffer[(2 * ni + 0) * params.groups + gi];
sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi];
}
// Load gamma/beta.
float2 gammaF2, betaF2;
if (ci < params.c) {
gammaF2 = __half22float2(*reinterpret_cast<half2 const *>(
reinterpret_cast<half const *>(params.gamma) + ci));
betaF2 = __half22float2(*reinterpret_cast<half2 const *>(
reinterpret_cast<half const *>(params.beta) + ci));
}
// Compute the mean.
float mean = sum * params.invHWC;
// Compute the variance.
float var = sumSq * params.invHWC - (mean * mean);
// Compute the inverse of the stddev.
float invStdDev = rsqrtf(var + params.eps);
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)ni * params.hwc + hwi * params.c + ci;
// Fetch two channels per thread.
__half2 h2(0, 0);
if (ci < params.c) {
h2 = *reinterpret_cast<__half2 const *>(¶ms.srcX[offset]);
}
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Normalize the channels.
f2.x = (f2.x - mean) * invStdDev;
f2.y = (f2.y - mean) * invStdDev;
// Scale by gamma and add beta.
f2.x = gammaF2.x * f2.x + betaF2.x;
f2.y = gammaF2.y * f2.y + betaF2.y;
// Apply Silu if needed.
if (params.withSilu) {
f2.x = f2.x * sigmoid(f2.x);
f2.y = f2.y * sigmoid(f2.y);
}
// Store the scaled values.
if (ci < params.c) {
*reinterpret_cast<__half2 *>(¶ms.dst[offset]) = __float22half2_rn(f2);
}
}
}
void groupNormNHWCScale(const GroupNormNHWCParams ¶ms,
hipStream_t stream) {
dim3 grid;
// The number of blocks to compute all the channels.
grid.x = params.c / params.cPerBlock;
// The number of blocks to compute all the activations in a given instance.
grid.y = divUp(params.hw, params.hwPerBlock);
// The number of instances.
grid.z = params.n;
switch (params.cPerBlock) {
case 320:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<160>), dim3(grid), dim3(160), 0, stream, params);
break;
case 480:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<256>), dim3(grid), dim3(256), 0, stream, params);
break;
case 256:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<128>), dim3(grid), dim3(128), 0, stream, params);
break;
case 128:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<64>), dim3(grid), dim3(64), 0, stream, params);
break;
case 8:
hipLaunchKernelGGL(( groupNormNHWCScaleKernel<4>), dim3(grid), dim3(4), 0, stream, params);
break;
default:
PADDLE_THROW(platform::errors::Fatal(
"The function groupNormNHWCScale of GroupNormPlugin TRT Plugin "
"encounter error"));
}
}
int GroupNormPlugin::initialize() TRT_NOEXCEPT {
if (!with_fp16_) {
// if use fp32
hipMalloc(&scale_gpu_, sizeof(float) * scale_.size());
hipMalloc(&bias_gpu_, sizeof(float) * bias_.size());
hipMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
hipMemcpyHostToDevice);
} else {
// if use fp16
std::vector<half> scale_half(scale_.size());
std::vector<half> bias_half(bias_.size());
for (int i = 0; i < scale_.size(); ++i) {
scale_half[i] = static_cast<half>(scale_[i]);
}
for (int i = 0; i < bias_.size(); ++i) {
bias_half[i] = static_cast<half>(bias_[i]);
}
hipMalloc(&scale_gpu_, sizeof(half) * scale_half.size());
hipMalloc(&bias_gpu_, sizeof(half) * bias_half.size());
hipMemcpy(scale_gpu_,
scale_half.data(),
scale_half.size() * sizeof(half),
hipMemcpyHostToDevice);
hipMemcpy(bias_gpu_,
bias_half.data(),
bias_half.size() * sizeof(half),
hipMemcpyHostToDevice);
}
return 0;
}
bool GroupNormPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kLINEAR));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kLINEAR));
}
}
nvinfer1::Dims GroupNormPlugin::getOutputDimensions(
int index, const nvinfer1::Dims *inputDims, int nbInputs) TRT_NOEXCEPT {
return inputDims[0];
}
int GroupNormPlugin::enqueue(int batch_size,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
#else
void *const *outputs,
void *workspace,
#endif
hipStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = this->getInputDims(0);
int groups = groups_;
float eps = eps_;
std::vector<int> input_shape;
input_shape.push_back(batch_size);
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
const auto input_ddim = phi::make_ddim(input_shape);
int C = input_shape[1];
PADDLE_ENFORCE_EQ(
C,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the channel number in groupnorm,"
"but got channel number:%d, scale's size:%d.",
C,
scale_.size()));
PADDLE_ENFORCE_EQ(
C,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the channel number in groupnorm,"
"but got channel number:%d, bias's size:%d.",
C,
bias_.size()));
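// The workspace is laid out as three consecutive float buffers of size n * groups:
// per-(batch, group) mean, variance, and a temporary variance buffer.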
float *mean_d = static_cast<float *>(workspace);
float *variance_d = mean_d + input_shape[0] * groups_;
float *temp_variance_d = variance_d + input_shape[0] * groups_;
auto input_type = getDataType();
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32";
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<float *>(bias_gpu_),
reinterpret_cast<float *>(scale_gpu_),
temp_variance_d,
groups_,
eps_,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16";
const half *input = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<half, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<const half *>(bias_gpu_),
reinterpret_cast<const half *>(scale_gpu_),
temp_variance_d,
groups_,
eps_,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The GroupNorm TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
nvinfer1::DimsExprs GroupNormPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputDims,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputDims[0];
}
bool GroupNormPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of groupnorm plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return ((in.type == nvinfer1::DataType::kHALF) &&
((!with_silu_ && in.format == nvinfer1::PluginFormat::kLINEAR) ||
in.format == nvinfer1::PluginFormat::kHWC8));
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType GroupNormPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The groupnorm Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true,
platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int GroupNormPluginDynamic::initialize() TRT_NOEXCEPT {
if (with_fp16_ == false) {
// if use fp32
hipMalloc(&scale_gpu_, sizeof(float) * scale_.size());
hipMalloc(&bias_gpu_, sizeof(float) * bias_.size());
hipMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
hipMemcpyHostToDevice);
} else {
// if use fp16
std::vector<half> scale_half(scale_.size());
std::vector<half> bias_half(bias_.size());
for (int i = 0; i < scale_.size(); ++i) {
scale_half[i] = static_cast<half>(scale_[i]);
}
for (int i = 0; i < bias_.size(); ++i) {
bias_half[i] = static_cast<half>(bias_[i]);
}
hipMalloc(&scale_gpu_, sizeof(half) * scale_.size());
hipMalloc(&bias_gpu_, sizeof(half) * bias_.size());
hipMemcpy(scale_gpu_,
scale_half.data(),
scale_half.size() * sizeof(half),
hipMemcpyHostToDevice);
hipMemcpy(bias_gpu_,
bias_half.data(),
bias_half.size() * sizeof(half),
hipMemcpyHostToDevice);
}
return 0;
}
int GroupNormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = input_desc[0].dims;
int groups = groups_;
float eps = eps_;
std::vector<int> input_shape;
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
const auto input_ddim = phi::make_ddim(input_shape);
int C = input_shape[1];
int image_size = input_shape[2] * input_shape[3];
int batchSize = input_shape[0];
PADDLE_ENFORCE_EQ(
C,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the channel number in groupnorm,"
"but got feature_size:%d, scale's size:%d.",
C,
scale_.size()));
PADDLE_ENFORCE_EQ(
C,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the channel number in groupnorm,"
"but got feature_size:%d, bias's size:%d.",
C,
bias_.size()));
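// The workspace is laid out as three consecutive float buffers of size n * groups:
// per-(batch, group) mean, variance, and a temporary variance buffer.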
float *mean_d = static_cast<float *>(workspace);
float *variance_d = mean_d + input_shape[0] * groups_;
float *temp_variance_d = variance_d + input_shape[0] * groups_;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<float, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<float *>(bias_gpu_),
reinterpret_cast<float *>(scale_gpu_),
temp_variance_d,
groups,
eps,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
if (input_desc[0].format == nvinfer1::PluginFormat::kLINEAR) {
phi::GroupNormDirectCUDAFunctor<half, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<half *>(bias_gpu_),
reinterpret_cast<half *>(scale_gpu_),
temp_variance_d,
groups,
eps,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_desc[0].format == nvinfer1::PluginFormat::kHWC8) {
int32_t cPerBlock = 320;
int32_t maxBlocksPerHW = 1024;
switch (input_desc[0].dims.d[1]) {
case 960:
case 1920:
cPerBlock = 480;
break;
case 512:
case 256:
cPerBlock = 256;
break;
case 128:
cPerBlock = 128;
break;
default:
cPerBlock = 320;
}
if (cPerBlock > input_desc[0].dims.d[1]) {
cPerBlock = 8;
}
params_.withSilu = with_silu_;
params_.dst = static_cast<half *>(outputs[0]);
params_.srcX = static_cast<half const *>(inputs[0]);
params_.gamma = scale_gpu_;
params_.beta = bias_gpu_;
params_.redBuffer = static_cast<float *>(workspace);
params_.n = input_desc[0].dims.d[0];
params_.h = input_desc[0].dims.d[2];
params_.w = input_desc[0].dims.d[3];
params_.c = input_desc[0].dims.d[1];
params_.groups = groups_;
params_.hw = params_.h * params_.w;
const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW);
params_.hwPerBlock = divUp(params_.hw, blocksPerHW);
params_.cPerBlock = cPerBlock;
params_.cPerGroup = params_.c / params_.groups;
params_.hwc = params_.hw * params_.c;
params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup);
params_.groupsPerBlock = cPerBlock / params_.cPerGroup;
params_.eps = eps_;
hipMemsetAsync(params_.redBuffer,
0,
2 * sizeof(float) * params_.n * groups_,
stream);
groupNormNHWCSum(params_, stream);
groupNormNHWCScale(params_, stream);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin's only support nchw or nhwc8 input"));
}
} else {
// input not float
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin's only support fp32 or fp16 input"));
}
return hipGetLastError() != hipSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| bb126e9da165e27214df9f2c6726f56f91fd3cc1.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION &
AFFILIATES. All rights reserved. SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/inference/tensorrt/plugin/group_norm_op_plugin.h"
#include "paddle/phi/kernels/group_norm_kernel.h"
#include <cub/cub.cuh>
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
using DataLayout = phi::DataLayout;
static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; }
static inline __device__ __host__ float sigmoid(float x) {
return 1.F / (1.F + expf(-x));
}
struct GroupSums {
// Is it the 1st element of the group?
int32_t flag;
// The sum.
float sum;
// The sum of squares.
float sumSq;
};
struct GroupSumsOp {
inline __device__ GroupSums operator()(GroupSums const &a,
GroupSums const &b) {
GroupSums dst;
dst.sum = b.flag ? b.sum : (a.sum + b.sum);
dst.sumSq = b.flag ? b.sumSq : (a.sumSq + b.sumSq);
dst.flag = a.flag + b.flag;
return dst;
}
};
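// Largest divisor of n that is strictly smaller than maxAllowedDivisor (or -1 if
// none); used below to decide how many blocks cover each H*W plane.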
static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) {
int32_t maxDivisor = -1;
for (int32_t i = 1; i <= std::sqrt(n); i++) {
if (n % i == 0) {
int32_t divisor1 = n / i;
int32_t divisor2 = i;
if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) {
maxDivisor = divisor1;
}
if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) {
maxDivisor = divisor2;
}
}
}
return maxDivisor;
}
template <int tTHREADS_PER_BLOCK>
__global__ void groupNormNHWCSumKernel(const GroupNormNHWCParams params) {
// The object in charge of doing the sums for the different blocks.
typedef cub::BlockScan<GroupSums, tTHREADS_PER_BLOCK> BlockScan;
// Allocate shared memory for BlockScan.
__shared__ typename BlockScan::TempStorage tempStorage;
// Allocate shared memory for the groups. We could reduce the amount of shared
// memory reserved.
__shared__ float2 smem[tTHREADS_PER_BLOCK];
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for F16x2).
int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2;
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// The sums.
float sum = 0.F;
float sumSq = 0.F;
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The offset.
int64_t offset = static_cast<int64_t>(ni) * params.hwc +
static_cast<int64_t>(hwi) * params.c + ci;
// Fetch two channels per thread.
__half2 h2(0, 0);
if (ci < params.c) {
h2 = *reinterpret_cast<__half2 const *>(¶ms.srcX[offset]);
}
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Update the sum.
sum += f2.x + f2.y;
// Update the sum of squares.
sumSq += f2.x * f2.x + f2.y * f2.y;
}
// The group that thread works on and the channel in the group (modulus).
int32_t gi = threadIdx.x * 2 / params.cPerGroup;
int32_t cj = threadIdx.x * 2 - params.cPerGroup * gi;
// The data for the summations.
GroupSums inp{cj == 0 ? 1 : 0, sum, sumSq};
// Do the segmented scan.
GroupSums out;
BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp());
// Store the results for the groups in shared memory (to produce coalesced
// stores later).
// 2 channels per thread
if (cj == params.cPerGroup - 2) {
smem[gi] = make_float2(out.sum, out.sumSq);
}
// Make sure the data is in shared memory.
__syncthreads();
// The global group index.
int32_t gj = blockIdx.x * params.groupsPerBlock + threadIdx.x;
// Threads that have nothing left to do, exit.
if (threadIdx.x >= params.groupsPerBlock || gj >= params.groups) {
return;
}
// The first threads (those storing to global memory) load the values.
float2 sums = smem[threadIdx.x];
// Store to global memory.
atomicAdd(¶ms.redBuffer[(2 * ni + 0) * params.groups + gj], sums.x);
atomicAdd(¶ms.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y);
}
void groupNormNHWCSum(const GroupNormNHWCParams ¶ms, cudaStream_t stream) {
dim3 grid;
// The number of blocks to compute all the channels.
grid.x = params.c / params.cPerBlock;
// The number of blocks to compute all the activations in a given instance.
grid.y = divUp(params.hw, params.hwPerBlock);
// The number of instances.
grid.z = params.n;
switch (params.cPerBlock) {
case 320:
groupNormNHWCSumKernel<160><<<grid, 160, 0, stream>>>(params);
break;
case 480:
groupNormNHWCSumKernel<256><<<grid, 256, 0, stream>>>(params);
break;
case 256:
groupNormNHWCSumKernel<128><<<grid, 128, 0, stream>>>(params);
break;
case 128:
groupNormNHWCSumKernel<64><<<grid, 64, 0, stream>>>(params);
break;
case 8:
groupNormNHWCSumKernel<4><<<grid, 4, 0, stream>>>(params);
break;
default:
PADDLE_THROW(platform::errors::Fatal(
"The function groupNormNHWCSum of GroupNormPlugin TRT Plugin "
"encounter error"));
}
}
template <int tTHREADS_PER_BLOCK>
__global__ void groupNormNHWCScaleKernel(const GroupNormNHWCParams params) {
// The instance in the batch.
int32_t ni = blockIdx.z;
// The channel loaded by that thread (2 channels per thread for F16x2).
int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2;
// The group that thread works on and the channel in the group (modulus).
int32_t gi = ci / params.cPerGroup;
// Load the sum and sum of squares for the group.
float sum = 0.F, sumSq = 0.F;
if (gi < params.groups) {
sum = params.redBuffer[(2 * ni + 0) * params.groups + gi];
sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi];
}
// Load gamma/beta.
float2 gammaF2, betaF2;
if (ci < params.c) {
gammaF2 = __half22float2(*reinterpret_cast<half2 const *>(
reinterpret_cast<half const *>(params.gamma) + ci));
betaF2 = __half22float2(*reinterpret_cast<half2 const *>(
reinterpret_cast<half const *>(params.beta) + ci));
}
// Compute the mean.
float mean = sum * params.invHWC;
// Compute the variance.
float var = sumSq * params.invHWC - (mean * mean);
// Compute the inverse of the stddev.
float invStdDev = rsqrtf(var + params.eps);
// The first activation loaded by that block.
int32_t hwBegin = blockIdx.y * params.hwPerBlock;
// The last activation loaded by that block.
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw);
// Iterate over the activations to compute the sums.
for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) {
// The src/dst offset.
int64_t offset = (int64_t)ni * params.hwc + hwi * params.c + ci;
// Fetch two channels per thread.
__half2 h2(0, 0);
if (ci < params.c) {
h2 = *reinterpret_cast<__half2 const *>(¶ms.srcX[offset]);
}
// Extract the two half values.
float2 f2 = __half22float2(h2);
// Normalize the channels.
f2.x = (f2.x - mean) * invStdDev;
f2.y = (f2.y - mean) * invStdDev;
// Scale by gamma and add beta.
f2.x = gammaF2.x * f2.x + betaF2.x;
f2.y = gammaF2.y * f2.y + betaF2.y;
// Apply Silu if needed.
if (params.withSilu) {
f2.x = f2.x * sigmoid(f2.x);
f2.y = f2.y * sigmoid(f2.y);
}
// Store the scaled values.
if (ci < params.c) {
*reinterpret_cast<__half2 *>(¶ms.dst[offset]) = __float22half2_rn(f2);
}
}
}
void groupNormNHWCScale(const GroupNormNHWCParams ¶ms,
cudaStream_t stream) {
dim3 grid;
// The number of blocks to compute all the channels.
grid.x = params.c / params.cPerBlock;
// The number of blocks to compute all the activations in a given instance.
grid.y = divUp(params.hw, params.hwPerBlock);
// The number of instances.
grid.z = params.n;
switch (params.cPerBlock) {
case 320:
groupNormNHWCScaleKernel<160><<<grid, 160, 0, stream>>>(params);
break;
case 480:
groupNormNHWCScaleKernel<256><<<grid, 256, 0, stream>>>(params);
break;
case 256:
groupNormNHWCScaleKernel<128><<<grid, 128, 0, stream>>>(params);
break;
case 128:
groupNormNHWCScaleKernel<64><<<grid, 64, 0, stream>>>(params);
break;
case 8:
groupNormNHWCScaleKernel<4><<<grid, 4, 0, stream>>>(params);
break;
default:
PADDLE_THROW(platform::errors::Fatal(
"The function groupNormNHWCScale of GroupNormPlugin TRT Plugin "
"encounter error"));
}
}
int GroupNormPlugin::initialize() TRT_NOEXCEPT {
if (!with_fp16_) {
// if use fp32
cudaMalloc(&scale_gpu_, sizeof(float) * scale_.size());
cudaMalloc(&bias_gpu_, sizeof(float) * bias_.size());
cudaMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
cudaMemcpyHostToDevice);
} else {
// if use fp16
std::vector<half> scale_half(scale_.size());
std::vector<half> bias_half(bias_.size());
for (int i = 0; i < scale_.size(); ++i) {
scale_half[i] = static_cast<half>(scale_[i]);
}
for (int i = 0; i < bias_.size(); ++i) {
bias_half[i] = static_cast<half>(bias_[i]);
}
cudaMalloc(&scale_gpu_, sizeof(half) * scale_half.size());
cudaMalloc(&bias_gpu_, sizeof(half) * bias_half.size());
cudaMemcpy(scale_gpu_,
scale_half.data(),
scale_half.size() * sizeof(half),
cudaMemcpyHostToDevice);
cudaMemcpy(bias_gpu_,
bias_half.data(),
bias_half.size() * sizeof(half),
cudaMemcpyHostToDevice);
}
return 0;
}
bool GroupNormPlugin::supportsFormat(
nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT {
if (with_fp16_) {
return ((type == nvinfer1::DataType::kHALF) &&
(format == nvinfer1::PluginFormat::kLINEAR));
} else {
return ((type == nvinfer1::DataType::kFLOAT) &&
(format == nvinfer1::PluginFormat::kLINEAR));
}
}
nvinfer1::Dims GroupNormPlugin::getOutputDimensions(
int index, const nvinfer1::Dims *inputDims, int nbInputs) TRT_NOEXCEPT {
return inputDims[0];
}
int GroupNormPlugin::enqueue(int batch_size,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
#else
void *const *outputs,
void *workspace,
#endif
cudaStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = this->getInputDims(0);
int groups = groups_;
float eps = eps_;
std::vector<int> input_shape;
input_shape.push_back(batch_size);
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
const auto input_ddim = phi::make_ddim(input_shape);
int C = input_shape[1];
PADDLE_ENFORCE_EQ(
C,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the channel number in groupnorm,"
"but got channel number:%d, scale's size:%d.",
C,
scale_.size()));
PADDLE_ENFORCE_EQ(
C,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the channel number in groupnorm,"
"but got channel number:%d, bias's size:%d.",
C,
bias_.size()));
float *mean_d = static_cast<float *>(workspace);
float *variance_d = mean_d + input_shape[0] * groups_;
float *temp_variance_d = variance_d + input_shape[0] * groups_;
auto input_type = getDataType();
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32";
const float *input = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<float *>(bias_gpu_),
reinterpret_cast<float *>(scale_gpu_),
temp_variance_d,
groups_,
eps_,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16";
const half *input = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<half, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<const half *>(bias_gpu_),
reinterpret_cast<const half *>(scale_gpu_),
temp_variance_d,
groups_,
eps_,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The GroupNorm TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
nvinfer1::DimsExprs GroupNormPluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputDims,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputDims[0];
}
bool GroupNormPluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of groupnorm plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
if (with_fp16_) {
return ((in.type == nvinfer1::DataType::kHALF) &&
((!with_silu_ && in.format == nvinfer1::PluginFormat::kLINEAR) ||
in.format == nvinfer1::PluginFormat::kHWC8));
} else {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType GroupNormPluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The groupnorm Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true,
platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int GroupNormPluginDynamic::initialize() TRT_NOEXCEPT {
if (with_fp16_ == false) {
// if use fp32
cudaMalloc(&scale_gpu_, sizeof(float) * scale_.size());
cudaMalloc(&bias_gpu_, sizeof(float) * bias_.size());
cudaMemcpy(scale_gpu_,
scale_.data(),
scale_.size() * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(bias_gpu_,
bias_.data(),
bias_.size() * sizeof(float),
cudaMemcpyHostToDevice);
} else {
// if use fp16
std::vector<half> scale_half(scale_.size());
std::vector<half> bias_half(bias_.size());
for (int i = 0; i < scale_.size(); ++i) {
scale_half[i] = static_cast<half>(scale_[i]);
}
for (int i = 0; i < bias_.size(); ++i) {
bias_half[i] = static_cast<half>(bias_[i]);
}
cudaMalloc(&scale_gpu_, sizeof(half) * scale_.size());
cudaMalloc(&bias_gpu_, sizeof(half) * bias_.size());
cudaMemcpy(scale_gpu_,
scale_half.data(),
scale_half.size() * sizeof(half),
cudaMemcpyHostToDevice);
cudaMemcpy(bias_gpu_,
bias_half.data(),
bias_half.size() * sizeof(half),
cudaMemcpyHostToDevice);
}
return 0;
}
int GroupNormPluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
const auto &input_dims = input_desc[0].dims;
int groups = groups_;
float eps = eps_;
std::vector<int> input_shape;
for (int i = 0; i < input_dims.nbDims; i++) {
input_shape.push_back(input_dims.d[i]);
}
const auto input_ddim = phi::make_ddim(input_shape);
int C = input_shape[1];
int image_size = input_shape[2] * input_shape[3];
int batchSize = input_shape[0];
PADDLE_ENFORCE_EQ(
C,
scale_.size(),
platform::errors::InvalidArgument(
"scale's size should be equal to the channel number in groupnorm,"
"but got feature_size:%d, scale's size:%d.",
C,
scale_.size()));
PADDLE_ENFORCE_EQ(
C,
bias_.size(),
platform::errors::InvalidArgument(
"bias's size should be equal to the channel number in groupnorm,"
"but got feature_size:%d, bias's size:%d.",
C,
bias_.size()));
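// The workspace is carved into three per-(batch, group) float buffers laid out
// back to back: mean, variance, and a temporary variance used by the NCHW path.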
float *mean_d = static_cast<float *>(workspace);
float *variance_d = mean_d + input_shape[0] * groups_;
float *temp_variance_d = variance_d + input_shape[0] * groups_;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32";
const float *input = reinterpret_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
phi::GroupNormDirectCUDAFunctor<float, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<float *>(bias_gpu_),
reinterpret_cast<float *>(scale_gpu_),
temp_variance_d,
groups,
eps,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_type == nvinfer1::DataType::kHALF) {
VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16";
const half *input = reinterpret_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
if (input_desc[0].format == nvinfer1::PluginFormat::kLINEAR) {
phi::GroupNormDirectCUDAFunctor<half, float> group_norm;
group_norm(stream,
input,
input_shape,
reinterpret_cast<half *>(bias_gpu_),
reinterpret_cast<half *>(scale_gpu_),
temp_variance_d,
groups,
eps,
output,
mean_d,
variance_d,
DataLayout::kNCHW);
} else if (input_desc[0].format == nvinfer1::PluginFormat::kHWC8) {
int32_t cPerBlock = 320;
int32_t maxBlocksPerHW = 1024;
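// Pick how many channels each thread block processes; the NHWC8 kernels are only
// instantiated for this fixed set of cPerBlock values, and we fall back to 8 when
// the tensor has fewer channels than the chosen tile.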
switch (input_desc[0].dims.d[1]) {
case 960:
case 1920:
cPerBlock = 480;
break;
case 512:
case 256:
cPerBlock = 256;
break;
case 128:
cPerBlock = 128;
break;
default:
cPerBlock = 320;
}
if (cPerBlock > input_desc[0].dims.d[1]) {
cPerBlock = 8;
}
params_.withSilu = with_silu_;
params_.dst = static_cast<half *>(outputs[0]);
params_.srcX = static_cast<half const *>(inputs[0]);
params_.gamma = scale_gpu_;
params_.beta = bias_gpu_;
params_.redBuffer = static_cast<float *>(workspace);
params_.n = input_desc[0].dims.d[0];
params_.h = input_desc[0].dims.d[2];
params_.w = input_desc[0].dims.d[3];
params_.c = input_desc[0].dims.d[1];
params_.groups = groups_;
params_.hw = params_.h * params_.w;
const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW);
params_.hwPerBlock = divUp(params_.hw, blocksPerHW);
params_.cPerBlock = cPerBlock;
params_.cPerGroup = params_.c / params_.groups;
params_.hwc = params_.hw * params_.c;
params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup);
params_.groupsPerBlock = cPerBlock / params_.cPerGroup;
params_.eps = eps_;
cudaMemsetAsync(params_.redBuffer,
0,
2 * sizeof(float) * params_.n * groups_,
stream);
groupNormNHWCSum(params_, stream);
groupNormNHWCScale(params_, stream);
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin's only support nchw or nhwc8 input"));
}
} else {
// input not float
PADDLE_THROW(platform::errors::Fatal(
"The Groupnorm TRT Plugin's only support fp32 or fp16 input"));
}
return cudaGetLastError() != cudaSuccess;
}
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
6ce1d23feca3d2905d9ed9db549cb643aeb4411a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
CUDA16_KERNEL_FW_X_CONST(add_const, X_VAL + k);
CUDA16_KERNEL_BW_X_CONST(add_const, GY_VAL);
CUDA16_KERNEL_FW_X_SCALAR_R(add_scalar, ::__fadd_rn);
CUDA16_KERNEL_FW_AB(add, ::__fadd_rn);
DECL_ATOMIC_OP(atomicHAdd, ::__fadd_rn);
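// Backward pass of element-wise addition. mba / mbb act as per-operand batch-stride
// multipliers: 0 means the operand has a single (broadcast) slice, so gradients from
// every batch element are accumulated into it atomically; 1 gives each batch element
// its own gradient slice.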
__global__ void add_bw_dev(
const half *, const half *, const half *, const half *pgy,
std::uint32_t size, std::uint32_t mba, std::uint32_t mbb,
half *pga, half *pgb) {
const std::uint32_t i = IDX;
const std::uint32_t shift = blockIdx.y * size;
if (i < size) {
const float gy = ::__half2float(pgy[i + shift]);
atomicHAdd(pga, i + mba * shift, gy);
atomicHAdd(pgb, i + mbb * shift, gy);
}
}
} // namespace
namespace primitiv {
namespace devices {
CUDA16_DEV_FW_X_CONST(add_const);
CUDA16_DEV_BW_X_CONST(add_const);
CUDA16_DEV_FW_X_SCALAR(add_scalar);
CUDA16_DEV_FW_AB(add);
CUDA16_DEV_BW_AB(add);
} // namespace devices
} // namespace primitiv
| 6ce1d23feca3d2905d9ed9db549cb643aeb4411a.cu | #include <primitiv/config.h>
#include <primitiv/devices/cuda16/device.h>
#include <primitiv/devices/cuda16/ops/common.h>
#include <primitiv/internal/cuda/utils.h>
namespace {
CUDA16_KERNEL_FW_X_CONST(add_const, X_VAL + k);
CUDA16_KERNEL_BW_X_CONST(add_const, GY_VAL);
CUDA16_KERNEL_FW_X_SCALAR_R(add_scalar, ::__fadd_rn);
CUDA16_KERNEL_FW_AB(add, ::__fadd_rn);
DECL_ATOMIC_OP(atomicHAdd, ::__fadd_rn);
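// Backward pass of element-wise addition. mba / mbb act as per-operand batch-stride
// multipliers: 0 means the operand has a single (broadcast) slice, so gradients from
// every batch element are accumulated into it atomically; 1 gives each batch element
// its own gradient slice.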
__global__ void add_bw_dev(
const half *, const half *, const half *, const half *pgy,
std::uint32_t size, std::uint32_t mba, std::uint32_t mbb,
half *pga, half *pgb) {
const std::uint32_t i = IDX;
const std::uint32_t shift = blockIdx.y * size;
if (i < size) {
const float gy = ::__half2float(pgy[i + shift]);
atomicHAdd(pga, i + mba * shift, gy);
atomicHAdd(pgb, i + mbb * shift, gy);
}
}
} // namespace
namespace primitiv {
namespace devices {
CUDA16_DEV_FW_X_CONST(add_const);
CUDA16_DEV_BW_X_CONST(add_const);
CUDA16_DEV_FW_X_SCALAR(add_scalar);
CUDA16_DEV_FW_AB(add);
CUDA16_DEV_BW_AB(add);
} // namespace devices
} // namespace primitiv
|
a5ab5c9834d57906dcb33623fde307632b18132f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// tatami.cu
const unsigned nMax(100000000);
const unsigned nMaxSqrt(sqrt(nMax));
__global__ void even(unsigned* v, unsigned base)
{
unsigned i = (blockIdx.x * blockDim.x + threadIdx.x + base) * 2 + 8;
unsigned k2 = i + 3;
unsigned k3 = i + i - 4;
while ((k2 <= k3) && ((i * k2) < nMax))
{
unsigned k4 = (nMax - 1) / i;
if (k3 < k4)
k4 = k3;
__syncthreads();
for (unsigned j = k2; j <= k4; ++j)
atomicInc(&v[i * j / 2], 0xffffffff);
__syncthreads();
k2 += i + 1;
k3 += i - 1;
}
} | a5ab5c9834d57906dcb33623fde307632b18132f.cu | #include "includes.h"
// tatami.cu
const unsigned nMax(100000000);
const unsigned nMaxSqrt(sqrt(nMax));
__global__ void even(unsigned* v, unsigned base)
{
unsigned i = (blockIdx.x * blockDim.x + threadIdx.x + base) * 2 + 8;
unsigned k2 = i + 3;
unsigned k3 = i + i - 4;
while ((k2 <= k3) && ((i * k2) < nMax))
{
unsigned k4 = (nMax - 1) / i;
if (k3 < k4)
k4 = k3;
__syncthreads();
for (unsigned j = k2; j <= k4; ++j)
atomicInc(&v[i * j / 2], 0xffffffff);
__syncthreads();
k2 += i + 1;
k3 += i - 1;
}
} |
107ff4973a483210a44f8d95b86532eda079f933.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file normalize_gpu.c
// @brief Normalize block implementation (GPU)
// @author Andrea Vedaldi
/*
Copyright (C) 2014-15 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "normalize.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <float.h>
/* ---------------------------------------------------------------- */
/* normalize_forward */
/* ---------------------------------------------------------------- */
#undef xat
#undef yat
#undef zat
#define xat(t) x[(t) * offset]
#define yat(t) y[(t) * offset]
#define zat(t) z[(t) * offset]
#define __powf powf
template<typename T> __global__ void
normalize_forward_kernel
(T* normalized,
T const* data,
int width,
int height,
int depth,
int num,
int normDepth,
T kappa, T alpha, T beta)
{
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < width*height*num) {
int u0 = index ;
int v0 = u0 / width ;
int k0 = v0 / height ;
u0 %= width ;
v0 %= height ;
int m1 = ((signed)normDepth-1)/2 ;
int m2 = normDepth - m1 - 1 ;
int offset = width*height ;
int t ;
T const* x = data + u0 + (v0 + k0 * (depth*height)) * width ;
T* y = normalized + u0 + (v0 + k0 * (depth*height)) * width ;
T acc = 0 ;
for (t = -m2 ; t < (signed)depth ; ++t) {
T ap = 0 ;
T am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; }
if (t+m2 < depth) { ap = xat(t+m2) ; }
acc += ap*ap - am*am ;
if (0 <= t && t < depth) {
yat(t) = xat(t) * __powf(kappa + alpha * acc, -beta) ;
}
}
}
}
template<> vl::Error
vl::impl::normalize_forward<vl::GPU, float>(float* normalized,
float const* data,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t normDepth,
double kappa, double alpha, double beta)
{
hipLaunchKernelGGL(( normalize_forward_kernel<float>)
, dim3(divideUpwards(width*height*size, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
normalized, data, width, height, depth, size, normDepth, kappa, alpha, beta) ;
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
/* ---------------------------------------------------------------- */
/* normalize_backward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
normalize_backward_kernel
(T* normalized,
T const* data,
T const* dzdy,
int width,
int height,
int depth,
int num,
int normDepth,
T kappa, T alpha, T beta)
{
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < width*height*num) {
int u0 = index ;
int v0 = u0 / width ;
int k0 = v0 / height ;
u0 %= width ;
v0 %= height ;
int m1 = ((signed)normDepth-1)/2 ;
int m2 = normDepth - m1 - 1 ;
int offset = width*height ;
T ab2 = 2*alpha*beta ;
int t, q ;
T const* x = data + u0 + (v0 + k0 * (depth*height)) * width ;
T* y = normalized + u0 + (v0 + k0 * (depth*height)) * width ;
T const* z = dzdy + u0 + (v0 + k0 * (depth*height)) * width ;
T acc = 0 ;
for (t = 0 ; t < (signed)depth ; ++t) {
yat(t) = 0 ;
}
for (t = -m2 ; t < (signed)depth ; ++t) {
int q1 = t-m1 ;
int q2 = t+m2 ;
T ap = 0 ;
T am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; } else { q1 = 0 ; }
if (t+m2 < depth) { ap = xat(t+m2) ; } else { q2 = depth - 1 ; }
acc += ap*ap - am*am ;
T L = kappa + alpha * acc ;
T Lbeta = __powf(L, -beta) ;
T Lbeta1 = Lbeta / L ;
if (0 <= t && t < depth) {
yat(t) += zat(t) * Lbeta ;
for (q = q1 ; q <= q2 ; ++ q) {
yat(q) -= zat(t) * xat(t) * xat(q) * ab2 * Lbeta1 ;
}
}
}
}
}
template<> vl::Error
vl::impl::normalize_backward<vl::GPU, float>(float* derData,
float const* data,
float const* derNormalized,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t normDepth,
double kappa, double alpha, double beta)
{
hipLaunchKernelGGL(( normalize_backward_kernel<float>)
, dim3(divideUpwards(width*height*size, VL_CUDA_NUM_THREADS)), dim3(VL_CUDA_NUM_THREADS) , 0, 0,
derData, data, derNormalized, width, height, depth, size, normDepth, kappa, alpha, beta) ;
hipError_t status = hipPeekAtLastError() ;
return (status == hipSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
| 107ff4973a483210a44f8d95b86532eda079f933.cu | // @file normalize_gpu.c
// @brief Normalize block implementation (GPU)
// @author Andrea Vedaldi
/*
Copyright (C) 2014-15 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "normalize.hpp"
#include "../datacu.hpp"
#include <assert.h>
#include <float.h>
/* ---------------------------------------------------------------- */
/* normalize_forward */
/* ---------------------------------------------------------------- */
#undef xat
#undef yat
#undef zat
#define xat(t) x[(t) * offset]
#define yat(t) y[(t) * offset]
#define zat(t) z[(t) * offset]
#define __powf powf
template<typename T> __global__ void
normalize_forward_kernel
(T* normalized,
T const* data,
int width,
int height,
int depth,
int num,
int normDepth,
T kappa, T alpha, T beta)
{
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < width*height*num) {
int u0 = index ;
int v0 = u0 / width ;
int k0 = v0 / height ;
u0 %= width ;
v0 %= height ;
int m1 = ((signed)normDepth-1)/2 ;
int m2 = normDepth - m1 - 1 ;
int offset = width*height ;
int t ;
T const* x = data + u0 + (v0 + k0 * (depth*height)) * width ;
T* y = normalized + u0 + (v0 + k0 * (depth*height)) * width ;
T acc = 0 ;
for (t = -m2 ; t < (signed)depth ; ++t) {
T ap = 0 ;
T am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; }
if (t+m2 < depth) { ap = xat(t+m2) ; }
acc += ap*ap - am*am ;
if (0 <= t && t < depth) {
yat(t) = xat(t) * __powf(kappa + alpha * acc, -beta) ;
}
}
}
}
template<> vl::Error
vl::impl::normalize_forward<vl::GPU, float>(float* normalized,
float const* data,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t normDepth,
double kappa, double alpha, double beta)
{
normalize_forward_kernel<float>
<<< divideUpwards(width*height*size, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(normalized, data, width, height, depth, size, normDepth, kappa, alpha, beta) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
/* ---------------------------------------------------------------- */
/* normalize_backward */
/* ---------------------------------------------------------------- */
template<typename T> __global__ void
normalize_backward_kernel
(T* normalized,
T const* data,
T const* dzdy,
int width,
int height,
int depth,
int num,
int normDepth,
T kappa, T alpha, T beta)
{
int index = threadIdx.x + blockIdx.x * blockDim.x ;
if (index < width*height*num) {
int u0 = index ;
int v0 = u0 / width ;
int k0 = v0 / height ;
u0 %= width ;
v0 %= height ;
int m1 = ((signed)normDepth-1)/2 ;
int m2 = normDepth - m1 - 1 ;
int offset = width*height ;
T ab2 = 2*alpha*beta ;
int t, q ;
T const* x = data + u0 + (v0 + k0 * (depth*height)) * width ;
T* y = normalized + u0 + (v0 + k0 * (depth*height)) * width ;
T const* z = dzdy + u0 + (v0 + k0 * (depth*height)) * width ;
T acc = 0 ;
for (t = 0 ; t < (signed)depth ; ++t) {
yat(t) = 0 ;
}
for (t = -m2 ; t < (signed)depth ; ++t) {
int q1 = t-m1 ;
int q2 = t+m2 ;
T ap = 0 ;
T am = 0 ;
if (t-m1-1 >= 0) { am = xat(t-m1-1) ; } else { q1 = 0 ; }
if (t+m2 < depth) { ap = xat(t+m2) ; } else { q2 = depth - 1 ; }
acc += ap*ap - am*am ;
T L = kappa + alpha * acc ;
T Lbeta = __powf(L, -beta) ;
T Lbeta1 = Lbeta / L ;
if (0 <= t && t < depth) {
yat(t) += zat(t) * Lbeta ;
for (q = q1 ; q <= q2 ; ++ q) {
yat(q) -= zat(t) * xat(t) * xat(q) * ab2 * Lbeta1 ;
}
}
}
}
}
template<> vl::Error
vl::impl::normalize_backward<vl::GPU, float>(float* derData,
float const* data,
float const* derNormalized,
size_t width,
size_t height,
size_t depth,
size_t size,
size_t normDepth,
double kappa, double alpha, double beta)
{
normalize_backward_kernel<float>
<<< divideUpwards(width*height*size, VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>
(derData, data, derNormalized, width, height, depth, size, normDepth, kappa, alpha, beta) ;
cudaError_t status = cudaPeekAtLastError() ;
return (status == cudaSuccess) ? vl::vlSuccess : vl::vlErrorCuda ;
}
|
2995a43e15acf1bae49e489d18ea440d5fd093e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
#include "../student/reference_calc.cpp"
#include "../student/utils.h"
#include <thrust/extrema.h>
#include <thrust/scan.h>
/* Copied from Mike's IPython notebook with minor changes
moved to 1D thread indexing. Constified some input pointers */
/* Seems silly to optimize here since HW5 is about optimizing histograms */
__global__ void compute_histogram(
unsigned int* const d_histogram,
const float* const d_log_Y,
float min_log_Y,
float max_log_Y,
float log_Y_range,
int num_bins,
int num_pixels)
{
const int image_index_1d = blockIdx.x * blockDim.x + threadIdx.x;
if ( image_index_1d < num_pixels )
{
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( ( num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
atomicAdd( d_histogram + bin_index, 1 );
}
}
//TODO need "slow" versions of min/max and scan
void your_histogram_and_prefixsum(const float* const d_luminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
thrust::device_ptr<float> d_lum((float *)d_luminance);
/* The thrust routines are well optimized, but there is little reason to find the min/max
separately, plus thrust has its own overheads. Good students should still be able to beat
these routines because of that */
thrust::device_ptr<float> min_it = thrust::min_element(d_lum, d_lum + numRows * numCols);
thrust::device_ptr<float> max_it = thrust::max_element(d_lum, d_lum + numRows * numCols);
min_logLum = *min_it;
max_logLum = *max_it;
float range = max_logLum - min_logLum;
const int numThreads = 512;
unsigned int *d_histo;
checkCudaErrors(hipMalloc(&d_histo, sizeof(unsigned int) * numBins));
checkCudaErrors(hipMemset(d_histo, 0, sizeof(unsigned int) * numBins));
hipLaunchKernelGGL(( compute_histogram), dim3((numRows * numCols + numThreads - 1) / numThreads), dim3(numThreads), 0, 0,
d_histo, d_luminance, min_logLum, max_logLum, range, numBins, numRows * numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
thrust::device_ptr<unsigned int> histo(d_histo);
thrust::device_ptr<unsigned int> cdf(d_cdf);
/* The scan is so small that most techniques will probably not show a significant
difference in their execution times. Thrust may actually be fairly slow because
of its high overhead. */
thrust::exclusive_scan(histo, histo + numBins, cdf);
/****************************************************************************
* You can use the code below to help with debugging, but make sure to *
* comment it out again before submitting your assignment for grading, *
* otherwise this code will take too much time and make it seem like your *
* GPU implementation isn't fast enough. *
* *
* This code generates a reference cdf on the host by running the *
* reference calculation we have given you. It then copies your GPU *
* generated cdf back to the host and calls a function that compares the *
* the two and will output the first location they differ. *
* ************************************************************************* */
/* float *h_logLuminance = new float[numRows * numCols];
unsigned int *h_cdf = new unsigned int[numBins];
unsigned int *h_your_cdf = new unsigned int[numBins];
checkCudaErrors(hipMemcpy(h_logLuminance, d_luminance, numCols * numRows * sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(h_your_cdf, d_cdf, numBins * sizeof(unsigned int), hipMemcpyDeviceToHost));
referenceCalculation(h_logLuminance, h_cdf, numRows, numCols, numBins);
//compare the results of the CDF
checkResultsExact(h_cdf, h_your_cdf, numBins);
delete[] h_logLuminance;
delete[] h_cdf;
delete[] h_your_cdf; */
}
| 2995a43e15acf1bae49e489d18ea440d5fd093e5.cu | // Homework 2
#include "../student/reference_calc.cpp"
#include "../student/utils.h"
#include <thrust/extrema.h>
#include <thrust/scan.h>
/* Copied from Mike's IPython notebook with minor changes
moved to 1D thread indexing. Constified some input pointers */
/* Seems silly to optimize here since HW5 is about optimizing histograms */
__global__ void compute_histogram(
unsigned int* const d_histogram,
const float* const d_log_Y,
float min_log_Y,
float max_log_Y,
float log_Y_range,
int num_bins,
int num_pixels)
{
const int image_index_1d = blockIdx.x * blockDim.x + threadIdx.x;
if ( image_index_1d < num_pixels )
{
float log_Y = d_log_Y[ image_index_1d ];
int bin_index = min( num_bins - 1, int( ( num_bins * ( log_Y - min_log_Y ) ) / log_Y_range ) );
atomicAdd( d_histogram + bin_index, 1 );
}
}
//TODO need "slow" versions of min/max and scan
void your_histogram_and_prefixsum(const float* const d_luminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
thrust::device_ptr<float> d_lum((float *)d_luminance);
/* The thrust routines are well optimized, but there is little reason to find the min/max
separately, plus thrust has its own overheads. Good students should still be able to beat
these routines because of that */
thrust::device_ptr<float> min_it = thrust::min_element(d_lum, d_lum + numRows * numCols);
thrust::device_ptr<float> max_it = thrust::max_element(d_lum, d_lum + numRows * numCols);
min_logLum = *min_it;
max_logLum = *max_it;
float range = max_logLum - min_logLum;
const int numThreads = 512;
unsigned int *d_histo;
checkCudaErrors(cudaMalloc(&d_histo, sizeof(unsigned int) * numBins));
checkCudaErrors(cudaMemset(d_histo, 0, sizeof(unsigned int) * numBins));
compute_histogram<<< (numRows * numCols + numThreads - 1) / numThreads, numThreads>>>(
d_histo, d_luminance, min_logLum, max_logLum, range, numBins, numRows * numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
thrust::device_ptr<unsigned int> histo(d_histo);
thrust::device_ptr<unsigned int> cdf(d_cdf);
/* The scan is so small that most techniques will probably not show a significant
difference in their execution times. Thrust may actually be fairly slow because
of its high overhead. */
thrust::exclusive_scan(histo, histo + numBins, cdf);
/****************************************************************************
* You can use the code below to help with debugging, but make sure to *
* comment it out again before submitting your assignment for grading, *
* otherwise this code will take too much time and make it seem like your *
* GPU implementation isn't fast enough. *
* *
* This code generates a reference cdf on the host by running the *
* reference calculation we have given you. It then copies your GPU *
* generated cdf back to the host and calls a function that compares the *
* the two and will output the first location they differ. *
* ************************************************************************* */
/* float *h_logLuminance = new float[numRows * numCols];
unsigned int *h_cdf = new unsigned int[numBins];
unsigned int *h_your_cdf = new unsigned int[numBins];
checkCudaErrors(cudaMemcpy(h_logLuminance, d_luminance, numCols * numRows * sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(h_your_cdf, d_cdf, numBins * sizeof(unsigned int), cudaMemcpyDeviceToHost));
referenceCalculation(h_logLuminance, h_cdf, numRows, numCols, numBins);
//compare the results of the CDF
checkResultsExact(h_cdf, h_your_cdf, numBins);
delete[] h_logLuminance;
delete[] h_cdf;
delete[] h_your_cdf; */
}
|
e136a0b02894884ab2c1a217fb997041cf1cbc21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Example code showing the use of barriers to synchronize all threads in a block.
A barrier is set with __syncthreads();
The job of this program is to:
1. Initialize the array with each thread's index.
2. Shift the array one position to the left, so each element takes the value at index + 1.
Compile: nvcc shiftLeft.cu -o shiftLeft.out
Run: ./shiftLeft
*/
#include <stdio.h>
#define NUM_BLOCKS 1
#define BLOCK_WIDTH 128
__global__ void shiftLeft(int* array)
{
int idx = threadIdx.x;
array[idx] = idx;
__syncthreads();
// Read the right-hand neighbour first, then let every thread in the block reach
// the barrier before writing back; a __syncthreads() inside a divergent branch is
// undefined behaviour, so the last thread simply re-reads its own value instead
// of skipping the barrier.
int tmp = (idx < BLOCK_WIDTH - 1) ? array[idx + 1] : array[idx];
__syncthreads();
array[idx] = tmp;
}
int main(int argc,char **argv)
{
const int ARRAY_SIZE = BLOCK_WIDTH;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// output array on the host
int h_out[ARRAY_SIZE];
// declare GPU memory pointer
int* d_out;
// allocate GPU memory
hipMalloc((void**) &d_out, ARRAY_BYTES);
// launch the kernel
hipLaunchKernelGGL(( shiftLeft), dim3(NUM_BLOCKS), dim3(BLOCK_WIDTH), 0, 0, d_out);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i = 0; i < ARRAY_SIZE; ++i) {
printf("%d", h_out[i]);
printf(i % 4 != 3 ? "\t" : "\n");
}
// free GPU memory allocation
hipFree(d_out);
return 0;
} | e136a0b02894884ab2c1a217fb997041cf1cbc21.cu | /*
Example code showing the use of barriers to synchronize all threads in a block.
A barrier is set with __syncthreads();
The job of this program is to:
1. Initialize the array with each thread's index.
2. Shift the array one position to the left, so each element takes the value at index + 1.
Compile: nvcc shiftLeft.cu -o shiftLeft.out
Run: ./shiftLeft
*/
#include <stdio.h>
#define NUM_BLOCKS 1
#define BLOCK_WIDTH 128
__global__ void shiftLeft(int* array)
{
int idx = threadIdx.x;
array[idx] = idx;
__syncthreads();
// Read the right-hand neighbour first, then let every thread in the block reach
// the barrier before writing back; a __syncthreads() inside a divergent branch is
// undefined behaviour, so the last thread simply re-reads its own value instead
// of skipping the barrier.
int tmp = (idx < BLOCK_WIDTH - 1) ? array[idx + 1] : array[idx];
__syncthreads();
array[idx] = tmp;
}
int main(int argc,char **argv)
{
const int ARRAY_SIZE = BLOCK_WIDTH;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
// output array on the host
int h_out[ARRAY_SIZE];
// declare GPU memory pointer
int* d_out;
// allocate GPU memory
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// launch the kernel
shiftLeft<<<NUM_BLOCKS, BLOCK_WIDTH>>>(d_out);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i = 0; i < ARRAY_SIZE; ++i) {
printf("%d", h_out[i]);
printf(i % 4 != 3 ? "\t" : "\n");
}
// free GPU memory allocation
cudaFree(d_out);
return 0;
} |
208ab16fe3e2f7d1bb37653d43ffa498a5e211a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// file: sift.cu
// GPU implementation: Liang Men
// description: Sift definition
#include<sift.hpp>
#include<sift-conv.tpp>
#include<algorithm>
#include<iostream>
#include<sstream>
#include<cassert>
#include <cutil.h>
extern "C" {
#if defined (VL_MAC)
#include<libgen.h>
#else
#include<string.h>
#endif
}
#define BLOCK_SIZE 16
#define C_TILE_SIZE 250
#define D_BLOCK_SIZE 128
#define F_TILE_SIZE 14
#define F_BLOCK_SIZE F_TILE_SIZE+2
#define F_TILE_SIZE_S 12
#define F_BLOCK_SIZE_S F_TILE_SIZE_S+4
#define G_TILE_SIZE 14
#define G_BLOCK_SIZE G_TILE_SIZE+2
#define K_BLOCK_SIZE 128
using namespace VL ;
// on startup, pre-compute expn(x) = exp(-x)
namespace VL {
namespace Detail {
int const expnTableSize = 256 ;
VL::float_t const expnTableMax = VL::float_t(25.0) ;
VL::float_t expnTable [ expnTableSize + 1 ] ;
struct buildExpnTable
{
buildExpnTable() {
for(int k = 0 ; k < expnTableSize + 1 ; ++k) {
expnTable[k] = exp( - VL::float_t(k) / expnTableSize * expnTableMax ) ;
}
}
} _buildExpnTable ;
} }
namespace VL {
namespace Detail {
/** Comment eater istream manipulator */
class _cmnt {} cmnt ;
/** @brief Extract a comment from a stream
**
** The function extracts a block of consecutive comments from an
** input stream. A comment is a sequence of whitespaces, followed by
** a `#' character, other characters and terminated at the next line
** ending. A block of comments is just a sequence of comments.
**/
std::istream&
operator>>(std::istream& is, _cmnt& manip)
{
char c ;
char b [1024] ;
is>>c ;
if( c != '#' )
return is.putback(c) ;
is.getline(b,1024) ;
return is ;
}
}
/** @brief Insert PGM file into stream
**
** The function inserts into the stream @a os the grayscale image @a
** im encoded as a PGM file. The image is assumed to be normalized
** in the range 0.0 - 1.0.
**
** @param os output stream.
** @param im pointer to image data.
** @param width image width.
** @param height image height.
** @return the stream @a os.
**/
std::ostream&
insertPgm(std::ostream& os, pixel_t const* im, int width, int height)
{
os<< "P5" << "\n"
<< width << " "
<< height << "\n"
<< "255" << "\n" ;
for(int y = 0 ; y < height ; ++y) {
for(int x = 0 ; x < width ; ++x) {
unsigned char v =
(unsigned char)
(::max(::min(*im++, 1.0f),0.f) * 255.0f) ;
os << v ;
}
}
return os ;
}
/** @brief Extract PGM file from stream.
**
** The function extracts from the stream @a in a grayscale image
** encoded as a PGM file. The function fills the structure @a buffer,
** containing the image dimensions and a pointer to the image data.
**
** The image data is an array of floats and is owned by the caller,
** which should erase it as in
**
** @code
** delete [] buffer.data.
** @endcode
**
** When the function encounters an error it throws a generic instance
** of VL::Exception.
**
** @param in input stream.
** @param buffer buffer descriptor to be filled.
** @return the stream @a in.
**/
std::istream&
extractPgm(std::istream& in, PgmBuffer& buffer)
{
pixel_t* im_pt ;
int width ;
int height ;
int maxval ;
char c ;
in>>c ;
if( c != 'P') VL_THROW("File is not in PGM format") ;
bool is_ascii ;
in>>c ;
switch( c ) {
case '2' : is_ascii = true ; break ;
case '5' : is_ascii = false ; break ;
default : VL_THROW("File is not in PGM format") ;
}
in >> Detail::cmnt
>> width
>> Detail::cmnt
>> height
>> Detail::cmnt
>> maxval ;
// after maxval no more comments, just a whitespace or newline
{char trash ; in.get(trash) ;}
if(maxval > 255)
VL_THROW("Only <= 8-bit per channel PGM files are supported") ;
if(! in.good())
VL_THROW("PGM header parsing error") ;
im_pt = new pixel_t [ width*height ];
try {
if( is_ascii ) {
pixel_t* start = im_pt ;
pixel_t* end = start + width*height ;
pixel_t norm = pixel_t( maxval ) ;
while( start != end ) {
int i ;
in >> i ;
if( ! in.good() ) VL_THROW
("PGM parsing error file (width="<<width
<<" height="<<height
<<" maxval="<<maxval
<<" at pixel="<<start-im_pt<<")") ;
*start++ = pixel_t( i ) / norm ;
}
} else {
std::streampos beg = in.tellg() ;
char* buffer = new char [width*height] ;
in.read(buffer, width*height) ;
if( ! in.good() ) VL_THROW
("PGM parsing error file (width="<<width
<<" height="<<height
<<" maxval="<<maxval
<<" at pixel="<<in.tellg()-beg<<")") ;
pixel_t* start = im_pt ;
pixel_t* end = start + width*height ;
uint8_t* src = reinterpret_cast<uint8_t*>(buffer) ;
while( start != end ) *start++ = *src++ / 255.0f ;
}
} catch(...) {
delete [] im_pt ;
throw ;
}
buffer.width = width ;
buffer.height = height ;
buffer.data = im_pt ;
return in ;
}
// ===================================================================
// Low level image operations
// -------------------------------------------------------------------
namespace Detail {
/** @brief Copy an image
** @param dst output imgage buffer.
** @param src input image buffer.
** @param width input image width.
** @param height input image height.
**/
void
copy(pixel_t* dst, pixel_t const* src, int width, int height)
{
memcpy(dst, src, sizeof(pixel_t)*width*height) ;
}
/** @brief Copy an image upsampling two times
**
** The destination buffer must be at least as big as two times the
** input buffer. Bilinear interpolation is used.
**
** @param dst output imgage buffer.
** @param src input image buffer.
** @param width input image width.
** @param height input image height.
**/
/*
void
copyAndUpsampleRows
(pixel_t* dst, pixel_t const* src, int width, int height)
{
for(int y = 0 ; y < height ; ++y) {
pixel_t b, a ;
b = a = *src++ ;
for(int x = 0 ; x < width-1 ; ++x) {
b = *src++ ;
*dst = a ; dst += height ;
*dst = 0.5*(a+b) ; dst += height ;
a = b ;
}
*dst = b ; dst += height ;
*dst = b ; dst += height ;
dst += 1 - width * 2 * height ;
}
}
void
copyAndDownsample(pixel_t* dst, pixel_t const* src,
int width, int height, int d)
{
for(int y = 0 ; y < height ; y+=d) {
pixel_t const * srcrowp = src + y * width ;
for(int x = 0 ; x < width - (d-1) ; x+=d) {
*dst++ = *srcrowp ;
srcrowp += d ;
}
}
}
*/
}
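// Doubles the image along its width, averaging horizontally adjacent pixels, and
// writes the result transposed (mirroring the CPU copyAndUpsampleRows kept in the
// comment above); applying it twice therefore upsamples both dimensions.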
__global__ void UpsampleKernel(pixel_t* dst, pixel_t* src, int src_width, int src_height)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Col = bx*BLOCK_SIZE + tx;
int Row = by*BLOCK_SIZE + ty;
if ( Col < (src_width -1) && Row < src_height )
{
dst[2*Col*src_height + Row] = src[Row*src_width + Col];
dst[(2*Col+1)*src_height + Row] =
(src[Row*src_width + Col] + src[Row*src_width + Col + 1])/2;
}
else
{
if ( Col == (src_width - 1) && Row < src_height )
{
dst[2*Col*src_height + Row] = src[Row*src_width + Col];
dst[(2*Col+1)*src_height + Row] = src[Row*src_width + Col];
}
}
}
void
copyAndUpsampleRows (pixel_t* dst, pixel_t const* src, int width, int height)
{
int dst_width = height;
int dst_height = width * 2;
unsigned int src_size = sizeof(pixel_t) * (width*height);
unsigned int dst_size = sizeof(pixel_t) * (dst_width*dst_height);
pixel_t* dst_d = NULL;
pixel_t* src_d = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &dst_d, dst_size));
CUDA_SAFE_CALL( hipMemcpy( src_d, src, src_size, hipMemcpyHostToDevice) );
dim3 dimBlock, dimGrid1;
dimBlock.x = dimBlock.y = BLOCK_SIZE;
dimBlock.z = 1;
dimGrid1.x = (width / dimBlock.x) + ( (width % dimBlock.x) ? 1:0 );
dimGrid1.y = (height / dimBlock.y) + ( (height % dimBlock.y) ? 1:0 );
dimGrid1.z = 1;
hipLaunchKernelGGL(( UpsampleKernel), dim3(dimGrid1), dim3(dimBlock), 0, 0, dst_d, src_d, width, height);
CUDA_SAFE_CALL(hipMemcpy( dst, dst_d, dst_size, hipMemcpyDeviceToHost));
hipFree(dst_d);
hipFree(src_d);
}
void // Use this function to upsample both dimensions with a single call, avoiding a double call in the caller.
copyAndUpsampleRows2 (pixel_t* dst, pixel_t const* src, int width, int height)
{
int tmp_width = height;
int tmp_height = width * 2;
int dst_width = width * 2;
int dst_height = height * 2;
unsigned int src_size = sizeof(pixel_t) * (width*height);
unsigned int tmp_size = sizeof(pixel_t) * (2*width*height);
unsigned int dst_size = sizeof(pixel_t) * (dst_width*dst_height);
pixel_t* dst_d = NULL;
pixel_t* tmp_d = NULL;
pixel_t* src_d = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &tmp_d, tmp_size));
CUDA_SAFE_CALL( hipMemcpy( src_d, src, src_size, hipMemcpyHostToDevice) );
dim3 dimBlock, dimGrid1, dimGrid2;
dimBlock.x = dimBlock.y = BLOCK_SIZE;
dimBlock.z = 1;
dimGrid1.x = (width / dimBlock.x) + ( (width % dimBlock.x) ? 1:0 );
dimGrid1.y = (height / dimBlock.y) + ( (height % dimBlock.y) ? 1:0 );
dimGrid1.z = 1;
dimGrid2.x = (tmp_width / dimBlock.x) + ( (tmp_width % dimBlock.x) ? 1:0 );
dimGrid2.y = (tmp_height / dimBlock.y) + ( (tmp_height % dimBlock.y) ? 1:0 );
dimGrid2.z = 1;
hipLaunchKernelGGL(( UpsampleKernel), dim3(dimGrid1), dim3(dimBlock), 0, 0, tmp_d, src_d, width, height);
hipFree(src_d);
CUDA_SAFE_CALL( hipMalloc( (void**) &dst_d, dst_size));
hipLaunchKernelGGL(( UpsampleKernel), dim3(dimGrid2), dim3(dimBlock), 0, 0, dst_d, tmp_d, tmp_width, tmp_height);
CUDA_SAFE_CALL(hipMemcpy( dst, dst_d, dst_size, hipMemcpyDeviceToHost));
hipFree(dst_d);
hipFree(tmp_d);
}
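// Keeps every d-th pixel in both dimensions with no filtering, the GPU counterpart
// of the CPU copyAndDownsample kept in the comment above.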
__global__ void DownsampleKernel(pixel_t* dst, pixel_t* src, int src_width, int src_height, int dst_width, int d)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Col = bx*BLOCK_SIZE + tx;
int Row = by*BLOCK_SIZE + ty;
if ( d*Col < src_width && d*Row < src_height)
dst[Row*dst_width + Col] = src[d*Row*src_width + d*Col];
}
void copyAndDownsample(pixel_t* dst, pixel_t const* src, int width, int height, int d)
{
int dst_width = (width / d) + ((width % d) ? 1:0 );
int dst_height =(height / d) + ((height % d) ? 1:0);
unsigned int src_size = sizeof(pixel_t) * (width*height);
unsigned int dst_size = sizeof(pixel_t) * (dst_width*dst_height);
pixel_t* dst_d = NULL;
pixel_t* src_d = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &dst_d, dst_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( hipMemcpy( src_d, src, src_size, hipMemcpyHostToDevice) );
dim3 dimBlock, dimGrid;
dimBlock.x = dimBlock.y = BLOCK_SIZE;
dimBlock.z = 1;
dimGrid.x = (dst_width / dimBlock.x) + ( (dst_width % dimBlock.x) ? 1:0 );
dimGrid.y = (dst_height / dimBlock.y) + ( (dst_height % dimBlock.y) ? 1:0 );
dimGrid.z = 1;
hipLaunchKernelGGL(( DownsampleKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dst_d, src_d, width, height, dst_width, d);
CUDA_SAFE_CALL(hipMemcpy( dst, dst_d, dst_size, hipMemcpyDeviceToHost));
hipFree(dst_d);
hipFree(src_d);
}
/*
void econvolve(pixel_t* dst_pt,
pixel_t* src_pt, int M, int N,
pixel_t* filter_pt, int W)
{
//typedef T const TC ;
// convolve along columns, save transpose
// image is M by N
// buffer is N by M
// filter is (2*W+1) by 1
for(int j = 0 ; j < N ; ++j) {
for(int i = 0 ; i < M ; ++i) {
pixel_t acc = 0.0 ;
pixel_t* g = filter_pt ;
pixel_t* start = src_pt + (i-W) ;
pixel_t* stop ;
pixel_t x ;
// beginning
stop = src_pt + ::max(0, i-W) ;
x = *stop ;
while( start <= stop ) { acc += (*g++) * x ; start++ ; }
// middle
stop = src_pt + ::min(M-1, i+W) ;
while( start < stop ) acc += (*g++) * (*start++) ;
// end
x = *start ;
stop = src_pt + (i+W) ;
while( start <= stop ) { acc += (*g++) * x ; start++ ; }
// save
*dst_pt = acc ;
dst_pt += N ;
assert( g - filter_pt == 2*W+1 ) ;
}
// next column
src_pt += M ;
dst_pt -= M*N - 1 ;
}
}
*/
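// 1-D convolution of each image row against a (2*w+1)-tap filter. Every block loads a
// shared-memory tile of C_TILE_SIZE outputs plus a 2*w halo, borders are clamped to the
// edge pixel, and the result is written transposed so that two econvolve passes give
// the separable 2-D smoothing.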
__global__ void ConvKernel(pixel_t* dst, pixel_t* src, int src_width, int src_height, pixel_t* filter, int w)
{
extern __shared__ pixel_t Ns[];
int tx = threadIdx.x;
int bx = blockIdx.x;
int by = blockIdx.y;
int Row = by;
int Col = bx*C_TILE_SIZE + tx;
int i;
pixel_t Pvalue = 0;
if ((Col - w) >= 0 && (Col - w) <= (src_width - 1))
{
Ns[tx] = src[Row * src_width + (Col - w)];
}
else
{
if((Col - w) < 0)
Ns[tx] = src[Row * src_width];
else
Ns[tx] = src[(Row + 1) * src_width - 1];
}
__syncthreads();
if (tx < C_TILE_SIZE)
{
for ( i = 0; i < 2*w+1; i++)
Pvalue += filter[i] * Ns[i+tx];
if (Col < src_width )
dst[Col * src_height + Row] = Pvalue;
}
}
void econvolve(pixel_t* dst, pixel_t* src, int src_width, int src_height,
pixel_t* filter_pt, int W)
{
// convolve along columns, save transpose
// image is M by N
// buffer is N by M
// filter is (2*W+1) by 1
unsigned int src_size = sizeof(pixel_t) * (src_width*src_height);
unsigned int dst_size = sizeof(pixel_t) * (src_width*src_height);
unsigned int filter_size = sizeof(pixel_t) * (2*W + 1);
pixel_t* dst_d = NULL;
pixel_t* src_d = NULL;
pixel_t* filter_d = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &dst_d, dst_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &filter_d, filter_size));
CUDA_SAFE_CALL( hipMemcpy( src_d, src, src_size, hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMemcpy( filter_d, filter_pt, filter_size, hipMemcpyHostToDevice) );
int SizeofSM = sizeof(pixel_t) * (2*W + C_TILE_SIZE);
dim3 dimBlock, dimGrid;
dimBlock.x = 2*W + C_TILE_SIZE;
dimBlock.y = 1;
dimGrid.x = (src_width / C_TILE_SIZE) + ( (src_width % C_TILE_SIZE) ? 1:0 );
dimGrid.y = src_height;
// std::cout
// << "econvolve: number of w : " << W<<std::endl;
hipLaunchKernelGGL(( ConvKernel), dim3(dimGrid), dim3(dimBlock), SizeofSM, 0, dst_d, src_d, src_width, src_height, filter_d, W);
CUDA_SAFE_CALL(hipMemcpy( dst, dst_d, dst_size, hipMemcpyDeviceToHost));
hipFree(dst_d);
hipFree(src_d);
hipFree(filter_d);
}
/** @brief Smooth an image
**
** The function convolves the image @a src by a Gaussian kernel of
** variance @a s and writes the result to @a dst. The function also
** needs a scratch buffer @a dst of the same size of @a src and @a
** dst.
**
** @param dst output image buffer.
** @param temp scratch image buffer.
** @param src input image buffer.
** @param width width of the buffers.
** @param height height of the buffers.
** @param s standard deviation of the Gaussian kernel.
**/
void
Sift::smooth
(pixel_t* dst, pixel_t* temp,
pixel_t* src, int width, int height,
VL::float_t s)
{
// make sure a buffer large enough has been allocated
// to hold the filter
int W = int( ceil( VL::float_t(4.0) * s ) ) ;
if( ! filter ) {
filterReserved = 0 ;
}
if( filterReserved < W ) {
filterReserved = W ;
if( filter ) delete [] filter ;
filter = new pixel_t [ 2* filterReserved + 1 ] ;
}
// pre-compute filter
for(int j = 0 ; j < 2*W+1 ; ++j)
filter[j] = VL::pixel_t
(std::exp
(VL::float_t
(-0.5 * (j-W) * (j-W) / (s*s) ))) ;
// normalize to one
normalize(filter, W) ;
// convolve
econvolve(temp, src, width, height, filter, W) ;
econvolve(dst, temp, height, width, filter, W) ;
}
// ===================================================================
// Sift(), ~Sift()
// -------------------------------------------------------------------
/** @brief Initialize Gaussian scale space parameters
**
** @param _im_pt Source image data
** @param _width Source image width
** @param _height Source image height
** @param _sigman Nominal smoothing value of the input image.
** @param _sigma0 Base smoothing level.
** @param _O Number of octaves.
** @param _S Number of levels per octave.
** @param _omin First octave.
** @param _smin First level in each octave.
** @param _smax Last level in each octave.
**/
Sift::Sift(const pixel_t* _im_pt, int _width, int _height,
VL::float_t _sigman,
VL::float_t _sigma0,
int _O, int _S,
int _omin, int _smin, int _smax)
: sigman( _sigman ),
sigma0( _sigma0 ),
O( _O ),
S( _S ),
omin( _omin ),
smin( _smin ),
smax( _smax ),
magnif( 3.0f ),
normalizeDescriptor( true ),
temp( NULL ),
octaves( NULL ),
filter( NULL )
{
process(_im_pt, _width, _height) ;
}
/** @brief Destroy SIFT filter.
**/
Sift::~Sift()
{
freeBuffers() ;
}
/** Allocate buffers. Buffer sizes depend on the image size and the
** value of omin.
**/
void
Sift::
prepareBuffers()
{
// compute buffer size
int w = (omin >= 0) ? (width >> omin) : (width << -omin) ;
int h = (omin >= 0) ? (height >> omin) : (height << -omin) ;
int size = w*h* std::max
((smax - smin), 2*((smax+1) - (smin-2) +1)) ;
if( temp && tempReserved == size ) return ;
freeBuffers() ;
// allocate
Kmid = new kvalue [w*h];
KeyNum = new int [O-omin];
temp = new pixel_t [ size ] ;
tempReserved = size ;
tempIsGrad = false ;
tempOctave = 0 ;
octaves = new pixel_t* [ O ] ;
for(int o = 0 ; o < O ; ++o) {
octaves[o] = new pixel_t [ (smax - smin + 1) * w * h ] ;
w >>= 1 ;
h >>= 1 ;
}
}
/** @brief Free buffers.
**
** This function releases any buffer allocated by prepareBuffers().
**
** @sa prepareBuffers().
**/
void
Sift::
freeBuffers()
{
if( filter ) {
delete [] filter ;
}
filter = 0 ;
if( octaves ) {
for(int o = 0 ; o < O ; ++o) {
delete [] octaves[ o ] ;
}
delete [] octaves ;
}
octaves = 0 ;
if( temp ) {
delete [] temp ;
}
temp = 0 ;
}
// ===================================================================
// getKeypoint
// -------------------------------------------------------------------
/** @brief Get keypoint from position and scale
**
** The function returns a keypoint with a given position and
** scale. Note that the keypoint structure contains fields that make
** sense only in conjunction with a specific scale space. Therefore
** the keypoint structure should be re-calculated whenever the filter
** is applied to a new image, even if the parameters @a x, @a y and
** @a sigma do not change.
**
** @param x x coordinate of the center.
** @param y y coordinate of the center.
** @param sigma scale.
** @return Corresponding keypoint.
**/
Sift::Keypoint
Sift::getKeypoint(VL::float_t x, VL::float_t y, VL::float_t sigma) const
{
/*
The formula linking the keypoint scale sigma to the octave and
scale index is
(1) sigma(o,s) = sigma0 2^(o+s/S)
for which
(2) o + s/S = log2 sigma/sigma0 == phi.
In addition to the scale index s (which can be fractional due to
scale interpolation) a keypoint has an integer scale index is too
(which is the index of the scale level where it was detected in
the DoG scale space). We have the constraints:
- o and is are integer
- is is in the range [smin+1, smax-2 ]
- o is in the range [omin, omin+O-1]
- is = round(s) most of the time (but not always, due to the way s
is obtained by quadratic interpolation of the DoG scale space).
Depending on the values of smin and smax, often (2) has multiple
solutions is,o that satisfy all constraints. In this case we
choose the one with biggest index o (this saves a bit of
computation).
DETERMINING THE OCTAVE INDEX O
From (2) we have o = phi - s/S and we want to pick the biggest
possible index o in the feasible range. This corresponds to
selecting the smallest possible index s. We write s = is + ds
where in most cases |ds|<.5 (but in general |ds|<1). So we have
o = phi - s/S, s = is + ds , |ds| < .5 (or |ds| < 1).
Since is is in the range [smin+1,smax-2], s is in the range
[smin+.5,smax-1.5] (or [smin,smax-1]), the number o is an integer
in the range phi+[-smax+1.5,-smin-.5] (or
phi+[-smax+1,-smin]). Thus the maximum value of o is obtained for
o = floor(phi-smin-.5) (or o = floor(phi-smin)).
Finally o is clamped to make sure it is contained in the feasible
range.
DETERMINING THE SCALE INDEXES S AND IS
Given o we can derive is by writing (2) as
s = is + ds = S(phi - o).
We then take is = round(s) and clamp its value to be in the
feasible range.
*/
int o,ix,iy,is ;
VL::float_t s,phi ;
phi = log2(sigma/sigma0) ;
o = fast_floor( phi - (VL::float_t(smin)+.5)/S ) ;
o = ::min(o, omin+O-1) ;
o = ::max(o, omin ) ;
s = S * (phi - o) ;
is = int(s + 0.5) ;
is = ::min(is, smax - 2) ;
is = ::max(is, smin + 1) ;
VL::float_t per = getOctaveSamplingPeriod(o) ;
ix = int(x / per + 0.5) ;
iy = int(y / per + 0.5) ;
Keypoint key ;
key.o = o ;
key.ix = ix ;
key.iy = iy ;
key.is = is ;
key.x = x ;
key.y = y ;
key.s = s ;
key.sigma = sigma ;
return key ;
}
// ===================================================================
// process()
// -------------------------------------------------------------------
/** @brief Compute Gaussian Scale Space
**
** The method computes the Gaussian scale space of the specified
** image. The scale space data is managed internally and can be
** accessed by means of getOctave() and getLevel().
**
** @remark Calling this method will delete the list of keypoints
** constructed by detectKeypoints().
**
** @param _im_pt pointer to image data.
** @param _width image width.
** @param _height image height.
**/
void
Sift::
process(const pixel_t* _im_pt, int _width, int _height)
{
using namespace Detail ;
width = _width ;
height = _height ;
prepareBuffers() ;
VL::float_t sigmak = powf(2.0f, 1.0 / S) ;
VL::float_t dsigma0 = sigma0 * sqrt (1.0f - 1.0f / (sigmak*sigmak) ) ;
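// dsigma0 * sigmak^s is the incremental smoothing needed to go from level
// s-1 to level s, i.e. sqrt(sigma(s)^2 - sigma(s-1)^2).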
// -----------------------------------------------------------------
// Make pyramid base
// -----------------------------------------------------------------
if( omin < 0 ) {
copyAndUpsampleRows(temp, _im_pt, width, height ) ;
copyAndUpsampleRows(octaves[0], temp, height, 2*width ) ;
for(int o = -1 ; o > omin ; --o) {
copyAndUpsampleRows(temp, octaves[0], width << -o, height << -o) ;
copyAndUpsampleRows(octaves[0], temp, height << -o, 2*(width << -o)) ;
}
} else if( omin > 0 ) {
copyAndDownsample(octaves[0], _im_pt, width, height, 1 << omin) ;
} else {
copy(octaves[0], _im_pt, width, height) ;
}
{
VL::float_t sa = sigma0 * powf(sigmak, smin) ;
VL::float_t sb = sigman / powf(2.0f, omin) ; // review this
if( sa > sb ) {
VL::float_t sd = sqrt ( sa*sa - sb*sb ) ;
smooth( octaves[0], temp, octaves[0],
getOctaveWidth(omin),
getOctaveHeight(omin),
sd ) ;
}
}
// -----------------------------------------------------------------
// Make octaves
// -----------------------------------------------------------------
for(int o = omin ; o < omin+O ; ++o) {
// Prepare octave base
if( o > omin ) {
int sbest = ::min(smin + S, smax) ;
copyAndDownsample(getLevel(o, smin ),
getLevel(o-1, sbest),
getOctaveWidth(o-1),
getOctaveHeight(o-1), 2 ) ;
VL::float_t sa = sigma0 * powf(sigmak, smin ) ;
VL::float_t sb = sigma0 * powf(sigmak, sbest - S ) ;
if(sa > sb ) {
VL::float_t sd = sqrt ( sa*sa - sb*sb ) ;
smooth( getLevel(o,0), temp, getLevel(o,0),
getOctaveWidth(o), getOctaveHeight(o),
sd ) ;
}
}
// Make other levels
for(int s = smin+1 ; s <= smax ; ++s) {
VL::float_t sd = dsigma0 * powf(sigmak, s) ;
smooth( getLevel(o,s), temp, getLevel(o,s-1),
getOctaveWidth(o), getOctaveHeight(o),
sd ) ;
}
}
}
/** @brief Sift detector
**
** The function runs the SIFT detector on the stored Gaussian scale
** space (see process()). The detector consists in three steps
**
** - local maxima detection;
** - subpixel interpolation;
** - rejection of weak keypoints (@a threshold);
** - rejection of keypoints on edge-like structures (@a edgeThreshold).
**
** As they are found, keypoints are added to an internal list. This
** list can be accessed by means of the member functions
** getKeypointsBegin() and getKeypointsEnd(). The list is ordered by
** octave, which is useful to speed-up computeKeypointOrientations()
** and computeKeypointDescriptor().
**/
__global__ void DogKernel(pixel_t* dst, pixel_t* srca, pixel_t* srcb, int width)
{
__shared__ pixel_t src[D_BLOCK_SIZE];
int tx = threadIdx.x;
int bx = blockIdx.x;
int by = blockIdx.y;
int Row = by;
int Col = bx*D_BLOCK_SIZE + tx; //D_BLOCK_SIZE = 128
src[tx] = srcb[Row * width + Col];
__syncthreads();
if (Col < width)
{
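// Each thread writes one difference pixel (srca minus the current level held
// in shared memory) and then promotes the current level into srca, so the
// next launch can reuse it without another host-to-device copy.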
dst[Row * width + Col] = srca[Row * width + Col] - src[tx];
srca[Row * width + Col] = src[tx];
}
}
void Sift::Compute_Dog (pixel_t* pt, int o, int smin, int smax, int width, int height)
{
unsigned int src_size = sizeof(pixel_t) * (width*height);
unsigned int dst_size = sizeof(pixel_t) * (width*height);
pixel_t* dst_d = NULL;
pixel_t* srca_d = NULL;
pixel_t* srcb_d = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &dst_d, dst_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &srca_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &srcb_d, src_size));
dim3 dimBlock, dimGrid;
dimBlock.x = D_BLOCK_SIZE;
dimBlock.y = 1;
dimGrid.x = (width / D_BLOCK_SIZE) + ( (width % D_BLOCK_SIZE) ? 1:0 );
dimGrid.y = height;
pixel_t* srca = getLevel(o, smin) ;
CUDA_SAFE_CALL( hipMemcpy(srca_d, srca, src_size, hipMemcpyHostToDevice) );
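// srca is uploaded once; DogKernel overwrites it on the device with the level
// it just consumed, so only srcb needs a fresh copy on each iteration.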
for(int s = smin + 1 ; s <= smax ; ++s)
{
pixel_t* srcb = getLevel(o, s) ;
CUDA_SAFE_CALL( hipMemcpy(srcb_d, srcb, src_size, hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( DogKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dst_d, srca_d, srcb_d, width);
CUDA_SAFE_CALL(hipMemcpy(pt, dst_d, dst_size, hipMemcpyDeviceToHost));
pt = pt + width*height;
}
hipFree(dst_d);
hipFree(srca_d);
hipFree(srcb_d);
}
__global__ void FindkKernel(kvalue* dst, pixel_t* srcT, pixel_t* srcM, pixel_t* srcB, int width, int height, float threshold, float edgethreshold)
{
__shared__ pixel_t Mtop[F_BLOCK_SIZE][F_BLOCK_SIZE]; //F_BLOCK_SIZE = F_TILE_SIZE + 2
__shared__ pixel_t Mmid[F_BLOCK_SIZE][F_BLOCK_SIZE];
__shared__ pixel_t Mbot[F_BLOCK_SIZE][F_BLOCK_SIZE];
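// Three shared-memory slices cache three consecutive DoG levels for one
// F_TILE_SIZE x F_TILE_SIZE tile plus a one-pixel halo, so the 3x3x3
// neighbourhood test below reads shared memory only.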
int tx, ty, bx, by;
tx = threadIdx.x;
ty = threadIdx.y;
bx = blockIdx.x;
by = blockIdx.y;
int i, j, Row, Col;
float extr = 1.0f;
float Threshold = threshold;
float edgeThreshold = edgethreshold;
Row = by*F_TILE_SIZE + ty;
Col = bx*F_TILE_SIZE + tx;
if (Row < height && Col < width)
{
Mtop[ty][tx] = srcT[Row * width + Col];
Mmid[ty][tx] = srcM[Row * width + Col];
Mbot[ty][tx] = srcB[Row * width + Col];
//dst[Row * width + Col].flag = 0.0f;
}
else
{
Mtop[ty][tx] = 0;
Mmid[ty][tx] = 0;
Mbot[ty][tx] = 0;
}
__syncthreads();
if(ty < F_TILE_SIZE && tx < F_TILE_SIZE && Row < (height -1) && Col < (width-1))
{
if (Mmid[ty+1][tx+1] > 0)
{
for(i = 0; i < 3; i++)
{
for(j = 0; j < 3; j++)
{
if ( Mmid[ty+1][tx+1] < Mtop[ty+i][tx+j] || Mmid[ty+1][tx+1] < Mbot[ty+i][tx+j] ||
Mmid[ty+1][tx+1] < Mmid[ty][tx+j] || Mmid[ty+1][tx+1] < Mmid[ty+2][tx+j] ||
Mmid[ty+1][tx+1] < Mmid[ty+1][tx] || Mmid[ty+1][tx+1] < Mmid[ty+1][tx+2] ||
Mmid[ty+1][tx+1] < Threshold)
{ extr = 0; break; }
}
if (extr == 0)
break;
}
}
else
{
for(i = 0; i < 3; i++)
{
for(j = 0; j < 3; j++)
{
if ( Mmid[ty+1][tx+1] > Mtop[ty+i][tx+j] || Mmid[ty+1][tx+1] > Mbot[ty+i][tx+j] ||
Mmid[ty+1][tx+1] > Mmid[ty][tx+j] || Mmid[ty+1][tx+1] > Mmid[ty+2][tx+j] ||
Mmid[ty+1][tx+1] > Mmid[ty+1][tx] || Mmid[ty+1][tx+1] > Mmid[ty+1][tx+2] ||
Mmid[ty+1][tx+1] > Threshold * (-1))
{ extr = 0; break; }
}
if (extr == 0)
break;
}
}
__syncthreads();
if(extr == 1)
{
//float4 value = RefineKernel(Mtop, Mmid, Mbot, width, height, threshold, edgethreshold)
//int StepX = 0;
//int StepY = 0;
float ds = 0.0f;
float dy = 0.0f;
float dx = 0.0f;
float Vx2, fx, fy, fs, fxx, fyy, fss, fxy, fxs, fys;
// for(int iter = 0 ; iter < 5 ; ++iter) {
//tx = threadIdx.x + StepX;
//ty = threadIdx.y + StepY;
Vx2 = Mmid[ty+1][tx+1] * 2.0f;
fx = 0.5f * (Mmid[ty+1][tx+2] - Mmid[ty+1][tx]);
fy = 0.5f * (Mmid[ty+2][tx+1] - Mmid[ty][tx+1]);
fs = 0.5f * (Mbot[ty+1][tx+1] - Mtop[ty+1][tx+1]);
fxx = Mmid[ty+1][tx+2] + Mmid[ty+1][tx] - Vx2;
fyy = Mmid[ty+2][tx+1] + Mmid[ty][tx+1] - Vx2;
fss = Mbot[ty+1][tx+1] + Mtop[ty+1][tx+1] - Vx2;
fxy = 0.25f * (Mmid[ty+2][tx+2] + Mmid[ty][tx] - Mmid[ty+2][tx] - Mmid[ty][tx+2]);
fxs = 0.25f * (Mbot[ty+1][tx+2] + Mtop[ty+1][tx] - Mbot[ty+1][tx] - Mtop[ty+1][tx+2]);
fys = 0.25f * (Mbot[ty+2][tx+1] + Mtop[ty][tx+1] - Mbot[ty][tx+1] - Mtop[ty+2][tx+1]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
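// Solve the 3x3 Newton system by Gaussian elimination with partial pivoting;
// each float4 packs one row of the Hessian together with the corresponding
// entry of the right-hand side.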
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10){
if(maxa == A1.x){
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x){
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y)){
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10) {
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10) {
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
}
}
}
// StepX= ((ds > 0.6 && ( bx*F_TILE_SIZE + tx + 1 ) < width -2) ? 1 : 0 ) + ((ds < -0.6 && (bx*F_TILE_SIZE + tx + 1) > 1 ) ? -1 : 0 ) ;
// StepY= ((dy > 0.6 && ( by*F_TILE_SIZE + ty + 1 )< height -2) ? 1 : 0 ) + ((dy < -0.6 && (by*F_TILE_SIZE + ty + 1) > 1 ) ? -1 : 0 ) ;
// if( StepX == 0 && StepY == 0 ) break ;
// }
float val = Mmid[ty+1][tx+1] + 0.5f * (fx * dx + fy * dy + fs * ds);
float score = (fxx + fyy) * (fxx + fyy) / (fxx * fyy - fxy * fxy);
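// val is the interpolated DoG value used for the contrast test; score is
// tr(H)^2 / det(H) of the 2x2 spatial Hessian, the edge-response ratio.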
if(fabs(val) > threshold && score < (edgeThreshold + 1)*(edgeThreshold + 1)/edgeThreshold && score >= 0 &&
fabs(dx) < 1.5 && fabs(dy) < 0.6 && fabs(ds) < 0.6 )
{
dst[(Row+1) * width + (Col+1)].dx = dx;
dst[(Row+1) * width + (Col+1)].dy = dy;
dst[(Row+1) * width + (Col+1)].ds = ds;
dst[(Row+1) * width + (Col+1)].flag = extr;
}
else
dst[(Row+1) * width + (Col+1)].flag = 0.0f;
}
else
dst[(Row+1) * width + (Col+1)].flag = extr;
}
}
void
Sift :: FindkExtrem(pixel_t* pt, int width, int height, int o, float xperiod, float threshold, float edgethreshold)
{
unsigned int dst_size = sizeof(kvalue) * (width*height);
unsigned int src_size = sizeof(pixel_t) * (width*height);
pixel_t* srcT_d = NULL;
pixel_t* srcM_d = NULL;
pixel_t* srcB_d = NULL;
kvalue* dst_d = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &srcT_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &srcM_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &srcB_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &dst_d, dst_size));
dim3 dimBlock, dimGrid;
dimBlock.x = F_BLOCK_SIZE;
dimBlock.y = F_BLOCK_SIZE;
dimGrid.x = ((width-2) / F_TILE_SIZE) + (((width-2) % F_TILE_SIZE) ? 1:0 );
dimGrid.y = ((height-2) / F_TILE_SIZE) + (((height-2) % F_TILE_SIZE) ? 1:0 );
pixel_t* src = pt;
// kvalue* dst;
// dst = new kvalue [width*height] ;
Keypoint k ;
int uu = 0;
for(int s = smin + 1 ; s <= smax-2 ; ++s)
{
CUDA_SAFE_CALL( hipMemcpy(srcT_d, src, src_size, hipMemcpyHostToDevice) );
src = src + width * height;
CUDA_SAFE_CALL( hipMemcpy(srcM_d, src, src_size, hipMemcpyHostToDevice) );
src = src + width * height;
CUDA_SAFE_CALL( hipMemcpy(srcB_d, src, src_size, hipMemcpyHostToDevice) );
src = src - width * height;
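// The three copies slide a window over the DoG stack: at the end of the
// iteration src points one level higher, so (T, M, B) always hold three
// consecutive levels.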
hipLaunchKernelGGL(( FindkKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, dst_d, srcT_d, srcM_d, srcB_d, width, height, 0.8*threshold, edgethreshold);
CUDA_SAFE_CALL(hipMemcpy(Kmid, dst_d, dst_size, hipMemcpyDeviceToHost));
float xn;
float yn;
float sn;
for(int y = 0; y < height-1; y++)
for(int x = 0; x < width-1; x++)
{
if (Kmid[width * y + x].flag == 1.0f)
{
xn = x + Kmid[width * y + x].dx;
yn = y + Kmid[width * y + x].dy;
sn = s + Kmid[width * y + x].ds;
if(xn >= 0 && xn <= width -1 && yn >= 0 && yn <= height -1 && sn >= smin && sn <= smax )
{
k.o = o;
k.ix = x ;
k.iy = y ;
k.is = s ;
k.x = xn * xperiod ;
k.y = yn * xperiod ;
k.s = sn;
k.sigma = getScaleFromIndex(o,sn) ;
keypoints.push_back(k);
KeyNum[o-omin]++;
uu++;
// std::cout<<x<<","<<y<<","<<s<<","<<k.x<<","<<k.y<<","<<k.sigma<<","<<"|| "<<std::flush;
}
}
}
}
//std::cout<<" " <<" "<<std::endl ;
std::cout<<"o is "<<o<<" total key number is "<<KeyNum[o-omin]<<std::endl;
hipFree(srcT_d);
hipFree(srcM_d);
hipFree(srcB_d);
hipFree(dst_d);
//free dst;
}
__global__ void FindkKernel_small(kvalue* dst, pixel_t* srcT, pixel_t* srcM, pixel_t* srcB, int width, int height, float threshold, float edgethreshold)
{
__shared__ pixel_t Mtop[F_BLOCK_SIZE_S][F_BLOCK_SIZE_S]; //F_BLOCK_SIZE_S = F_TILE_SIZE + 4
__shared__ pixel_t Mmid[F_BLOCK_SIZE_S][F_BLOCK_SIZE_S];
__shared__ pixel_t Mbot[F_BLOCK_SIZE_S][F_BLOCK_SIZE_S];
int tx, ty, bx, by;
tx = threadIdx.x;
ty = threadIdx.y;
bx = blockIdx.x;
by = blockIdx.y;
int i, j, Row, Col;
float extr = 1.0f;
float Threshold = threshold;
float edgeThreshold = edgethreshold;
Row = by*F_TILE_SIZE_S + ty;
Col = bx*F_TILE_SIZE_S + tx;
if ( ((Row - 1) < 0) || (Row > height) || ((Col - 1) < 0) || (Col > width) )
{
Mtop[ty][tx] = 0;
Mmid[ty][tx] = 0;
Mbot[ty][tx] = 0;
}
else
{
Mtop[ty][tx] = srcT[(Row-1) * width + (Col-1)];
Mmid[ty][tx] = srcM[(Row-1) * width + (Col-1)];
Mbot[ty][tx] = srcB[(Row-1) * width + (Col-1)];
//dst[(Row-1) * width + (Col-1)].flag = 0.0f;
}
__syncthreads();
if(ty < F_TILE_SIZE_S && tx < F_TILE_SIZE_S && Row < (height -2) && Col < (width-2))
{
if (Mmid[ty+2][tx+2] > 0)
{
for(i = 1; i < 4; i++)
{
for(j = 1; j < 4; j++)
{
if ( Mmid[ty+2][tx+2] < Mtop[ty+i][tx+j] || Mmid[ty+2][tx+2] < Mbot[ty+i][tx+j] ||
Mmid[ty+2][tx+2] < Mmid[ty+1][tx+j] || Mmid[ty+2][tx+2] < Mmid[ty+3][tx+j] ||
Mmid[ty+2][tx+2] < Mmid[ty+2][tx+1] || Mmid[ty+2][tx+2] < Mmid[ty+2][tx+3] ||
Mmid[ty+2][tx+2] < Threshold)
{ extr = 0; break; }
}
if (extr == 0)
break;
}
}
else
{
for(i = 1; i < 4; i++)
{
for(j = 1; j < 4; j++)
{
if ( Mmid[ty+2][tx+2] > Mtop[ty+i][tx+j] || Mmid[ty+2][tx+2] > Mbot[ty+i][tx+j] ||
Mmid[ty+2][tx+2] > Mmid[ty+1][tx+j] || Mmid[ty+2][tx+2] > Mmid[ty+3][tx+j] ||
Mmid[ty+2][tx+2] > Mmid[ty+2][tx+1] || Mmid[ty+2][tx+2] > Mmid[ty+2][tx+3] ||
Mmid[ty+2][tx+2] > Threshold * (-1))
{ extr = 0; break; }
}
if (extr == 0)
break;
}
}
__syncthreads();
if(extr == 1)
{
int StepX = 0;
int StepY = 0;
float ds = 0.0f;
float dy = 0.0f;
float dx = 0.0f;
float Vx2, fx, fy, fs, fxx, fyy, fss, fxy, fxs, fys;
for(int iter = 0 ; iter < 2 ; ++iter) {
tx = threadIdx.x + StepX;
ty = threadIdx.y + StepY;
Vx2 = Mmid[ty+2][tx+2] * 2.0f;
fx = 0.5f * (Mmid[ty+2][tx+3] - Mmid[ty+2][tx+1]);
fy = 0.5f * (Mmid[ty+3][tx+2] - Mmid[ty+1][tx+2]);
fs = 0.5f * (Mbot[ty+2][tx+2] - Mtop[ty+2][tx+2]);
fxx = Mmid[ty+2][tx+3] + Mmid[ty+2][tx+1] - Vx2;
fyy = Mmid[ty+3][tx+2] + Mmid[ty+1][tx+2] - Vx2;
fss = Mbot[ty+2][tx+2] + Mtop[ty+2][tx+2] - Vx2;
fxy = 0.25f * (Mmid[ty+3][tx+3] + Mmid[ty+1][tx+1] - Mmid[ty+3][tx+1] - Mmid[ty+1][tx+3]);
fxs = 0.25f * (Mbot[ty+2][tx+3] + Mtop[ty+2][tx+1] - Mbot[ty+2][tx+1] - Mtop[ty+2][tx+3]);
fys = 0.25f * (Mbot[ty+3][tx+2] + Mtop[ty+1][tx+2] - Mbot[ty+1][tx+2] - Mtop[ty+3][tx+2]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10){
if(maxa == A1.x){
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x){
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y)){
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10) {
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10) {
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
}
}
}
StepX= ((dx > 0.6 && ( bx*F_TILE_SIZE_S + tx + 2 ) < width -2) ? 1 : 0 ) + ((dx < -0.6 && (bx*F_TILE_SIZE_S + tx ) > 1 ) ? -1 : 0 ) ;
StepY= ((dy > 0.6 && ( by*F_TILE_SIZE_S + ty + 2 )< height -2) ? 1 : 0 ) + ((dy < -0.6 && (by*F_TILE_SIZE_S + ty ) > 1 ) ? -1 : 0 ) ;
if( StepX == 0 && StepY == 0 ) break ;
}
float val = Mmid[ty+2][tx+2] + 0.5f * (fx * dx + fy * dy + fs * ds);
float score = (fxx + fyy) * (fxx + fyy) / (fxx * fyy - fxy * fxy);
Row = by*F_TILE_SIZE_S + ty;
Col = bx*F_TILE_SIZE_S + tx;
if(fabs(val) > threshold && score < (edgeThreshold + 1)*(edgeThreshold + 1)/edgeThreshold && score >= 0 &&
fabs(dx) < 0.6 && fabs(dy) < 0.6 && fabs(ds) < 1.5 )
{
dst[(Row+1) * width + (Col+1)].dx = dx;
dst[(Row+1) * width + (Col+1)].dy = dy;
dst[(Row+1) * width + (Col+1)].ds = ds;
dst[(Row+1) * width + (Col+1)].flag = extr;
}
else
dst[(Row+1) * width + (Col+1)].flag = 0.0f;
}
else
dst[(Row+1) * width + (Col+1)].flag = extr;
}
}
void
Sift :: FindkExtrem_small(pixel_t* pt, int width, int height, int o, float xperiod, float threshold, float edgethreshold)
{
unsigned int dst_size = sizeof(kvalue) * (width*height);
unsigned int src_size = sizeof(pixel_t) * (width*height);
pixel_t* srcT_d = NULL;
pixel_t* srcM_d = NULL;
pixel_t* srcB_d = NULL;
kvalue* dst_d = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &srcT_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &srcM_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &srcB_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &dst_d, dst_size));
dim3 dimBlock, dimGrid;
dimBlock.x = F_BLOCK_SIZE_S;
dimBlock.y = F_BLOCK_SIZE_S;
dimGrid.x = ((width-2) / F_TILE_SIZE_S) + (((width-2) % F_TILE_SIZE_S) ? 1:0 );
dimGrid.y = ((height-2) / F_TILE_SIZE_S) + (((height-2) % F_TILE_SIZE_S) ? 1:0 );
pixel_t* src = pt;
int uu = 0;
for(int s = smin + 1 ; s <= smax-2 ; ++s)
{
CUDA_SAFE_CALL( hipMemcpy(srcT_d, src, src_size, hipMemcpyHostToDevice) );
src = src + width * height;
CUDA_SAFE_CALL( hipMemcpy(srcM_d, src, src_size, hipMemcpyHostToDevice) );
src = src + width * height;
CUDA_SAFE_CALL( hipMemcpy(srcB_d, src, src_size, hipMemcpyHostToDevice) );
src = src - width * height;
hipLaunchKernelGGL(( FindkKernel_small), dim3(dimGrid), dim3(dimBlock), 0, 0, dst_d, srcT_d, srcM_d, srcB_d, width, height, 0.8*threshold, edgethreshold);
CUDA_SAFE_CALL(hipMemcpy(Kmid, dst_d, dst_size, hipMemcpyDeviceToHost));
float xn;
float yn;
float sn;
for(int y = 1; y < height-1; y++)
for(int x = 1; x < width-1; x++)
{
if (Kmid[width * y + x].flag == 1.0f)
{
xn = x + Kmid[width * y + x].dx;
yn = y + Kmid[width * y + x].dy;
sn = s + Kmid[width * y + x].ds;
if(xn >= 0 && xn <= width -1 && yn >= 0 && yn <= height -1 && sn >= smin && sn <= smax )
{
Keypoint k ;
k.o = o ;
k.ix = x ;
k.iy = y ;
k.is = s ;
k.x = xn * xperiod ;
k.y = yn * xperiod ;
k.s = sn;
k.sigma = getScaleFromIndex(o,sn) ;
keypoints.push_back(k) ;
KeyNum[o-omin]++;
uu++;
// std::cout<<x<<","<<y<<","<<s<<","<<k.x<<","<<k.y<<","<<k.sigma<<","<<"|| "<<std::flush;
//std::cout<<x<<","<<y<<","<<s<<", "<<std::flush;
}
}
}
}
std::cout<<" " <<" "<<std::endl ;
std::cout<<"o is "<<o<<" total key number is "<<KeyNum[o-omin]<<std::endl;
hipFree(srcT_d);
hipFree(srcM_d);
hipFree(srcB_d);
hipFree(dst_d);
//free dst;
//delete []Kdst;
}
void
Sift::detectKeypoints(VL::float_t threshold, VL::float_t edgeThreshold)
{
keypoints.clear() ;
//int nValidatedKeypoints = 0 ;
// Process one octave at a time
for(int o = omin; o < omin + O; ++o) {
//int const xo = 1 ;
int const yo = getOctaveWidth(o) ;
int const so = getOctaveWidth(o) * getOctaveHeight(o) ;
int const ow = getOctaveWidth(o) ;
int const oh = getOctaveHeight(o) ;
VL::float_t xperiod = getOctaveSamplingPeriod(o) ;
// -----------------------------------------------------------------
// Difference of Gaussians
// -----------------------------------------------------------------
pixel_t* dog = temp ;
tempIsGrad = false ;
KeyNum[o-omin] = 0;
{
pixel_t* pt = dog ;
Compute_Dog (pt, o, smin, smax, yo, oh); //gpu function
/*
for(int s = smin ; s <= smax-1 ; ++s) {
pixel_t* srca = getLevel(o, s ) ;
pixel_t* srcb = getLevel(o, s+1) ;
pixel_t* enda = srcb ;
while( srca != enda ) {
*pt++ = *srcb++ - *srca++ ;
}
}
*/
}
// -----------------------------------------------------------------
// Find points of extremum
// -----------------------------------------------------------------
// std::cout<<" " <<" "<<std::endl ;
std::cout<<" " <<" "<<std::endl ;
//std::cout<<"O is "<<o<<" "<<std::endl ;
pixel_t* pt = dog ;
if (O < 8)
FindkExtrem_small(pt, yo, oh, o, xperiod, threshold, edgeThreshold);
else
FindkExtrem(pt, yo, oh, o, xperiod, threshold, edgeThreshold);
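// Pick the extremum kernel by octave count: the _small variant (wider apron,
// iterative sample relocation) is used when the pyramid has few octaves,
// i.e. for smaller images; the plain kernel avoids that overhead otherwise.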
/*
{
int uu;
pixel_t* pt = dog + xo + yo + so ;
for(int s = smin+1 ; s <= smax-2 ; ++s) {
for(int y = 1 ; y < oh - 1 ; ++y) {
for(int x = 1 ; x < ow - 1 ; ++x) {
pixel_t v = *pt ;
// assert( (pt - x*xo - y*yo - (s-smin)*so) - dog == 0 ) ;
#define CHECK_NEIGHBORS(CMP,SGN) \
( v CMP ## = SGN 0.8 * threshold && \
v CMP *(pt + xo) && \
v CMP *(pt - xo) && \
v CMP *(pt + so) && \
v CMP *(pt - so) && \
v CMP *(pt + yo) && \
v CMP *(pt - yo) && \
\
v CMP *(pt + yo + xo) && \
v CMP *(pt + yo - xo) && \
v CMP *(pt - yo + xo) && \
v CMP *(pt - yo - xo) && \
\
v CMP *(pt + xo + so) && \
v CMP *(pt - xo + so) && \
v CMP *(pt + yo + so) && \
v CMP *(pt - yo + so) && \
v CMP *(pt + yo + xo + so) && \
v CMP *(pt + yo - xo + so) && \
v CMP *(pt - yo + xo + so) && \
v CMP *(pt - yo - xo + so) && \
\
v CMP *(pt + xo - so) && \
v CMP *(pt - xo - so) && \
v CMP *(pt + yo - so) && \
v CMP *(pt - yo - so) && \
v CMP *(pt + yo + xo - so) && \
v CMP *(pt + yo - xo - so) && \
v CMP *(pt - yo + xo - so) && \
v CMP *(pt - yo - xo - so) )
if( CHECK_NEIGHBORS(>,+) || CHECK_NEIGHBORS(<,-) ) {
Keypoint k ;
k.ix = x ;
k.iy = y ;
k.is = s ;
keypoints.push_back(k) ;
std::cout<<x<<","<<y<<","<<s<<","<<o<<","
<<" "<<std::flush ;
uu++;
}
pt += 1 ;
}
pt += 2 ;
}
pt += 2*yo ;
std::cout<<" "<<std::endl;
std::cout<<"s is "<<s<<" total key number is "<<uu<<std::endl;
uu = 0;
}
}
*/
// -----------------------------------------------------------------
// Refine local maxima
// -----------------------------------------------------------------
/* int uu;
{ // refine
KeypointsIter siter ;
KeypointsIter diter ;
for(diter = siter = keypointsBegin() + nValidatedKeypoints ;
siter != keypointsEnd() ;
++siter) {
int x = int( siter->ix ) ;
int y = int( siter->iy ) ;
int s = int( siter->is ) ;
VL::float_t Dx=0,Dy=0,Ds=0,Dxx=0,Dyy=0,Dss=0,Dxy=0,Dxs=0,Dys=0 ;
VL::float_t b [3] ;
pixel_t* pt ;
int dx = 0 ;
int dy = 0 ;
// must be exec. at least once
for(int iter = 0 ; iter < 5 ; ++iter) {
VL::float_t A[3*3] ;
x += dx ;
y += dy ;
pt = dog
+ xo * x
+ yo * y
+ so * (s - smin) ;
#define at(dx,dy,ds) (*( pt + (dx)*xo + (dy)*yo + (ds)*so))
#define Aat(i,j) (A[(i)+(j)*3])
Dx = 0.5 * (at(+1,0,0) - at(-1,0,0)) ;
Dy = 0.5 * (at(0,+1,0) - at(0,-1,0));
Ds = 0.5 * (at(0,0,+1) - at(0,0,-1)) ;
// Compute the Hessian.
Dxx = (at(+1,0,0) + at(-1,0,0) - 2.0 * at(0,0,0)) ;
Dyy = (at(0,+1,0) + at(0,-1,0) - 2.0 * at(0,0,0)) ;
Dss = (at(0,0,+1) + at(0,0,-1) - 2.0 * at(0,0,0)) ;
Dxy = 0.25 * ( at(+1,+1,0) + at(-1,-1,0) - at(-1,+1,0) - at(+1,-1,0) ) ;
Dxs = 0.25 * ( at(+1,0,+1) + at(-1,0,-1) - at(-1,0,+1) - at(+1,0,-1) ) ;
Dys = 0.25 * ( at(0,+1,+1) + at(0,-1,-1) - at(0,-1,+1) - at(0,+1,-1) ) ;
// Solve linear system.
Aat(0,0) = Dxx ;
Aat(1,1) = Dyy ;
Aat(2,2) = Dss ;
Aat(0,1) = Aat(1,0) = Dxy ;
Aat(0,2) = Aat(2,0) = Dxs ;
Aat(1,2) = Aat(2,1) = Dys ;
b[0] = - Dx ;
b[1] = - Dy ;
b[2] = - Ds ;
// Gauss elimination
for(int j = 0 ; j < 3 ; ++j) {
// look for leading pivot
VL::float_t maxa = 0 ;
VL::float_t maxabsa = 0 ;
int maxi = -1 ;
int i ;
for(i = j ; i < 3 ; ++i) {
VL::float_t a = Aat(i,j) ;
VL::float_t absa = fabsf( a ) ;
if ( absa > maxabsa ) {
maxa = a ;
maxabsa = absa ;
maxi = i ;
}
}
// singular?
if( maxabsa < 1e-10f ) {
b[0] = 0 ;
b[1] = 0 ;
b[2] = 0 ;
break ;
}
i = maxi ;
// swap j-th row with i-th row and
// normalize j-th row
for(int jj = j ; jj < 3 ; ++jj) {
std::swap( Aat(j,jj) , Aat(i,jj) ) ;
Aat(j,jj) /= maxa ;
}
std::swap( b[j], b[i] ) ;
b[j] /= maxa ;
// elimination
for(int ii = j+1 ; ii < 3 ; ++ii) {
VL::float_t x = Aat(ii,j) ;
for(int jj = j ; jj < 3 ; ++jj) {
Aat(ii,jj) -= x * Aat(j,jj) ;
}
b[ii] -= x * b[j] ;
}
}
// backward substitution
for(int i = 2 ; i > 0 ; --i) {
VL::float_t x = b[i] ;
for(int ii = i-1 ; ii >= 0 ; --ii) {
b[ii] -= x * Aat(ii,i) ;
}
}
// If the translation of the keypoint is big, move the keypoint
// and re-iterate the computation. Otherwise we are all set.
dx= ((b[0] > 0.6 && x < ow-2) ? 1 : 0 )
+ ((b[0] < -0.6 && x > 1 ) ? -1 : 0 ) ;
dy= ((b[1] > 0.6 && y < oh-2) ? 1 : 0 )
+ ((b[1] < -0.6 && y > 1 ) ? -1 : 0 ) ;
// std::cout<<x<<","<<y<<"="<<at(0,0,0) <<"(" <<at(0,0,0)+0.5 * (Dx * b[0] + Dy * b[1] + Ds * b[2])<<")" <<" "<<std::flush ;
if( dx == 0 && dy == 0 ) break ;
}
// Accept-reject keypoint
{
VL::float_t val = at(0,0,0) + 0.5 * (Dx * b[0] + Dy * b[1] + Ds * b[2]) ;
VL::float_t score = (Dxx+Dyy)*(Dxx+Dyy) / (Dxx*Dyy - Dxy*Dxy) ;
VL::float_t xn = x + b[0] ;
VL::float_t yn = y + b[1] ;
VL::float_t sn = s + b[2] ;
if(fast_abs(val) > threshold &&
score < (edgeThreshold+1)*(edgeThreshold+1)/edgeThreshold &&
score >= 0 &&
fast_abs(b[0]) < 1.5 &&
fast_abs(b[1]) < 1.5 &&
fast_abs(b[2]) < 1.5 &&
xn >= 0 &&
xn <= ow-1 &&
yn >= 0 &&
yn <= oh-1 &&
sn >= smin &&
sn <= smax )
{
diter->o = o ;
diter->ix = x ;
diter->iy = y ;
diter->is = s ;
diter->x = xn * xperiod ;
diter->y = yn * xperiod ;
diter->s = sn ;
diter->sigma = getScaleFromIndex(o,sn) ;
++diter ;
// std::cout<<x<<","<<y<<","<<s<<","<<o<<","<<" "<<std::flush;
uu++;
}
}
} // next candidate keypoint
// prepare for next octave
keypoints.resize( diter - keypoints.begin() ) ;
nValidatedKeypoints = keypoints.size() ;
} // refine block
// std::cout<<" " <<" "<<std::endl ;
// std::cout<<" total key number is "<<uu<<std::endl;
uu = 0;
*/
} // next octave
}
// ===================================================================
// computeKeypointOrientations()
// -------------------------------------------------------------------
/** @brief Compute modulus and phase of the gradient
**
** The function computes the modulus and the angle of the gradient of
** the specified octave @a o. The result is stored in a temporary
** internal buffer accessed by computeKeypointDescriptor() and
** computeKeypointOrientations().
**
** The SIFT detector provides keypoint with scale index s in the
** range @c smin+1 and @c smax-2. As such, the buffer contains only
** these levels.
**
** If called multiple times on the same data, the function exits
** immediately.
**
** @param o octave of interest.
**/
__global__ void GradKernelZ(pixel_t* src, pixel_t* dst, int width, int height, int square)
{
__shared__ pixel_t Ms[G_BLOCK_SIZE][G_BLOCK_SIZE]; //
int tx, ty, bx, by, bz;
//float m, t;
tx = threadIdx.x;
ty = threadIdx.y;
bx = blockIdx.x;
by = blockIdx.y;
bz = blockIdx.z;
int Row = by*G_TILE_SIZE + ty;
int Col = bx*G_TILE_SIZE + tx;
int Dep = bz*square;
if (Row < height && Col < width)
{
Ms[ty][tx] = src[Dep + Row * width + Col];
}
else
{
Ms[ty][tx] = 0.0f;
}
__syncthreads();
if(ty < G_TILE_SIZE && tx < G_TILE_SIZE && Row < (height -1) && Col < (width-1))
{
float_t Gx = 0.5f * (Ms[ty+1][tx+2] - Ms[ty+1][tx]);
float_t Gy = 0.5f * (Ms[ty+2][tx+1] - Ms[ty][tx+1]);
float_t m = sqrt( Gx*Gx + Gy*Gy );
float_t x = atan2(Gy, Gx) + float(2*M_PI);
float_t t = (x >= 0)? fmod (x, float(2*M_PI)) : float(2*M_PI) + fmod (x, float(2*M_PI));
dst[2*Dep + 2*width*(Row + 1) + 2*(Col + 1)] = m;
dst[2*Dep + 2*width*(Row + 1) + 2*(Col + 1) + 1] = t;
}
}
__global__ void GradKernel(pixel_t* src, pixel_t* dst, int width, int height, int square)
{
__shared__ pixel_t Ms[G_BLOCK_SIZE][G_BLOCK_SIZE]; //F_BLOCK_SIZE = F_TILE_SIZE + 2
int tx, ty, bx, by;
//float m, t;
tx = threadIdx.x;
ty = threadIdx.y;
bx = blockIdx.x;
by = blockIdx.y;
int Row = by*G_TILE_SIZE + ty;
int Col = bx*G_TILE_SIZE + tx;
if (Row < height && Col < width)
{
Ms[ty][tx] = src[ Row * width + Col];
}
else
{
Ms[ty][tx] = 0.0f;
}
__syncthreads();
if(ty < G_TILE_SIZE && tx < G_TILE_SIZE && Row < (height -1) && Col < (width-1))
{
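// Central differences give the gradient; modulus and orientation are stored
// as interleaved pairs, hence the factor 2 in the destination index.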
float_t Gx = 0.5f * (Ms[ty+1][tx+2] - Ms[ty+1][tx]);
float_t Gy = 0.5f * (Ms[ty+2][tx+1] - Ms[ty][tx+1]);
float_t m = sqrt( Gx*Gx + Gy*Gy );
float_t x = atan2(Gy, Gx) + float(2*M_PI);
float_t t = (x >= 0)? fmod (x, float(2*M_PI)) : float(2*M_PI) + fmod (x, float(2*M_PI));
dst[ 2*width*(Row + 1) + 2*(Col + 1)] = m;
dst[ 2*width*(Row + 1) + 2*(Col + 1) + 1] = t;
}
}
void
Sift::GradinGpu(pixel_t* pt, int o, int width, int height)
{
//int S = smax - smin - 2;
int square = width * height;
//unsigned int dst_size = sizeof(pixel_t) * (2*S*width*height);
// unsigned int src_size = sizeof(pixel_t) * (S*width*height);
unsigned int dst_size = sizeof(pixel_t) * (2*width*height);
unsigned int src_size = sizeof(pixel_t) * (width*height);
pixel_t* src_d = NULL;
pixel_t* dst_d = NULL;
CUDA_SAFE_CALL( hipMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( hipMalloc( (void**) &dst_d, dst_size));
dim3 dimBlock, dimGrid;
dimBlock.x = G_BLOCK_SIZE;
dimBlock.y = G_BLOCK_SIZE;
dimGrid.x = ((width-2) / G_TILE_SIZE) + (((width-2) % G_TILE_SIZE) ? 1:0 );
dimGrid.y = ((height-2) / G_TILE_SIZE) + (((height-2) % G_TILE_SIZE) ? 1:0 );
//dimGrid.z = S;
dimGrid.z = 1;
for(int s = smin + 1 ; s <= smax-2 ; ++s)
{
pixel_t* src = getLevel(o, s);
CUDA_SAFE_CALL( hipMemcpy(src_d, src, src_size, hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( GradKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, src_d, dst_d, width, height, square);
CUDA_SAFE_CALL(hipMemcpy(pt, dst_d, dst_size, hipMemcpyDeviceToHost));
pt = pt + 2*width*height;
}
hipFree(src_d);
hipFree(dst_d);
}
void
Sift::prepareGrad(int o)
{
int const ow = getOctaveWidth(o) ;
int const oh = getOctaveHeight(o) ;
//int const xo = 1 ;
int const yo = ow ;
//int const so = oh*ow ;
if( ! tempIsGrad || tempOctave != o ) {
/*
// compute dx/dy
for(int s = smin+1 ; s <= smax-2 ; ++s) {
for(int y = 1 ; y < oh-1 ; ++y ) {
pixel_t* src = getLevel(o, s) + xo + yo*y ;
pixel_t* end = src + ow - 1 ;
pixel_t* grad = 2 * (xo + yo*y + (s - smin -1)*so) + temp ;
while(src != end) {
VL::float_t Gx = 0.5 * ( *(src+xo) - *(src-xo) ) ;
VL::float_t Gy = 0.5 * ( *(src+yo) - *(src-yo) ) ;
VL::float_t m = fast_sqrt( Gx*Gx + Gy*Gy ) ;
VL::float_t t = fast_mod_2pi( fast_atan2(Gy, Gx) + VL::float_t(2*M_PI) );
*grad++ = pixel_t( m ) ;
*grad++ = pixel_t( t ) ;
++src ;
}
}
}
*/
pixel_t* grad = temp;
GradinGpu(grad, o, yo, oh);
}
tempIsGrad = true ;
tempOctave = o ;
}
__device__ void normalize_histogram(float* L_begin, float* L_end)
{
float* L_iter ;
float norm = 0.0f ;
for(L_iter = L_begin; L_iter != L_end ; ++L_iter)
norm += (*L_iter) * (*L_iter) ;
norm = sqrt(norm) ;
for(L_iter = L_begin; L_iter != L_end ; ++L_iter)
*L_iter /= norm;
// *L_iter /= (norm + numeric_limits<float>::epsilon() ) ;
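// Note: unlike Detail::normalize_histogram further below, no epsilon is added
// to the norm here, so an all-zero histogram would produce NaNs.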
}
__global__ void GetkKernel(Sift::Keypoint* Kin, pixel_t* Grad, OKvalue* Kout, int Klength, int width, int height, int smin, float xperiod, int magnif)
{
int i;
int tx = threadIdx.x;
int bx = blockIdx.x;
int index = bx * K_BLOCK_SIZE + tx;
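// One thread per keypoint: build a 36-bin orientation histogram, smooth it,
// pick up to four dominant orientations, then compute a 128-dimensional
// descriptor for each orientation.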
VL::float_t angles [4];
int nbins = 36;
//VL::float_t WinFactor = 1.5f;
VL::float_t hist[36];
int ow = width;
int oh = height;
int xo = 2;
int yo = xo * ow;
int so = yo * oh;
if (index < Klength){
VL::float_t x = Kin[index].x / xperiod;
VL::float_t y = Kin[index].y / xperiod;
VL::float_t sigma = Kin[index].sigma / xperiod;
int xi = ((int) (x+0.5)) ;
int yi = ((int) (y+0.5)) ;
int si = Kin[index].is ;
VL::float_t sigmaw = 1.50f * sigma; //winFactor
int Wo = (int) floor(3.0 * sigmaw);
int NBO = 8;
int NBP = 4;
VL::float_t SBP = magnif*sigma;
int Wd = (int) floor (sqrt(2.0) * SBP * (NBP + 1) / 2.0 + 0.5) ;
int binto = 1;
int binyo = NBO*NBP;
int binxo = NBO;
int bin;
for (i = 0; i < nbins; i++)
hist[i] = 0.0f;
pixel_t* pt = Grad + xi * xo + yi * yo + (si - smin -1) * so ;
for(int ys = max(-Wo, 1-yi) ; ys <= min(+Wo, oh -2 -yi) ; ++ys) {
for(int xs = max(-Wo, 1-xi) ; xs <= min(+Wo, ow -2 -xi) ; ++xs) {
VL::float_t dx = xi + xs - x;
VL::float_t dy = yi + ys - y;
VL::float_t r2 = dx*dx + dy*dy ;
if(r2 >= Wo*Wo+0.5) continue ;
VL::float_t wgt = exp(-(r2 / (2*sigmaw*sigmaw))) ;
VL::float_t mod = *(pt + xs*xo + ys*yo) ;
VL::float_t ang = *(pt + xs*xo + ys*yo + 1) ;
int bin = (int) floor( nbins * ang / (2*M_PI) ) ;
hist[bin] += mod * wgt ;
}
}
#if defined VL_LOWE_STRICT
// Lowe's version apparently has a little issue with orientations
// around + or - pi, which we reproduce here for compatibility
for (int iter = 0; iter < 6; iter++) {
VL::float_t prev = hist[nbins/2] ;
for (int i = nbins/2-1; i >= -nbins/2 ; --i) {
int j = (i + nbins) % nbins ;
int jp = (i - 1 + nbins) % nbins ;
VL::float_t newh = (prev + hist[j] + hist[jp]) / 3.0;
prev = hist[j] ;
hist[j] = newh ;
}
}
#else
// this is slightly more correct
for (int iter = 0; iter < 6; iter++) {
VL::float_t prev = hist[nbins-1] ;
VL::float_t first = hist[0] ;
for (i = 0; i < nbins - 1; i++) {
VL::float_t newh = (prev + hist[i] + hist[(i+1) % nbins]) / 3.0;
prev = hist[i] ;
hist[i] = newh ;
}
hist[i] = (prev + hist[i] + first)/3.0 ;
}
#endif
//VL::float_t maxh = * std::max_element(hist, hist + nbins) ;
VL::float_t maxh = 0;
for (int i = 0; i < nbins; i++)
maxh = max(maxh, hist[i]);
int nangles = 0 ;
for(int i = 0 ; i < nbins ; ++i) {
VL::float_t h0 = hist [i] ;
VL::float_t hm = hist [(i-1+nbins) % nbins] ;
VL::float_t hp = hist [(i+1+nbins) % nbins] ;
// is this a peak?
if( h0 > 0.8*maxh && h0 > hm && h0 > hp ){
VL::float_t di = -0.5 * (hp - hm) / (hp+hm-2*h0) ;
VL::float_t th = 2*M_PI * (i+di+0.5) / nbins ;
angles [ nangles ] = th ;
Kout[index].th[nangles] = th;
nangles++;
if( nangles == 4 )
break;
}
}
Kout[index].nangles = nangles;
////**************descriptor section******************//
for(int a = 0 ; a < nangles ; ++a) {
VL::float_t descr_pt[128];
for (int i = 0; i < 128; i ++)
descr_pt[i] = 0.0f;
VL::float_t* dpt = descr_pt + (NBP/2) * binyo + (NBP/2) * binxo;
VL::float_t st0 = sinf( angles[a] ) ;
VL::float_t ct0 = cosf( angles[a] ) ;
#define atd(dbinx,dbiny,dbint) *(dpt + (dbint)*binto + (dbiny)*binyo + (dbinx)*binxo)
for(int dyi = max(-Wd, 1-yi) ; dyi <= min(+Wd, oh-2-yi) ; ++dyi) {
for(int dxi = max(-Wd, 1-xi) ; dxi <= min(+Wd, ow-2-xi) ; ++dxi) {
VL::float_t mod = *( pt + dxi*xo + dyi*yo + 0 ) ;
VL::float_t angle = *( pt + dxi*xo + dyi*yo + 1 ) ;
//VL::float_t x = (angles[a] - angle) ;
VL::float_t theta = ((angles[a] - angle) >= 0)? fmod ((angles[a] - angle), float(2*M_PI)) : float(2*M_PI) + fmod ((angles[a] - angle), float(2*M_PI)); // lowe compatible ?
VL::float_t dx = xi + dxi - x;
VL::float_t dy = yi + dyi - y;
// get the displacement normalized w.r.t. the keypoint
// orientation and extension.
VL::float_t nx = ( ct0 * dx + st0 * dy) / SBP ;
VL::float_t ny = (-st0 * dx + ct0 * dy) / SBP ;
VL::float_t nt = NBO * theta / (2*M_PI) ;
// Get the gaussian weight of the sample. The gaussian window
// has a standard deviation equal to NBP/2. Note that dx and dy
// are in the normalized frame, so that -NBP/2 <= dx <= NBP/2.
VL::float_t const wsigma = NBP/2 ;
VL::float_t win = exp(-((nx*nx + ny*ny)/(2.0 * wsigma * wsigma))) ;
// The sample will be distributed in 8 adjacent bins.
// We start from the ``lower-left'' bin.
int binx = floor( nx - 0.5 ) ;
int biny = floor( ny - 0.5 ) ;
int bint = floor( nt ) ;
VL::float_t rbinx = nx - (binx+0.5) ;
VL::float_t rbiny = ny - (biny+0.5) ;
VL::float_t rbint = nt - bint ;
int dbinx ;
int dbiny ;
int dbint ;
// Distribute the current sample into the 8 adjacent bins
for(dbinx = 0 ; dbinx < 2 ; ++dbinx)
for(dbiny = 0 ; dbiny < 2 ; ++dbiny)
for(dbint = 0 ; dbint < 2 ; ++dbint)
if( binx+dbinx >= -(NBP/2) && binx+dbinx < (NBP/2) && biny+dbiny >= -(NBP/2) && biny+dbiny < (NBP/2) ) {
VL::float_t weight = win * mod * abs (1 - dbinx - rbinx) * abs (1 - dbiny - rbiny) * abs (1 - dbint - rbint) ;
atd(binx+dbinx, biny+dbiny, (bint+dbint) % NBO) += weight ;
}
}
}
//if( normalizeDescriptor ) {
normalize_histogram(descr_pt, descr_pt + NBO*NBP*NBP) ;
for(bin = 0; bin < NBO*NBP*NBP ; ++bin) {
if (descr_pt[bin] > 0.2) descr_pt[bin] = 0.2;
}
normalize_histogram(descr_pt, descr_pt + NBO*NBP*NBP) ;
//}
for (int i = 0; i < 128; i ++)
Kout[index].descr_pt[a*128 + i ] = descr_pt[i];
}
}
}
/** @brief Compute the orientation(s) of a keypoint
**
** The function computes the orientation of the specified keypoint.
** The function returns up to four different orientations, obtained
** as strong peaks of the histogram of gradient orientations (a
** keypoint can theoretically generate more than four orientations,
** but this is very unlikely).
**
** @remark The function needs to compute the gradient modulus and
** orientation of the Gaussian scale space octave to which the
** keypoint belongs. The result is cached, but discarded if different
** octaves are visited. Therefore it is much quicker to evaluate the
** keypoints in their natural octave order.
**
** The keypoint must lie within the scale space. In particular, the
** scale index is supposed to be in the range @c smin+1 and @c smax-1
** (this is from the SIFT detector). If this is not the case, the
** computation is silently aborted and no orientations are returned.
**
** @param angles buffers to store the resulting angles.
** @param keypoint keypoint to process.
** @return number of orientations found.
**/
int
Sift::computeKeypointOrientations(VL::float_t angles [4], Keypoint keypoint)
{
int const nbins = 36 ;
VL::float_t const winFactor = 1.5 ;
VL::float_t hist [nbins] ;
// octave
int o = keypoint.o ;
VL::float_t xperiod = getOctaveSamplingPeriod(o) ;
// offsets to move in the Gaussian scale space octave
const int ow = getOctaveWidth(o) ;
const int oh = getOctaveHeight(o) ;
const int xo = 2 ;
const int yo = xo * ow ;
const int so = yo * oh ;
// keypoint fractional geometry
VL::float_t x = keypoint.x / xperiod ;
VL::float_t y = keypoint.y / xperiod ;
VL::float_t sigma = keypoint.sigma / xperiod ;
// shall we use keypoints.ix,iy,is here?
int xi = ((int) (x+0.5)) ;
int yi = ((int) (y+0.5)) ;
int si = keypoint.is ;
VL::float_t const sigmaw = winFactor * sigma ;
int W = (int) floor(3.0 * sigmaw) ;
// skip the keypoint if it is out of bounds
if(o < omin ||
o >=omin+O ||
xi < 0 ||
xi > ow-1 ||
yi < 0 ||
yi > oh-1 ||
si < smin+1 ||
si > smax-2 ) {
std::cerr<<"!"<<std::endl ;
return 0 ;
}
// make sure that the gradient buffer is filled with octave o
prepareGrad(o) ;
// clear the SIFT histogram
std::fill(hist, hist + nbins, 0) ;
// fill the SIFT histogram
pixel_t* pt = temp + xi * xo + yi * yo + (si - smin -1) * so ;
#undef at
#define at(dx,dy) (*(pt + (dx)*xo + (dy)*yo))
for(int ys = ::max(-W, 1-yi) ; ys <= ::min(+W, oh -2 -yi) ; ++ys) {
for(int xs = ::max(-W, 1-xi) ; xs <= ::min(+W, ow -2 -xi) ; ++xs) {
VL::float_t dx = xi + xs - x;
VL::float_t dy = yi + ys - y;
VL::float_t r2 = dx*dx + dy*dy ;
// limit to a circular window
if(r2 >= W*W+0.5) continue ;
VL::float_t wgt = VL::fast_expn( r2 / (2*sigmaw*sigmaw) ) ;
VL::float_t mod = *(pt + xs*xo + ys*yo) ;
VL::float_t ang = *(pt + xs*xo + ys*yo + 1) ;
// int bin = (int) floor( nbins * ang / (2*M_PI) ) ;
int bin = (int) floor( nbins * ang / (2*M_PI) ) ;
hist[bin] += mod * wgt ;
}
}
// smooth the histogram
#if defined VL_LOWE_STRICT
// Lowe's version apparently has a little issue with orientations
// around + or - pi, which we reproduce here for compatibility
for (int iter = 0; iter < 6; iter++) {
VL::float_t prev = hist[nbins/2] ;
for (int i = nbins/2-1; i >= -nbins/2 ; --i) {
int const j = (i + nbins) % nbins ;
int const jp = (i - 1 + nbins) % nbins ;
VL::float_t newh = (prev + hist[j] + hist[jp]) / 3.0;
prev = hist[j] ;
hist[j] = newh ;
}
}
#else
// this is slightly more correct
for (int iter = 0; iter < 6; iter++) {
VL::float_t prev = hist[nbins-1] ;
VL::float_t first = hist[0] ;
int i ;
for (i = 0; i < nbins - 1; i++) {
VL::float_t newh = (prev + hist[i] + hist[(i+1) % nbins]) / 3.0;
prev = hist[i] ;
hist[i] = newh ;
}
hist[i] = (prev + hist[i] + first)/3.0 ;
}
#endif
// find the histogram maximum
VL::float_t maxh = * std::max_element(hist, hist + nbins) ;
// find peaks within 80% from max
int nangles = 0 ;
for(int i = 0 ; i < nbins ; ++i) {
VL::float_t h0 = hist [i] ;
VL::float_t hm = hist [(i-1+nbins) % nbins] ;
VL::float_t hp = hist [(i+1+nbins) % nbins] ;
// is this a peak?
if( h0 > 0.8*maxh && h0 > hm && h0 > hp ) {
// quadratic interpolation
// VL::float_t di = -0.5 * (hp - hm) / (hp+hm-2*h0) ;
VL::float_t di = -0.5 * (hp - hm) / (hp+hm-2*h0) ;
VL::float_t th = 2*M_PI * (i+di+0.5) / nbins ;
angles [ nangles++ ] = th ;
if( nangles == 4 )
goto enough_angles ;
}
}
enough_angles:
return nangles ;
}
// ===================================================================
// computeKeypointDescriptor()
// -------------------------------------------------------------------
namespace Detail {
/** Normalizes in norm L_2 a descriptor. */
void
normalize_histogram(VL::float_t* L_begin, VL::float_t* L_end)
{
VL::float_t* L_iter ;
VL::float_t norm = 0.0 ;
for(L_iter = L_begin; L_iter != L_end ; ++L_iter)
norm += (*L_iter) * (*L_iter) ;
norm = fast_sqrt(norm) ;
for(L_iter = L_begin; L_iter != L_end ; ++L_iter)
*L_iter /= (norm + std::numeric_limits<VL::float_t>::epsilon() ) ;
}
}
/** @brief SIFT descriptor
**
** The function computes the descriptor of the keypoint @a keypoint.
** The function fills the buffer @a descr_pt which must be large
** enough. The function uses @a angle0 as the rotation of the keypoint.
** By calling the function multiple times, different orientations can
** be evaluated.
**
** @remark The function needs to compute the gradient modulus and
** orientation of the Gaussian scale space octave to which the
** keypoint belongs. The result is cached, but discarded if different
** octaves are visited. Therefore it is much quicker to evaluate the
** keypoints in their natural octave order.
**
** The function silently aborts the computation for keypoints outside
** the scale space boundaries. See also siftComputeOrientations().
**/
void
Sift::computeKeypointDescriptor
(VL::float_t* descr_pt,
Keypoint keypoint,
VL::float_t angle0)
{
/* The SIFT descriptor is a three dimensional histogram of the position
* and orientation of the gradient. There are NBP bins for each spatial
* dimensions and NBO bins for the orientation dimension, for a total of
* NBP x NBP x NBO bins.
*
* The support of each spatial bin has an extension of SBP = 3sigma
* pixels, where sigma is the scale of the keypoint. Thus all the bins
* together have a support SBP x NBP pixels wide. Since weighting and
* interpolation of pixels is used, another half bin is needed at both
* ends of the extension. Therefore, we need a square window of SBP x
* (NBP + 1) pixels. Finally, since the patch can be arbitrarily rotated,
* we need to consider a window 2W ~= sqrt(2) x SBP x (NBP + 1) pixels
* wide.
*/
// octave
int o = keypoint.o ;
VL::float_t xperiod = getOctaveSamplingPeriod(o) ;
// offsets to move in Gaussian scale space octave
const int ow = getOctaveWidth(o) ;
const int oh = getOctaveHeight(o) ;
const int xo = 2 ;
const int yo = xo * ow ;
const int so = yo * oh ;
// keypoint fractional geometry
VL::float_t x = keypoint.x / xperiod;
VL::float_t y = keypoint.y / xperiod ;
VL::float_t sigma = keypoint.sigma / xperiod ;
VL::float_t st0 = sinf( angle0 ) ;
VL::float_t ct0 = cosf( angle0 ) ;
// shall we use keypoints.ix,iy,is here?
int xi = ((int) (x+0.5)) ;
int yi = ((int) (y+0.5)) ;
int si = keypoint.is ;
// const VL::float_t magnif = 3.0f ;
const int NBO = 8 ;
const int NBP = 4 ;
const VL::float_t SBP = magnif * sigma ;
const int W = (int) floor (sqrt(2.0) * SBP * (NBP + 1) / 2.0 + 0.5) ;
/* Offsets to move in the descriptor. */
/* Use Lowe's convention. */
const int binto = 1 ;
const int binyo = NBO * NBP ;
const int binxo = NBO ;
// const int bino = NBO * NBP * NBP ;
int bin ;
// check bounds
if(o < omin ||
o >=omin+O ||
xi < 0 ||
xi > ow-1 ||
yi < 0 ||
yi > oh-1 ||
si < smin+1 ||
si > smax-2 )
return ;
// make sure gradient buffer is up-to-date
prepareGrad(o) ;
std::fill( descr_pt, descr_pt + NBO*NBP*NBP, 0 ) ;
/* Center the scale space and the descriptor on the current keypoint.
* Note that dpt is pointing to the bin of center (SBP/2,SBP/2,0).
*/
pixel_t const * pt = temp + xi*xo + yi*yo + (si - smin - 1)*so ;
VL::float_t * dpt = descr_pt + (NBP/2) * binyo + (NBP/2) * binxo ;
#define atd(dbinx,dbiny,dbint) *(dpt + (dbint)*binto + (dbiny)*binyo + (dbinx)*binxo)
/*
* Process pixels in the intersection of the image rectangle
* (1,1)-(M-1,N-1) and the keypoint bounding box.
*/
for(int dyi = ::max(-W, 1-yi) ; dyi <= ::min(+W, oh-2-yi) ; ++dyi) {
for(int dxi = ::max(-W, 1-xi) ; dxi <= ::min(+W, ow-2-xi) ; ++dxi) {
// retrieve
VL::float_t mod = *( pt + dxi*xo + dyi*yo + 0 ) ;
VL::float_t angle = *( pt + dxi*xo + dyi*yo + 1 ) ;
VL::float_t theta = fast_mod_2pi(-angle + angle0) ; // lowe compatible ?
// fractional displacement
VL::float_t dx = xi + dxi - x;
VL::float_t dy = yi + dyi - y;
// get the displacement normalized w.r.t. the keypoint
// orientation and extension.
VL::float_t nx = ( ct0 * dx + st0 * dy) / SBP ;
VL::float_t ny = (-st0 * dx + ct0 * dy) / SBP ;
VL::float_t nt = NBO * theta / (2*M_PI) ;
// Get the gaussian weight of the sample. The gaussian window
// has a standard deviation equal to NBP/2. Note that dx and dy
// are in the normalized frame, so that -NBP/2 <= dx <= NBP/2.
VL::float_t const wsigma = NBP/2 ;
VL::float_t win = VL::fast_expn((nx*nx + ny*ny)/(2.0 * wsigma * wsigma)) ;
// The sample will be distributed in 8 adjacent bins.
// We start from the ``lower-left'' bin.
int binx = fast_floor( nx - 0.5 ) ;
int biny = fast_floor( ny - 0.5 ) ;
int bint = fast_floor( nt ) ;
VL::float_t rbinx = nx - (binx+0.5) ;
VL::float_t rbiny = ny - (biny+0.5) ;
VL::float_t rbint = nt - bint ;
int dbinx ;
int dbiny ;
int dbint ;
// Distribute the current sample into the 8 adjacent bins
for(dbinx = 0 ; dbinx < 2 ; ++dbinx) {
for(dbiny = 0 ; dbiny < 2 ; ++dbiny) {
for(dbint = 0 ; dbint < 2 ; ++dbint) {
if( binx+dbinx >= -(NBP/2) &&
binx+dbinx < (NBP/2) &&
biny+dbiny >= -(NBP/2) &&
biny+dbiny < (NBP/2) ) {
VL::float_t weight = win
* mod
* fast_abs (1 - dbinx - rbinx)
* fast_abs (1 - dbiny - rbiny)
* fast_abs (1 - dbint - rbint) ;
atd(binx+dbinx, biny+dbiny, (bint+dbint) % NBO) += weight ;
}
}
}
}
}
}
/* Standard SIFT descriptors are normalized, truncated and normalized again */
if( normalizeDescriptor ) {
/* Normalize the histogram to L2 unit length. */
Detail::normalize_histogram(descr_pt, descr_pt + NBO*NBP*NBP) ;
/* Truncate at 0.2. */
for(bin = 0; bin < NBO*NBP*NBP ; ++bin) {
if (descr_pt[bin] > 0.2) descr_pt[bin] = 0.2;
}
/* Normalize again. */
Detail::normalize_histogram(descr_pt, descr_pt + NBO*NBP*NBP) ;
}
}
// namespace VL
}
| 208ab16fe3e2f7d1bb37653d43ffa498a5e211a0.cu | // file: sift.cu
// GPU implementation: Liang Men
// description: Sift definition
#include<sift.hpp>
#include<sift-conv.tpp>
#include<algorithm>
#include<iostream>
#include<sstream>
#include<cassert>
#include <cutil.h>
extern "C" {
#if defined (VL_MAC)
#include<libgen.h>
#else
#include<string.h>
}
#endif
#define BLOCK_SIZE 16
#define C_TILE_SIZE 250
#define D_BLOCK_SIZE 128
#define F_TILE_SIZE 14
#define F_BLOCK_SIZE F_TILE_SIZE+2
#define F_TILE_SIZE_S 12
#define F_BLOCK_SIZE_S F_TILE_SIZE_S+4
#define G_TILE_SIZE 14
#define G_BLOCK_SIZE G_TILE_SIZE+2
#define K_BLOCK_SIZE 128
using namespace VL ;
// on startup, pre-compute expn(x) = exp(-x)
namespace VL {
namespace Detail {
int const expnTableSize = 256 ;
VL::float_t const expnTableMax = VL::float_t(25.0) ;
VL::float_t expnTable [ expnTableSize + 1 ] ;
struct buildExpnTable
{
buildExpnTable() {
for(int k = 0 ; k < expnTableSize + 1 ; ++k) {
expnTable[k] = exp( - VL::float_t(k) / expnTableSize * expnTableMax ) ;
}
}
} _buildExpnTable ;
} }
namespace VL {
namespace Detail {
/** Comment eater istream manipulator */
class _cmnt {} cmnt ;
/** @brief Extract a comment from a stream
**
** The function extracts a block of consecutive comments from an
** input stream. A comment is a sequence of whitespaces, followed by
** a `#' character, other characters and terminated at the next line
** ending. A block of comments is just a sequence of comments.
**/
std::istream&
operator>>(std::istream& is, _cmnt& manip)
{
char c ;
char b [1024] ;
is>>c ;
if( c != '#' )
return is.putback(c) ;
is.getline(b,1024) ;
return is ;
}
}
/** @brief Insert PGM file into stream
**
** The function inserts into the stream @a os the grayscale image @a
** im encoded as a PGM file. The image is assumed to be normalized
** in the range 0.0 - 1.0.
**
** @param os output stream.
** @param im pointer to image data.
** @param width image width.
** @param height image height.
** @return the stream @a os.
**/
std::ostream&
insertPgm(std::ostream& os, pixel_t const* im, int width, int height)
{
os<< "P5" << "\n"
<< width << " "
<< height << "\n"
<< "255" << "\n" ;
for(int y = 0 ; y < height ; ++y) {
for(int x = 0 ; x < width ; ++x) {
unsigned char v =
(unsigned char)
(std::max(std::min(*im++, 1.0f),0.f) * 255.0f) ;
os << v ;
}
}
return os ;
}
/** @brief Extract PGM file from stream.
**
** The function extracts from the stream @a in a grayscale image
** encoded as a PGM file. The function fills the structure @a buffer,
** containing the image dimensions and a pointer to the image data.
**
** The image data is an array of floats and is owned by the caller,
** which should erase it as in
**
** @code
** delete [] buffer.data.
** @endcode
**
** When the function encounters an error it throws a generic instance
** of VL::Exception.
**
** @param in input stream.
** @param buffer buffer descriptor to be filled.
** @return the stream @a in.
**/
std::istream&
extractPgm(std::istream& in, PgmBuffer& buffer)
{
pixel_t* im_pt ;
int width ;
int height ;
int maxval ;
char c ;
in>>c ;
if( c != 'P') VL_THROW("File is not in PGM format") ;
bool is_ascii ;
in>>c ;
switch( c ) {
case '2' : is_ascii = true ; break ;
case '5' : is_ascii = false ; break ;
default : VL_THROW("File is not in PGM format") ;
}
in >> Detail::cmnt
>> width
>> Detail::cmnt
>> height
>> Detail::cmnt
>> maxval ;
// after maxval no more comments, just a whitespace or newline
{char trash ; in.get(trash) ;}
if(maxval > 255)
VL_THROW("Only <= 8-bit per channel PGM files are supported") ;
if(! in.good())
VL_THROW("PGM header parsing error") ;
im_pt = new pixel_t [ width*height ];
try {
if( is_ascii ) {
pixel_t* start = im_pt ;
pixel_t* end = start + width*height ;
pixel_t norm = pixel_t( maxval ) ;
while( start != end ) {
int i ;
in >> i ;
if( ! in.good() ) VL_THROW
("PGM parsing error file (width="<<width
<<" height="<<height
<<" maxval="<<maxval
<<" at pixel="<<start-im_pt<<")") ;
*start++ = pixel_t( i ) / norm ;
}
} else {
std::streampos beg = in.tellg() ;
char* buffer = new char [width*height] ;
in.read(buffer, width*height) ;
if( ! in.good() ) VL_THROW
("PGM parsing error file (width="<<width
<<" height="<<height
<<" maxval="<<maxval
<<" at pixel="<<in.tellg()-beg<<")") ;
pixel_t* start = im_pt ;
pixel_t* end = start + width*height ;
uint8_t* src = reinterpret_cast<uint8_t*>(buffer) ;
while( start != end ) *start++ = *src++ / 255.0f ;
}
} catch(...) {
delete [] im_pt ;
throw ;
}
buffer.width = width ;
buffer.height = height ;
buffer.data = im_pt ;
return in ;
}
// ===================================================================
// Low level image operations
// -------------------------------------------------------------------
namespace Detail {
/** @brief Copy an image
** @param dst output image buffer.
** @param src input image buffer.
** @param width input image width.
** @param height input image height.
**/
void
copy(pixel_t* dst, pixel_t const* src, int width, int height)
{
memcpy(dst, src, sizeof(pixel_t)*width*height) ;
}
/** @brief Copy an image upsampling two times
**
** The destination buffer must be at least as big as two times the
** input buffer. Bilinear interpolation is used.
**
** @param dst output image buffer.
** @param src input image buffer.
** @param width input image width.
** @param height input image height.
**/
/*
void
copyAndUpsampleRows
(pixel_t* dst, pixel_t const* src, int width, int height)
{
for(int y = 0 ; y < height ; ++y) {
pixel_t b, a ;
b = a = *src++ ;
for(int x = 0 ; x < width-1 ; ++x) {
b = *src++ ;
*dst = a ; dst += height ;
*dst = 0.5*(a+b) ; dst += height ;
a = b ;
}
*dst = b ; dst += height ;
*dst = b ; dst += height ;
dst += 1 - width * 2 * height ;
}
}
void
copyAndDownsample(pixel_t* dst, pixel_t const* src,
int width, int height, int d)
{
for(int y = 0 ; y < height ; y+=d) {
pixel_t const * srcrowp = src + y * width ;
for(int x = 0 ; x < width - (d-1) ; x+=d) {
*dst++ = *srcrowp ;
srcrowp += d ;
}
}
}
*/
}
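// UpsampleKernel doubles the number of columns by linear interpolation and
// writes the result transposed (the output is indexed with stride src_height),
// so applying copyAndUpsampleRows twice upsamples both dimensions.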
__global__ void UpsampleKernel(pixel_t* dst, pixel_t* src, int src_width, int src_height)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Col = bx*BLOCK_SIZE + tx;
int Row = by*BLOCK_SIZE + ty;
if ( Col < (src_width -1) && Row < src_height )
{
dst[2*Col*src_height + Row] = src[Row*src_width + Col];
dst[(2*Col+1)*src_height + Row] =
(src[Row*src_width + Col] + src[Row*src_width + Col + 1])/2;
}
else
{
if ( Col == (src_width - 1) && Row < src_height )
{
dst[2*Col*src_height + Row] = src[Row*src_width + Col];
dst[(2*Col+1)*src_height + Row] = src[Row*src_width + Col];
}
}
}
void
copyAndUpsampleRows (pixel_t* dst, pixel_t const* src, int width, int height)
{
int dst_width = height;
int dst_height = width * 2;
unsigned int src_size = sizeof(pixel_t) * (width*height);
unsigned int dst_size = sizeof(pixel_t) * (dst_width*dst_height);
pixel_t* dst_d = NULL;
pixel_t* src_d = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &dst_d, dst_size));
CUDA_SAFE_CALL( cudaMemcpy( src_d, src, src_size, cudaMemcpyHostToDevice) );
dim3 dimBlock, dimGrid1;
dimBlock.x = dimBlock.y = BLOCK_SIZE;
dimBlock.z = 1;
dimGrid1.x = (width / dimBlock.x) + ( (width % dimBlock.x) ? 1:0 );
dimGrid1.y = (height / dimBlock.y) + ( (height % dimBlock.y) ? 1:0 );
dimGrid1.z = 1;
UpsampleKernel<<<dimGrid1, dimBlock>>>(dst_d, src_d, width, height);
CUDA_SAFE_CALL(cudaMemcpy( dst, dst_d, dst_size, cudaMemcpyDeviceToHost));
cudaFree(dst_d);
cudaFree(src_d);
}
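// -------------------------------------------------------------------
// Illustrative usage sketch (added for clarity; not part of the original
// code). Doubling an image in both dimensions takes two transposed
// row-upsampling passes, exactly as Sift::process() does when omin < 0.
// The buffer sizes are assumptions implied by the transposed output
// layout: scratch must hold height x 2*width pixels, dst 2*height x 2*width.
// -------------------------------------------------------------------
static void exampleDoubleImage(pixel_t* dst, pixel_t* scratch,
                               pixel_t const* src, int width, int height)
{
  // first pass: upsample the columns of src, writing the transpose (height x 2*width)
  copyAndUpsampleRows(scratch, src, width, height) ;
  // second pass: upsample the transposed image, restoring the original
  // orientation at twice the resolution (2*height x 2*width)
  copyAndUpsampleRows(dst, scratch, height, 2*width) ;
}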
void // Upsamples in both dimensions with two internal passes, so the caller does not need to call copyAndUpsampleRows twice.
copyAndUpsampleRows2 (pixel_t* dst, pixel_t const* src, int width, int height)
{
int tmp_width = height;
int tmp_height = width * 2;
int dst_width = width * 2;
int dst_height = height * 2;
unsigned int src_size = sizeof(pixel_t) * (width*height);
unsigned int tmp_size = sizeof(pixel_t) * (2*width*height);
unsigned int dst_size = sizeof(pixel_t) * (dst_width*dst_height);
pixel_t* dst_d = NULL;
pixel_t* tmp_d = NULL;
pixel_t* src_d = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &tmp_d, tmp_size));
CUDA_SAFE_CALL( cudaMemcpy( src_d, src, src_size, cudaMemcpyHostToDevice) );
dim3 dimBlock, dimGrid1, dimGrid2;
dimBlock.x = dimBlock.y = BLOCK_SIZE;
dimBlock.z = 1;
dimGrid1.x = (width / dimBlock.x) + ( (width % dimBlock.x) ? 1:0 );
dimGrid1.y = (height / dimBlock.y) + ( (height % dimBlock.y) ? 1:0 );
dimGrid1.z = 1;
dimGrid2.x = (tmp_width / dimBlock.x) + ( (tmp_width % dimBlock.x) ? 1:0 );
dimGrid2.y = (tmp_height / dimBlock.y) + ( (tmp_height % dimBlock.y) ? 1:0 );
dimGrid2.z = 1;
UpsampleKernel<<<dimGrid1, dimBlock>>>(tmp_d, src_d, width, height);
cudaFree(src_d);
CUDA_SAFE_CALL( cudaMalloc( (void**) &dst_d, dst_size));
UpsampleKernel<<<dimGrid2, dimBlock>>>(dst_d, tmp_d, tmp_width, tmp_height); // second pass runs on the transposed intermediate image
CUDA_SAFE_CALL(cudaMemcpy( dst, dst_d, dst_size, cudaMemcpyDeviceToHost));
cudaFree(dst_d);
cudaFree(tmp_d);
}
__global__ void DownsampleKernel(pixel_t* dst, pixel_t* src, int src_width, int src_height, int dst_width, int d)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Col = bx*BLOCK_SIZE + tx;
int Row = by*BLOCK_SIZE + ty;
if ( d*Col < src_width && d*Row < src_height)
dst[Row*dst_width + Col] = src[d*Row*src_width + d*Col];
}
void copyAndDownsample(pixel_t* dst, pixel_t const* src, int width, int height, int d)
{
int dst_width = (width / d) + ((width % d) ? 1:0 );
int dst_height =(height / d) + ((height % d) ? 1:0);
unsigned int src_size = sizeof(pixel_t) * (width*height);
unsigned int dst_size = sizeof(pixel_t) * (dst_width*dst_height);
pixel_t* dst_d = NULL;
pixel_t* src_d = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &dst_d, dst_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( cudaMemcpy( src_d, src, src_size, cudaMemcpyHostToDevice) );
dim3 dimBlock, dimGrid;
dimBlock.x = dimBlock.y = BLOCK_SIZE;
dimBlock.z = 1;
dimGrid.x = (dst_width / dimBlock.x) + ( (dst_width % dimBlock.x) ? 1:0 );
dimGrid.y = (dst_height / dimBlock.y) + ( (dst_height % dimBlock.y) ? 1:0 );
dimGrid.z = 1;
DownsampleKernel<<<dimGrid, dimBlock>>>(dst_d, src_d, width, height, dst_width, d);
CUDA_SAFE_CALL(cudaMemcpy( dst, dst_d, dst_size, cudaMemcpyDeviceToHost));
cudaFree(dst_d);
cudaFree(src_d);
}
/*
void econvolve(pixel_t* dst_pt,
pixel_t* src_pt, int M, int N,
pixel_t* filter_pt, int W)
{
//typedef T const TC ;
// convolve along columns, save transpose
// image is M by N
// buffer is N by M
// filter is (2*W+1) by 1
for(int j = 0 ; j < N ; ++j) {
for(int i = 0 ; i < M ; ++i) {
pixel_t acc = 0.0 ;
pixel_t* g = filter_pt ;
pixel_t* start = src_pt + (i-W) ;
pixel_t* stop ;
pixel_t x ;
// beginning
stop = src_pt + std::max(0, i-W) ;
x = *stop ;
while( start <= stop ) { acc += (*g++) * x ; start++ ; }
// middle
stop = src_pt + std::min(M-1, i+W) ;
while( start < stop ) acc += (*g++) * (*start++) ;
// end
x = *start ;
stop = src_pt + (i+W) ;
while( start <= stop ) { acc += (*g++) * x ; start++ ; }
// save
*dst_pt = acc ;
dst_pt += N ;
assert( g - filter_pt == 2*W+1 ) ;
}
// next column
src_pt += M ;
dst_pt -= M*N - 1 ;
}
}
*/
__global__ void ConvKernel(pixel_t* dst, pixel_t* src, int src_width, int src_height, pixel_t* filter, int w)
{
extern __shared__ pixel_t Ns[];
int tx = threadIdx.x;
int bx = blockIdx.x;
int by = blockIdx.y;
int Row = by;
int Col = bx*C_TILE_SIZE + tx;
int i;
pixel_t Pvalue = 0;
if ((Col - w) >= 0 && (Col - w) <= (src_width - 1))
{
Ns[tx] = src[Row * src_width + (Col - w)];
}
else
{
if((Col - w) < 0)
Ns[tx] = src[Row * src_width];
else
Ns[tx] = src[(Row + 1) * src_width - 1];
}
__syncthreads();
if (tx < C_TILE_SIZE)
{
for ( i = 0; i < 2*w+1; i++)
Pvalue += filter[i] * Ns[i+tx];
if (Col < src_width )
dst[Col * src_height + Row] = Pvalue;
}
}
void econvolve(pixel_t* dst, pixel_t* src, int src_width, int src_height,
pixel_t* filter_pt, int W)
{
// convolve along columns, save transpose
// image is M by N
// buffer is N by M
// filter is (2*W+1) by 1
unsigned int src_size = sizeof(pixel_t) * (src_width*src_height);
unsigned int dst_size = sizeof(pixel_t) * (src_width*src_height);
unsigned int filter_size = sizeof(pixel_t) * (2*W + 1);
pixel_t* dst_d = NULL;
pixel_t* src_d = NULL;
pixel_t* filter_d = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &dst_d, dst_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &filter_d, filter_size));
CUDA_SAFE_CALL( cudaMemcpy( src_d, src, src_size, cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMemcpy( filter_d, filter_pt, filter_size, cudaMemcpyHostToDevice) );
int SizeofSM = sizeof(pixel_t) * (2*W + C_TILE_SIZE);
dim3 dimBlock, dimGrid;
dimBlock.x = 2*W + C_TILE_SIZE;
dimBlock.y = 1;
dimGrid.x = (src_width / C_TILE_SIZE) + ( (src_width % C_TILE_SIZE) ? 1:0 );
dimGrid.y = src_height;
// std::cout
// << "econvolve: number of w : " << W<<std::endl;
ConvKernel<<<dimGrid, dimBlock, SizeofSM>>>(dst_d, src_d, src_width, src_height, filter_d, W);
CUDA_SAFE_CALL(cudaMemcpy( dst, dst_d, dst_size, cudaMemcpyDeviceToHost));
cudaFree(dst_d);
cudaFree(src_d);
cudaFree(filter_d);
}
/** @brief Smooth an image
**
** The function convolves the image @a src by a Gaussian kernel of
** variance @a s and writes the result to @a dst. The function also
 ** needs a scratch buffer @a temp of the same size as @a src and @a
 ** dst.
**
** @param dst output image buffer.
** @param temp scratch image buffer.
** @param src input image buffer.
** @param width width of the buffers.
** @param height height of the buffers.
** @param s standard deviation of the Gaussian kernel.
**/
void
Sift::smooth
(pixel_t* dst, pixel_t* temp,
pixel_t* src, int width, int height,
VL::float_t s)
{
  // make sure a buffer large enough has been allocated
// to hold the filter
int W = int( ceil( VL::float_t(4.0) * s ) ) ;
if( ! filter ) {
filterReserved = 0 ;
}
if( filterReserved < W ) {
filterReserved = W ;
if( filter ) delete [] filter ;
filter = new pixel_t [ 2* filterReserved + 1 ] ;
}
// pre-compute filter
for(int j = 0 ; j < 2*W+1 ; ++j)
filter[j] = VL::pixel_t
(std::exp
(VL::float_t
(-0.5 * (j-W) * (j-W) / (s*s) ))) ;
// normalize to one
normalize(filter, W) ;
// convolve
econvolve(temp, src, width, height, filter, W) ;
econvolve(dst, temp, height, width, filter, W) ;
}
// ===================================================================
// Sift(), ~Sift()
// -------------------------------------------------------------------
/** @brief Initialize Gaussian scale space parameters
**
** @param _im_pt Source image data
 ** @param _width Source image width
 ** @param _height Source image height
** @param _sigman Nominal smoothing value of the input image.
** @param _sigma0 Base smoothing level.
** @param _O Number of octaves.
** @param _S Number of levels per octave.
** @param _omin First octave.
** @param _smin First level in each octave.
** @param _smax Last level in each octave.
**/
Sift::Sift(const pixel_t* _im_pt, int _width, int _height,
VL::float_t _sigman,
VL::float_t _sigma0,
int _O, int _S,
int _omin, int _smin, int _smax)
: sigman( _sigman ),
sigma0( _sigma0 ),
O( _O ),
S( _S ),
omin( _omin ),
smin( _smin ),
smax( _smax ),
magnif( 3.0f ),
normalizeDescriptor( true ),
temp( NULL ),
octaves( NULL ),
filter( NULL )
{
process(_im_pt, _width, _height) ;
}
/** @brief Destroy SIFT filter.
**/
Sift::~Sift()
{
freeBuffers() ;
}
/** Allocate buffers. Buffer sizes depend on the image size and the
** value of omin.
**/
void
Sift::
prepareBuffers()
{
// compute buffer size
int w = (omin >= 0) ? (width >> omin) : (width << -omin) ;
int h = (omin >= 0) ? (height >> omin) : (height << -omin) ;
int size = w*h* std::max
((smax - smin), 2*((smax+1) - (smin-2) +1)) ;
if( temp && tempReserved == size ) return ;
freeBuffers() ;
// allocate
Kmid = new kvalue [w*h];
KeyNum = new int [O-omin];
temp = new pixel_t [ size ] ;
tempReserved = size ;
tempIsGrad = false ;
tempOctave = 0 ;
octaves = new pixel_t* [ O ] ;
for(int o = 0 ; o < O ; ++o) {
octaves[o] = new pixel_t [ (smax - smin + 1) * w * h ] ;
w >>= 1 ;
h >>= 1 ;
}
}
/** @brief Free buffers.
**
** This function releases any buffer allocated by prepareBuffers().
**
** @sa prepareBuffers().
**/
void
Sift::
freeBuffers()
{
if( filter ) {
delete [] filter ;
}
filter = 0 ;
if( octaves ) {
for(int o = 0 ; o < O ; ++o) {
delete [] octaves[ o ] ;
}
delete [] octaves ;
}
octaves = 0 ;
if( temp ) {
delete [] temp ;
}
temp = 0 ;
}
// ===================================================================
// getKeypoint
// -------------------------------------------------------------------
/** @brief Get keypoint from position and scale
**
** The function returns a keypoint with a given position and
** scale. Note that the keypoint structure contains fields that make
** sense only in conjunction with a specific scale space. Therefore
** the keypoint structure should be re-calculated whenever the filter
** is applied to a new image, even if the parameters @a x, @a y and
** @a sigma do not change.
**
** @param x x coordinate of the center.
 ** @param y y coordinate of the center.
 ** @param sigma scale.
 ** @return Corresponding keypoint.
**/
Sift::Keypoint
Sift::getKeypoint(VL::float_t x, VL::float_t y, VL::float_t sigma) const
{
/*
The formula linking the keypoint scale sigma to the octave and
scale index is
(1) sigma(o,s) = sigma0 2^(o+s/S)
for which
(2) o + s/S = log2 sigma/sigma0 == phi.
In addition to the scale index s (which can be fractional due to
scale interpolation) a keypoint has an integer scale index is too
(which is the index of the scale level where it was detected in
the DoG scale space). We have the constraints:
- o and is are integer
- is is in the range [smin+1, smax-2 ]
- o is in the range [omin, omin+O-1]
- is = rand(s) most of the times (but not always, due to the way s
is obtained by quadratic interpolation of the DoG scale space).
Depending on the values of smin and smax, often (2) has multiple
solutions is,o that satisfy all constraints. In this case we
choose the one with biggest index o (this saves a bit of
computation).
DETERMINING THE OCTAVE INDEX O
From (2) we have o = phi - s/S and we want to pick the biggest
possible index o in the feasible range. This corresponds to
selecting the smallest possible index s. We write s = is + ds
where in most cases |ds|<.5 (but in general |ds|<1). So we have
o = phi - s/S, s = is + ds , |ds| < .5 (or |ds| < 1).
Since is is in the range [smin+1,smax-2], s is in the range
[smin+.5,smax-1.5] (or [smin,smax-1]), the number o is an integer
in the range phi+[-smax+1.5,-smin-.5] (or
phi+[-smax+1,-smin]). Thus the maximum value of o is obtained for
o = floor(phi-smin-.5) (or o = floor(phi-smin)).
Finally o is clamped to make sure it is contained in the feasible
range.
DETERMINING THE SCALE INDEXES S AND IS
Given o we can derive is by writing (2) as
s = is + ds = S(phi - o).
We then take is = round(s) and clamp its value to be in the
feasible range.
*/
int o,ix,iy,is ;
VL::float_t s,phi ;
phi = log2(sigma/sigma0) ;
o = fast_floor( phi - (VL::float_t(smin)+.5)/S ) ;
o = std::min(o, omin+O-1) ;
o = std::max(o, omin ) ;
s = S * (phi - o) ;
is = int(s + 0.5) ;
is = std::min(is, smax - 2) ;
is = std::max(is, smin + 1) ;
VL::float_t per = getOctaveSamplingPeriod(o) ;
ix = int(x / per + 0.5) ;
iy = int(y / per + 0.5) ;
Keypoint key ;
key.o = o ;
key.ix = ix ;
key.iy = iy ;
key.is = is ;
key.x = x ;
key.y = y ;
key.s = s ;
key.sigma = sigma ;
return key ;
}
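// Worked example (added): with sigma0 = 1.6, S = 3, smin = -1 (typical
// values, assumed here) and a query scale sigma = 6.4:
//   phi = log2(6.4/1.6) = 2
//   o   = floor(2 - (-1+0.5)/3) = floor(2.1667) = 2  (then clamped to [omin, omin+O-1])
//   s   = S*(phi - o) = 3*(2 - 2) = 0,  is = round(0) = 0 (already inside [smin+1, smax-2])
// so this keypoint is assigned octave 2 and integer scale level 0.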
// ===================================================================
// process()
// -------------------------------------------------------------------
/** @brief Compute Gaussian Scale Space
**
** The method computes the Gaussian scale space of the specified
** image. The scale space data is managed internally and can be
** accessed by means of getOctave() and getLevel().
**
** @remark Calling this method will delete the list of keypoints
** constructed by detectKeypoints().
**
** @param _im_pt pointer to image data.
** @param _width image width.
** @param _height image height .
**/
void
Sift::
process(const pixel_t* _im_pt, int _width, int _height)
{
using namespace Detail ;
width = _width ;
height = _height ;
prepareBuffers() ;
VL::float_t sigmak = powf(2.0f, 1.0 / S) ;
VL::float_t dsigma0 = sigma0 * sqrt (1.0f - 1.0f / (sigmak*sigmak) ) ;
// -----------------------------------------------------------------
// Make pyramid base
// -----------------------------------------------------------------
if( omin < 0 ) {
copyAndUpsampleRows(temp, _im_pt, width, height ) ;
copyAndUpsampleRows(octaves[0], temp, height, 2*width ) ;
for(int o = -1 ; o > omin ; --o) {
copyAndUpsampleRows(temp, octaves[0], width << -o, height << -o) ;
copyAndUpsampleRows(octaves[0], temp, height << -o, 2*(width << -o)) ; }
} else if( omin > 0 ) {
copyAndDownsample(octaves[0], _im_pt, width, height, 1 << omin) ;
} else {
copy(octaves[0], _im_pt, width, height) ;
}
{
VL::float_t sa = sigma0 * powf(sigmak, smin) ;
VL::float_t sb = sigman / powf(2.0f, omin) ; // review this
if( sa > sb ) {
VL::float_t sd = sqrt ( sa*sa - sb*sb ) ;
smooth( octaves[0], temp, octaves[0],
getOctaveWidth(omin),
getOctaveHeight(omin),
sd ) ;
}
}
// -----------------------------------------------------------------
// Make octaves
// -----------------------------------------------------------------
for(int o = omin ; o < omin+O ; ++o) {
// Prepare octave base
if( o > omin ) {
int sbest = std::min(smin + S, smax) ;
copyAndDownsample(getLevel(o, smin ),
getLevel(o-1, sbest),
getOctaveWidth(o-1),
getOctaveHeight(o-1), 2 ) ;
VL::float_t sa = sigma0 * powf(sigmak, smin ) ;
VL::float_t sb = sigma0 * powf(sigmak, sbest - S ) ;
if(sa > sb ) {
VL::float_t sd = sqrt ( sa*sa - sb*sb ) ;
smooth( getLevel(o,0), temp, getLevel(o,0),
getOctaveWidth(o), getOctaveHeight(o),
sd ) ;
}
}
// Make other levels
for(int s = smin+1 ; s <= smax ; ++s) {
VL::float_t sd = dsigma0 * powf(sigmak, s) ;
smooth( getLevel(o,s), temp, getLevel(o,s-1),
getOctaveWidth(o), getOctaveHeight(o),
sd ) ;
}
}
}
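// Note (added): the smoothing increments above follow from the semigroup
// property of Gaussian kernels, G_{s1} * G_{s2} = G_{sqrt(s1^2 + s2^2)}.
// Bringing an image already smoothed at level sb up to level sa therefore
// requires a kernel of sd = sqrt(sa^2 - sb^2), which is exactly how sd is
// computed for the octave bases. Likewise, for consecutive levels,
//   (sigma0 sigmak^(s-1))^2 + (dsigma0 sigmak^s)^2
//     = sigma0^2 sigmak^(2s-2) + sigma0^2 (1 - 1/sigmak^2) sigmak^(2s)
//     = sigma0^2 sigmak^(2s)
// so smoothing level s-1 with dsigma0*sigmak^s yields level s, matching the
// inner loop of process().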
/** @brief Sift detector
**
** The function runs the SIFT detector on the stored Gaussian scale
 ** space (see process()). The detector consists of the following steps
**
** - local maxima detection;
** - subpixel interpolation;
 ** - rejection of weak keypoints (@a threshold);
** - rejection of keypoints on edge-like structures (@a edgeThreshold).
**
** As they are found, keypoints are added to an internal list. This
** list can be accessed by means of the member functions
** getKeypointsBegin() and getKeypointsEnd(). The list is ordered by
 ** octave, which is useful to speed up computeKeypointOrientations()
** and computeKeypointDescriptor().
**/
__global__ void DogKernel(pixel_t* dst, pixel_t* srca, pixel_t* srcb, int width)
{
__shared__ pixel_t src[D_BLOCK_SIZE];
int tx = threadIdx.x;
int bx = blockIdx.x;
int by = blockIdx.y;
int Row = by;
int Col = bx*D_BLOCK_SIZE + tx; //D_BLOCK_SIZE = 128
src[tx] = srcb[Row * width + Col];
__syncthreads();
if (Col < width)
{
dst[Row * width + Col] = srca[Row * width + Col] - src[tx];
srca[Row * width + Col] = src[tx];
}
}
void Sift::Compute_Dog (pixel_t* pt, int o, int smin, int smax, int width, int height)
{
unsigned int src_size = sizeof(pixel_t) * (width*height);
unsigned int dst_size = sizeof(pixel_t) * (width*height);
pixel_t* dst_d = NULL;
pixel_t* srca_d = NULL;
pixel_t* srcb_d = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &dst_d, dst_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &srca_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &srcb_d, src_size));
dim3 dimBlock, dimGrid;
dimBlock.x = D_BLOCK_SIZE;
dimBlock.y = 1;
dimGrid.x = (width / D_BLOCK_SIZE) + ( (width % D_BLOCK_SIZE) ? 1:0 );
dimGrid.y = height;
pixel_t* srca = getLevel(o, smin) ;
CUDA_SAFE_CALL( cudaMemcpy(srca_d, srca, src_size, cudaMemcpyHostToDevice) );
for(int s = smin + 1 ; s <= smax ; ++s)
{
pixel_t* srcb = getLevel(o, s) ;
CUDA_SAFE_CALL( cudaMemcpy(srcb_d, srcb, src_size, cudaMemcpyHostToDevice) );
DogKernel<<<dimGrid, dimBlock>>>(dst_d, srca_d, srcb_d, width);
CUDA_SAFE_CALL(cudaMemcpy(pt, dst_d, dst_size, cudaMemcpyDeviceToHost));
pt = pt + width*height;
}
cudaFree(dst_d);
cudaFree(srca_d);
cudaFree(srcb_d);
}
__global__ void FindkKernel(kvalue* dst, pixel_t* srcT, pixel_t* srcM, pixel_t* srcB, int width, int height, float threshold, float edgethreshold)
{
__shared__ pixel_t Mtop[F_BLOCK_SIZE][F_BLOCK_SIZE]; //F_BLOCK_SIZE = F_TILE_SIZE + 2
__shared__ pixel_t Mmid[F_BLOCK_SIZE][F_BLOCK_SIZE];
__shared__ pixel_t Mbot[F_BLOCK_SIZE][F_BLOCK_SIZE];
int tx, ty, bx, by;
tx = threadIdx.x;
ty = threadIdx.y;
bx = blockIdx.x;
by = blockIdx.y;
int i, j, Row, Col;
float extr = 1.0f;
float Threshold = threshold;
float edgeThreshold = edgethreshold;
Row = by*F_TILE_SIZE + ty;
Col = bx*F_TILE_SIZE + tx;
if (Row < height && Col < width)
{
Mtop[ty][tx] = srcT[Row * width + Col];
Mmid[ty][tx] = srcM[Row * width + Col];
Mbot[ty][tx] = srcB[Row * width + Col];
//dst[Row * width + Col].flag = 0.0f;
}
else
{
Mtop[ty][tx] = 0;
Mmid[ty][tx] = 0;
Mbot[ty][tx] = 0;
}
__syncthreads();
if(ty < F_TILE_SIZE && tx < F_TILE_SIZE && Row < (height -1) && Col < (width-1))
{
if (Mmid[ty+1][tx+1] > 0)
{
for(i = 0; i < 3; i++)
{
for(j = 0; j < 3; j++)
{
if ( Mmid[ty+1][tx+1] < Mtop[ty+i][tx+j] || Mmid[ty+1][tx+1] < Mbot[ty+i][tx+j] ||
Mmid[ty+1][tx+1] < Mmid[ty][tx+j] || Mmid[ty+1][tx+1] < Mmid[ty+2][tx+j] ||
Mmid[ty+1][tx+1] < Mmid[ty+1][tx] || Mmid[ty+1][tx+1] < Mmid[ty+1][tx+2] ||
Mmid[ty+1][tx+1] < Threshold)
{ extr = 0; break; }
}
if (extr == 0)
break;
}
}
else
{
for(i = 0; i < 3; i++)
{
for(j = 0; j < 3; j++)
{
if ( Mmid[ty+1][tx+1] > Mtop[ty+i][tx+j] || Mmid[ty+1][tx+1] > Mbot[ty+i][tx+j] ||
Mmid[ty+1][tx+1] > Mmid[ty][tx+j] || Mmid[ty+1][tx+1] > Mmid[ty+2][tx+j] ||
Mmid[ty+1][tx+1] > Mmid[ty+1][tx] || Mmid[ty+1][tx+1] > Mmid[ty+1][tx+2] ||
Mmid[ty+1][tx+1] > Threshold * (-1))
{ extr = 0; break; }
}
if (extr == 0)
break;
}
}
__syncthreads();
if(extr == 1)
{
//float4 value = RefineKernel(Mtop, Mmid, Mbot, width, height, threshold, edgethreshold)
//int StepX = 0;
//int StepY = 0;
float ds = 0.0f;
float dy = 0.0f;
float dx = 0.0f;
float Vx2, fx, fy, fs, fxx, fyy, fss, fxy, fxs, fys;
// for(int iter = 0 ; iter < 5 ; ++iter) {
//tx = threadIdx.x + StepX;
//ty = threadIdx.y + StepY;
Vx2 = Mmid[ty+1][tx+1] * 2.0f;
fx = 0.5f * (Mmid[ty+1][tx+2] - Mmid[ty+1][tx]);
fy = 0.5f * (Mmid[ty+2][tx+1] - Mmid[ty][tx+1]);
fs = 0.5f * (Mbot[ty+1][tx+1] - Mtop[ty+1][tx+1]);
fxx = Mmid[ty+1][tx+2] + Mmid[ty+1][tx] - Vx2;
fyy = Mmid[ty+2][tx+1] + Mmid[ty][tx+1] - Vx2;
fss = Mbot[ty+1][tx+1] + Mtop[ty+1][tx+1] - Vx2;
fxy = 0.25f * (Mmid[ty+2][tx+2] + Mmid[ty][tx] - Mmid[ty+2][tx] - Mmid[ty][tx+2]);
fxs = 0.25f * (Mbot[ty+1][tx+2] + Mtop[ty+1][tx] - Mbot[ty+1][tx] - Mtop[ty+1][tx+2]);
fys = 0.25f * (Mbot[ty+2][tx+1] + Mtop[ty][tx+1] - Mbot[ty][tx+1] - Mtop[ty+2][tx+1]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10){
if(maxa == A1.x){
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x){
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y)){
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10) {
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10) {
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
}
}
}
// StepX= ((ds > 0.6 && ( bx*F_TILE_SIZE + tx + 1 ) < width -2) ? 1 : 0 ) + ((ds < -0.6 && (bx*F_TILE_SIZE + tx + 1) > 1 ) ? -1 : 0 ) ;
// StepY= ((dy > 0.6 && ( by*F_TILE_SIZE + ty + 1 )< height -2) ? 1 : 0 ) + ((dy < -0.6 && (by*F_TILE_SIZE + ty + 1) > 1 ) ? -1 : 0 ) ;
// if( StepX == 0 && StepY == 0 ) break ;
// }
float val = Mmid[ty+1][tx+1] + 0.5f * (fx * dx + fy * dy + fs * ds);
float score = (fxx + fyy) * (fxx + fyy) / (fxx * fyy - fxy * fxy);
if(fabs(val) > threshold && score < (edgeThreshold + 1)*(edgeThreshold + 1)/edgeThreshold && score >= 0 &&
fabs(dx) < 1.5 && fabs(dy) < 0.6 && fabs(ds) < 0.6 )
{
dst[(Row+1) * width + (Col+1)].dx = dx;
dst[(Row+1) * width + (Col+1)].dy = dy;
dst[(Row+1) * width + (Col+1)].ds = ds;
dst[(Row+1) * width + (Col+1)].flag = extr;
}
else
dst[(Row+1) * width + (Col+1)].flag = 0.0f;
}
else
dst[(Row+1) * width + (Col+1)].flag = extr;
}
}
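// -------------------------------------------------------------------
// Note (added): the refinement step inside FindkKernel models the DoG
// locally as a quadratic, D(d) ~ D + g.d + 0.5*d'*H*d, with gradient
// g = (fx,fy,fs) and Hessian H built from fxx..fys. Its extremum solves
// H*d = -g, which the kernel computes by Gaussian elimination with
// partial pivoting on the 3x3 system written out in the comment above.
// The interpolated response is val = D + 0.5*g.d, and the edge test
// compares score = (fxx+fyy)^2/(fxx*fyy - fxy^2) against (r+1)^2/r with
// r = edgeThreshold, as in the (commented-out) host-side refinement
// further below.
// -------------------------------------------------------------------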
void
Sift :: FindkExtrem(pixel_t* pt, int width, int height, int o, float xperiod, float threshold, float edgethreshold)
{
unsigned int dst_size = sizeof(kvalue) * (width*height);
unsigned int src_size = sizeof(pixel_t) * (width*height);
pixel_t* srcT_d = NULL;
pixel_t* srcM_d = NULL;
pixel_t* srcB_d = NULL;
kvalue* dst_d = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &srcT_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &srcM_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &srcB_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &dst_d, dst_size));
dim3 dimBlock, dimGrid;
dimBlock.x = F_BLOCK_SIZE;
dimBlock.y = F_BLOCK_SIZE;
dimGrid.x = ((width-2) / F_TILE_SIZE) + (((width-2) % F_TILE_SIZE) ? 1:0 );
dimGrid.y = ((height-2) / F_TILE_SIZE) + (((height-2) % F_TILE_SIZE) ? 1:0 );
pixel_t* src = pt;
// kvalue* dst;
// dst = new kvalue [width*height] ;
Keypoint k ;
int uu = 0;
for(int s = smin + 1 ; s <= smax-2 ; ++s)
{
CUDA_SAFE_CALL( cudaMemcpy(srcT_d, src, src_size, cudaMemcpyHostToDevice) );
src = src + width * height;
CUDA_SAFE_CALL( cudaMemcpy(srcM_d, src, src_size, cudaMemcpyHostToDevice) );
src = src + width * height;
CUDA_SAFE_CALL( cudaMemcpy(srcB_d, src, src_size, cudaMemcpyHostToDevice) );
src = src - width * height;
FindkKernel<<<dimGrid, dimBlock>>>(dst_d, srcT_d, srcM_d, srcB_d, width, height, 0.8*threshold, edgethreshold);
CUDA_SAFE_CALL(cudaMemcpy(Kmid, dst_d, dst_size, cudaMemcpyDeviceToHost));
float xn;
float yn;
float sn;
for(int y = 0; y < height-1; y++)
for(int x = 0; x < width-1; x++)
{
if (Kmid[width * y + x].flag == 1.0f)
{
xn = x + Kmid[width * y + x].dx;
yn = y + Kmid[width * y + x].dy;
sn = s + Kmid[width * y + x].ds;
if(xn >= 0 && xn <= width -1 && yn >= 0 && yn <= height -1 && sn >= smin && sn <= smax )
{
k.o = o;
k.ix = x ;
k.iy = y ;
k.is = s ;
k.x = xn * xperiod ;
k.y = yn * xperiod ;
k.s = sn;
k.sigma = getScaleFromIndex(o,sn) ;
keypoints.push_back(k);
KeyNum[o-omin]++;
uu++;
// std::cout<<x<<","<<y<<","<<s<<","<<k.x<<","<<k.y<<","<<k.sigma<<","<<"|| "<<std::flush;
}
}
}
}
//std::cout<<" " <<" "<<std::endl ;
std::cout<<"o is "<<o<<" total key number is "<<KeyNum[o-omin]<<std::endl;
cudaFree(srcT_d);
cudaFree(srcM_d);
cudaFree(srcB_d);
cudaFree(dst_d);
//free dst;
}
__global__ void FindkKernel_small(kvalue* dst, pixel_t* srcT, pixel_t* srcM, pixel_t* srcB, int width, int height, float threshold, float edgethreshold)
{
__shared__ pixel_t Mtop[F_BLOCK_SIZE_S][F_BLOCK_SIZE_S]; //F_BLOCK_SIZE_S = F_TILE_SIZE + 4
__shared__ pixel_t Mmid[F_BLOCK_SIZE_S][F_BLOCK_SIZE_S];
__shared__ pixel_t Mbot[F_BLOCK_SIZE_S][F_BLOCK_SIZE_S];
int tx, ty, bx, by;
tx = threadIdx.x;
ty = threadIdx.y;
bx = blockIdx.x;
by = blockIdx.y;
int i, j, Row, Col;
float extr = 1.0f;
float Threshold = threshold;
float edgeThreshold = edgethreshold;
Row = by*F_TILE_SIZE_S + ty;
Col = bx*F_TILE_SIZE_S + tx;
if ( ((Row - 1) < 0) || (Row > height) || ((Col - 1) < 0) || (Col > width) )
{
Mtop[ty][tx] = 0;
Mmid[ty][tx] = 0;
Mbot[ty][tx] = 0;
}
else
{
Mtop[ty][tx] = srcT[(Row-1) * width + (Col-1)];
Mmid[ty][tx] = srcM[(Row-1) * width + (Col-1)];
Mbot[ty][tx] = srcB[(Row-1) * width + (Col-1)];
//dst[(Row-1) * width + (Col-1)].flag = 0.0f;
}
__syncthreads();
if(ty < F_TILE_SIZE_S && tx < F_TILE_SIZE_S && Row < (height -2) && Col < (width-2))
{
if (Mmid[ty+2][tx+2] > 0)
{
for(i = 1; i < 4; i++)
{
for(j = 1; j < 4; j++)
{
if ( Mmid[ty+2][tx+2] < Mtop[ty+i][tx+j] || Mmid[ty+2][tx+2] < Mbot[ty+i][tx+j] ||
Mmid[ty+2][tx+2] < Mmid[ty+1][tx+j] || Mmid[ty+2][tx+2] < Mmid[ty+3][tx+j] ||
Mmid[ty+2][tx+2] < Mmid[ty+2][tx+1] || Mmid[ty+2][tx+2] < Mmid[ty+2][tx+3] ||
Mmid[ty+2][tx+2] < Threshold)
{ extr = 0; break; }
}
if (extr == 0)
break;
}
}
else
{
for(i = 1; i < 4; i++)
{
for(j = 1; j < 4; j++)
{
if ( Mmid[ty+2][tx+2] > Mtop[ty+i][tx+j] || Mmid[ty+2][tx+2] > Mbot[ty+i][tx+j] ||
Mmid[ty+2][tx+2] > Mmid[ty+1][tx+j] || Mmid[ty+2][tx+2] > Mmid[ty+3][tx+j] ||
Mmid[ty+2][tx+2] > Mmid[ty+2][tx+1] || Mmid[ty+2][tx+2] > Mmid[ty+2][tx+3] ||
Mmid[ty+2][tx+2] > Threshold * (-1))
{ extr = 0; break; }
}
if (extr == 0)
break;
}
}
__syncthreads();
if(extr == 1)
{
int StepX = 0;
int StepY = 0;
float ds = 0.0f;
float dy = 0.0f;
float dx = 0.0f;
float Vx2, fx, fy, fs, fxx, fyy, fss, fxy, fxs, fys;
for(int iter = 0 ; iter < 2 ; ++iter) {
tx = threadIdx.x + StepX;
ty = threadIdx.y + StepY;
Vx2 = Mmid[ty+2][tx+2] * 2.0f;
fx = 0.5f * (Mmid[ty+2][tx+3] - Mmid[ty+2][tx+1]);
fy = 0.5f * (Mmid[ty+3][tx+2] - Mmid[ty+1][tx+2]);
fs = 0.5f * (Mbot[ty+2][tx+2] - Mtop[ty+2][tx+2]);
fxx = Mmid[ty+2][tx+3] + Mmid[ty+2][tx+1] - Vx2;
fyy = Mmid[ty+3][tx+2] + Mmid[ty+1][tx+2] - Vx2;
fss = Mbot[ty+2][tx+2] + Mtop[ty+2][tx+2] - Vx2;
fxy = 0.25f * (Mmid[ty+3][tx+3] + Mmid[ty+1][tx+1] - Mmid[ty+3][tx+1] - Mmid[ty+1][tx+3]);
fxs = 0.25f * (Mbot[ty+2][tx+3] + Mtop[ty+2][tx+1] - Mbot[ty+2][tx+1] - Mtop[ty+2][tx+3]);
fys = 0.25f * (Mbot[ty+3][tx+2] + Mtop[ty+1][tx+2] - Mbot[ty+1][tx+2] - Mtop[ty+3][tx+2]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10){
if(maxa == A1.x){
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x){
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y)){
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10) {
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10) {
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
}
}
}
StepX= ((dx > 0.6 && ( bx*F_TILE_SIZE_S + tx + 2 ) < width -2) ? 1 : 0 ) + ((dx < -0.6 && (bx*F_TILE_SIZE_S + tx ) > 1 ) ? -1 : 0 ) ;
StepY= ((dy > 0.6 && ( by*F_TILE_SIZE_S + ty + 2 )< height -2) ? 1 : 0 ) + ((dy < -0.6 && (by*F_TILE_SIZE_S + ty ) > 1 ) ? -1 : 0 ) ;
if( StepX == 0 && StepY == 0 ) break ;
}
float val = Mmid[ty+2][tx+2] + 0.5f * (fx * dx + fy * dy + fs * ds);
float score = (fxx + fyy) * (fxx + fyy) / (fxx * fyy - fxy * fxy);
Row = by*F_TILE_SIZE_S + ty;
Col = bx*F_TILE_SIZE_S + tx;
if(fabs(val) > threshold && score < (edgeThreshold + 1)*(edgeThreshold + 1)/edgeThreshold && score >= 0 &&
fabs(dx) < 0.6 && fabs(dy) < 0.6 && fabs(ds) < 1.5 )
{
dst[(Row+1) * width + (Col+1)].dx = dx;
dst[(Row+1) * width + (Col+1)].dy = dy;
dst[(Row+1) * width + (Col+1)].ds = ds;
dst[(Row+1) * width + (Col+1)].flag = extr;
}
else
dst[(Row+1) * width + (Col+1)].flag = 0.0f;
}
else
dst[(Row+1) * width + (Col+1)].flag = extr;
}
}
void
Sift :: FindkExtrem_small(pixel_t* pt, int width, int height, int o, float xperiod, float threshold, float edgethreshold)
{
unsigned int dst_size = sizeof(kvalue) * (width*height);
unsigned int src_size = sizeof(pixel_t) * (width*height);
pixel_t* srcT_d = NULL;
pixel_t* srcM_d = NULL;
pixel_t* srcB_d = NULL;
kvalue* dst_d = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &srcT_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &srcM_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &srcB_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &dst_d, dst_size));
dim3 dimBlock, dimGrid;
dimBlock.x = F_BLOCK_SIZE_S;
dimBlock.y = F_BLOCK_SIZE_S;
dimGrid.x = ((width-2) / F_TILE_SIZE_S) + (((width-2) % F_TILE_SIZE_S) ? 1:0 );
dimGrid.y = ((height-2) / F_TILE_SIZE_S) + (((height-2) % F_TILE_SIZE_S) ? 1:0 );
pixel_t* src = pt;
int uu = 0;
for(int s = smin + 1 ; s <= smax-2 ; ++s)
{
CUDA_SAFE_CALL( cudaMemcpy(srcT_d, src, src_size, cudaMemcpyHostToDevice) );
src = src + width * height;
CUDA_SAFE_CALL( cudaMemcpy(srcM_d, src, src_size, cudaMemcpyHostToDevice) );
src = src + width * height;
CUDA_SAFE_CALL( cudaMemcpy(srcB_d, src, src_size, cudaMemcpyHostToDevice) );
src = src - width * height;
FindkKernel_small<<<dimGrid, dimBlock>>>(dst_d, srcT_d, srcM_d, srcB_d, width, height, 0.8*threshold, edgethreshold);
CUDA_SAFE_CALL(cudaMemcpy(Kmid, dst_d, dst_size, cudaMemcpyDeviceToHost));
float xn;
float yn;
float sn;
for(int y = 1; y < height-1; y++)
for(int x = 1; x < width-1; x++)
{
if (Kmid[width * y + x].flag == 1.0f)
{
xn = x + Kmid[width * y + x].dx;
yn = y + Kmid[width * y + x].dy;
sn = s + Kmid[width * y + x].ds;
if(xn >= 0 && xn <= width -1 && yn >= 0 && yn <= height -1 && sn >= smin && sn <= smax )
{
Keypoint k ;
k.o = o ;
k.ix = x ;
k.iy = y ;
k.is = s ;
k.x = xn * xperiod ;
k.y = yn * xperiod ;
k.s = sn;
k.sigma = getScaleFromIndex(o,sn) ;
keypoints.push_back(k) ;
KeyNum[o-omin]++;
uu++;
// std::cout<<x<<","<<y<<","<<s<<","<<k.x<<","<<k.y<<","<<k.sigma<<","<<"|| "<<std::flush;
//std::cout<<x<<","<<y<<","<<s<<", "<<std::flush;
}
}
}
}
std::cout<<" " <<" "<<std::endl ;
std::cout<<"o is "<<o<<" total key number is "<<KeyNum[o-omin]<<std::endl;
cudaFree(srcT_d);
cudaFree(srcM_d);
cudaFree(srcB_d);
cudaFree(dst_d);
//free dst;
//delete []Kdst;
}
void
Sift::detectKeypoints(VL::float_t threshold, VL::float_t edgeThreshold)
{
keypoints.clear() ;
//int nValidatedKeypoints = 0 ;
// Process one octave per time
for(int o = omin; o < omin + O; ++o) {
//int const xo = 1 ;
int const yo = getOctaveWidth(o) ;
int const so = getOctaveWidth(o) * getOctaveHeight(o) ;
int const ow = getOctaveWidth(o) ;
int const oh = getOctaveHeight(o) ;
VL::float_t xperiod = getOctaveSamplingPeriod(o) ;
// -----------------------------------------------------------------
// Difference of Gaussians
// -----------------------------------------------------------------
pixel_t* dog = temp ;
tempIsGrad = false ;
KeyNum[o-omin] = 0;
{
pixel_t* pt = dog ;
Compute_Dog (pt, o, smin, smax, yo, oh); //gpu function
/*
for(int s = smin ; s <= smax-1 ; ++s) {
pixel_t* srca = getLevel(o, s ) ;
pixel_t* srcb = getLevel(o, s+1) ;
pixel_t* enda = srcb ;
while( srca != enda ) {
*pt++ = *srcb++ - *srca++ ;
}
}
*/
}
// -----------------------------------------------------------------
// Find points of extremum
// -----------------------------------------------------------------
// std::cout<<" " <<" "<<std::endl ;
std::cout<<" " <<" "<<std::endl ;
//std::cout<<"O is "<<o<<" "<<std::endl ;
pixel_t* pt = dog ;
if (O < 8)
FindkExtrem_small(pt, yo, oh, o, xperiod, threshold, edgeThreshold);
else
FindkExtrem(pt, yo, oh, o, xperiod, threshold, edgeThreshold);
/*
{
int uu;
pixel_t* pt = dog + xo + yo + so ;
for(int s = smin+1 ; s <= smax-2 ; ++s) {
for(int y = 1 ; y < oh - 1 ; ++y) {
for(int x = 1 ; x < ow - 1 ; ++x) {
pixel_t v = *pt ;
// assert( (pt - x*xo - y*yo - (s-smin)*so) - dog == 0 ) ;
#define CHECK_NEIGHBORS(CMP,SGN) \
( v CMP ## = SGN 0.8 * threshold && \
v CMP *(pt + xo) && \
v CMP *(pt - xo) && \
v CMP *(pt + so) && \
v CMP *(pt - so) && \
v CMP *(pt + yo) && \
v CMP *(pt - yo) && \
\
v CMP *(pt + yo + xo) && \
v CMP *(pt + yo - xo) && \
v CMP *(pt - yo + xo) && \
v CMP *(pt - yo - xo) && \
\
v CMP *(pt + xo + so) && \
v CMP *(pt - xo + so) && \
v CMP *(pt + yo + so) && \
v CMP *(pt - yo + so) && \
v CMP *(pt + yo + xo + so) && \
v CMP *(pt + yo - xo + so) && \
v CMP *(pt - yo + xo + so) && \
v CMP *(pt - yo - xo + so) && \
\
v CMP *(pt + xo - so) && \
v CMP *(pt - xo - so) && \
v CMP *(pt + yo - so) && \
v CMP *(pt - yo - so) && \
v CMP *(pt + yo + xo - so) && \
v CMP *(pt + yo - xo - so) && \
v CMP *(pt - yo + xo - so) && \
v CMP *(pt - yo - xo - so) )
if( CHECK_NEIGHBORS(>,+) || CHECK_NEIGHBORS(<,-) ) {
Keypoint k ;
k.ix = x ;
k.iy = y ;
k.is = s ;
keypoints.push_back(k) ;
std::cout<<x<<","<<y<<","<<s<<","<<o<<","
<<" "<<std::flush ;
uu++;
}
pt += 1 ;
}
pt += 2 ;
}
pt += 2*yo ;
std::cout<<" "<<std::endl;
std::cout<<"s is "<<s<<" total key number is "<<uu<<std::endl;
uu = 0;
}
}
*/
// -----------------------------------------------------------------
// Refine local maxima
// -----------------------------------------------------------------
/* int uu;
{ // refine
KeypointsIter siter ;
KeypointsIter diter ;
for(diter = siter = keypointsBegin() + nValidatedKeypoints ;
siter != keypointsEnd() ;
++siter) {
int x = int( siter->ix ) ;
int y = int( siter->iy ) ;
int s = int( siter->is ) ;
VL::float_t Dx=0,Dy=0,Ds=0,Dxx=0,Dyy=0,Dss=0,Dxy=0,Dxs=0,Dys=0 ;
VL::float_t b [3] ;
pixel_t* pt ;
int dx = 0 ;
int dy = 0 ;
// must be exec. at least once
for(int iter = 0 ; iter < 5 ; ++iter) {
VL::float_t A[3*3] ;
x += dx ;
y += dy ;
pt = dog
+ xo * x
+ yo * y
+ so * (s - smin) ;
#define at(dx,dy,ds) (*( pt + (dx)*xo + (dy)*yo + (ds)*so))
#define Aat(i,j) (A[(i)+(j)*3])
Dx = 0.5 * (at(+1,0,0) - at(-1,0,0)) ;
Dy = 0.5 * (at(0,+1,0) - at(0,-1,0));
Ds = 0.5 * (at(0,0,+1) - at(0,0,-1)) ;
// Compute the Hessian.
Dxx = (at(+1,0,0) + at(-1,0,0) - 2.0 * at(0,0,0)) ;
Dyy = (at(0,+1,0) + at(0,-1,0) - 2.0 * at(0,0,0)) ;
Dss = (at(0,0,+1) + at(0,0,-1) - 2.0 * at(0,0,0)) ;
Dxy = 0.25 * ( at(+1,+1,0) + at(-1,-1,0) - at(-1,+1,0) - at(+1,-1,0) ) ;
Dxs = 0.25 * ( at(+1,0,+1) + at(-1,0,-1) - at(-1,0,+1) - at(+1,0,-1) ) ;
Dys = 0.25 * ( at(0,+1,+1) + at(0,-1,-1) - at(0,-1,+1) - at(0,+1,-1) ) ;
// Solve linear system.
Aat(0,0) = Dxx ;
Aat(1,1) = Dyy ;
Aat(2,2) = Dss ;
Aat(0,1) = Aat(1,0) = Dxy ;
Aat(0,2) = Aat(2,0) = Dxs ;
Aat(1,2) = Aat(2,1) = Dys ;
b[0] = - Dx ;
b[1] = - Dy ;
b[2] = - Ds ;
// Gauss elimination
for(int j = 0 ; j < 3 ; ++j) {
// look for leading pivot
VL::float_t maxa = 0 ;
VL::float_t maxabsa = 0 ;
int maxi = -1 ;
int i ;
for(i = j ; i < 3 ; ++i) {
VL::float_t a = Aat(i,j) ;
VL::float_t absa = fabsf( a ) ;
if ( absa > maxabsa ) {
maxa = a ;
maxabsa = absa ;
maxi = i ;
}
}
// singular?
if( maxabsa < 1e-10f ) {
b[0] = 0 ;
b[1] = 0 ;
b[2] = 0 ;
break ;
}
i = maxi ;
// swap j-th row with i-th row and
// normalize j-th row
for(int jj = j ; jj < 3 ; ++jj) {
std::swap( Aat(j,jj) , Aat(i,jj) ) ;
Aat(j,jj) /= maxa ;
}
std::swap( b[j], b[i] ) ;
b[j] /= maxa ;
// elimination
for(int ii = j+1 ; ii < 3 ; ++ii) {
VL::float_t x = Aat(ii,j) ;
for(int jj = j ; jj < 3 ; ++jj) {
Aat(ii,jj) -= x * Aat(j,jj) ;
}
b[ii] -= x * b[j] ;
}
}
// backward substitution
for(int i = 2 ; i > 0 ; --i) {
VL::float_t x = b[i] ;
for(int ii = i-1 ; ii >= 0 ; --ii) {
b[ii] -= x * Aat(ii,i) ;
}
}
// If the translation of the keypoint is big, move the keypoint
// and re-iterate the computation. Otherwise we are all set.
dx= ((b[0] > 0.6 && x < ow-2) ? 1 : 0 )
+ ((b[0] < -0.6 && x > 1 ) ? -1 : 0 ) ;
dy= ((b[1] > 0.6 && y < oh-2) ? 1 : 0 )
+ ((b[1] < -0.6 && y > 1 ) ? -1 : 0 ) ;
// std::cout<<x<<","<<y<<"="<<at(0,0,0) <<"(" <<at(0,0,0)+0.5 * (Dx * b[0] + Dy * b[1] + Ds * b[2])<<")" <<" "<<std::flush ;
if( dx == 0 && dy == 0 ) break ;
}
// Accept-reject keypoint
{
VL::float_t val = at(0,0,0) + 0.5 * (Dx * b[0] + Dy * b[1] + Ds * b[2]) ;
VL::float_t score = (Dxx+Dyy)*(Dxx+Dyy) / (Dxx*Dyy - Dxy*Dxy) ;
VL::float_t xn = x + b[0] ;
VL::float_t yn = y + b[1] ;
VL::float_t sn = s + b[2] ;
if(fast_abs(val) > threshold &&
score < (edgeThreshold+1)*(edgeThreshold+1)/edgeThreshold &&
score >= 0 &&
fast_abs(b[0]) < 1.5 &&
fast_abs(b[1]) < 1.5 &&
fast_abs(b[2]) < 1.5 &&
xn >= 0 &&
xn <= ow-1 &&
yn >= 0 &&
yn <= oh-1 &&
sn >= smin &&
sn <= smax )
{
diter->o = o ;
diter->ix = x ;
diter->iy = y ;
diter->is = s ;
diter->x = xn * xperiod ;
diter->y = yn * xperiod ;
diter->s = sn ;
diter->sigma = getScaleFromIndex(o,sn) ;
++diter ;
// std::cout<<x<<","<<y<<","<<s<<","<<o<<","<<" "<<std::flush;
uu++;
}
}
} // next candidate keypoint
// prepare for next octave
keypoints.resize( diter - keypoints.begin() ) ;
nValidatedKeypoints = keypoints.size() ;
} // refine block
// std::cout<<" " <<" "<<std::endl ;
// std::cout<<" total key number is "<<uu<<std::endl;
uu = 0;
*/
} // next octave
}
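// -------------------------------------------------------------------
// Illustrative driver sketch (added; not part of the original file).
// It shows the intended call order: build the Gaussian scale space,
// detect keypoints, then compute orientations and descriptors for each
// keypoint. The parameter values (sigman, sigma0, O, S, omin, smin,
// smax and the thresholds) are common defaults and are assumptions
// here, as is the use of the KeypointsIter/keypointsBegin()/
// keypointsEnd() accessors referenced in the commented-out refinement
// block above.
// -------------------------------------------------------------------
static void exampleRunSift(const pixel_t* im, int width, int height)
{
  int O = 4, S = 3, omin = -1 ;
  Sift sift(im, width, height,
            /* sigman = */ 0.5f,
            /* sigma0 = */ 1.6f * powf(2.0f, 1.0f / S),
            O, S, omin, /* smin = */ -1, /* smax = */ S + 1) ;
  sift.detectKeypoints(/* threshold = */ 0.04f / S / 2.0f,
                       /* edgeThreshold = */ 10.0f) ;
  for(Sift::KeypointsIter k = sift.keypointsBegin() ;
      k != sift.keypointsEnd() ; ++k) {
    VL::float_t angles [4] ;
    VL::float_t descr [128] ;   // NBO*NBP*NBP = 8*4*4 bins
    int nangles = sift.computeKeypointOrientations(angles, *k) ;
    for(int a = 0 ; a < nangles ; ++a)
      sift.computeKeypointDescriptor(descr, *k, angles[a]) ;
  }
}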
// ===================================================================
// computeKeypointOrientations()
// -------------------------------------------------------------------
/** @brief Compute modulus and phase of the gradient
**
** The function computes the modulus and the angle of the gradient of
** the specified octave @a o. The result is stored in a temporary
** internal buffer accessed by computeKeypointDescriptor() and
** computeKeypointOrientations().
**
** The SIFT detector provides keypoint with scale index s in the
** range @c smin+1 and @c smax-2. As such, the buffer contains only
** these levels.
**
 ** If called multiple times on the same data, the function exits
** immediately.
**
** @param o octave of interest.
**/
__global__ void GradKernelZ(pixel_t* src, pixel_t* dst, int width, int height, int square)
{
__shared__ pixel_t Ms[G_BLOCK_SIZE][G_BLOCK_SIZE]; //
int tx, ty, bx, by, bz;
//float m, t;
tx = threadIdx.x;
ty = threadIdx.y;
bx = blockIdx.x;
by = blockIdx.y;
bz = blockIdx.z;
int Row = by*G_TILE_SIZE + ty;
int Col = bx*G_TILE_SIZE + tx;
int Dep = bz*square;
if (Row < height && Col < width)
{
Ms[ty][tx] = src[Dep + Row * width + Col];
}
else
{
Ms[ty][tx] = 0.0f;
}
__syncthreads();
if(ty < G_TILE_SIZE && tx < G_TILE_SIZE && Row < (height -1) && Col < (width-1))
{
float_t Gx = 0.5f * (Ms[ty+1][tx+2] - Ms[ty+1][tx]);
float_t Gy = 0.5f * (Ms[ty+2][tx+1] - Ms[ty][tx+1]);
float_t m = sqrt( Gx*Gx + Gy*Gy );
float_t x = atan2(Gy, Gx) + float(2*M_PI);
float_t t = (x >= 0)? fmod (x, float(2*M_PI)) : float(2*M_PI) + fmod (x, float(2*M_PI));
dst[2*Dep + 2*width*(Row + 1) + 2*(Col + 1)] = m;
dst[2*Dep + 2*width*(Row + 1) + 2*(Col + 1) + 1] = t;
}
}
__global__ void GradKernel(pixel_t* src, pixel_t* dst, int width, int height, int square)
{
__shared__ pixel_t Ms[G_BLOCK_SIZE][G_BLOCK_SIZE]; //F_BLOCK_SIZE = F_TILE_SIZE + 2
int tx, ty, bx, by;
//float m, t;
tx = threadIdx.x;
ty = threadIdx.y;
bx = blockIdx.x;
by = blockIdx.y;
int Row = by*G_TILE_SIZE + ty;
int Col = bx*G_TILE_SIZE + tx;
if (Row < height && Col < width)
{
Ms[ty][tx] = src[ Row * width + Col];
}
else
{
Ms[ty][tx] = 0.0f;
}
__syncthreads();
if(ty < G_TILE_SIZE && tx < G_TILE_SIZE && Row < (height -1) && Col < (width-1))
{
float_t Gx = 0.5f * (Ms[ty+1][tx+2] - Ms[ty+1][tx]);
float_t Gy = 0.5f * (Ms[ty+2][tx+1] - Ms[ty][tx+1]);
float_t m = sqrt( Gx*Gx + Gy*Gy );
float_t x = atan2(Gy, Gx) + float(2*M_PI);
float_t t = (x >= 0)? fmod (x, float(2*M_PI)) : float(2*M_PI) + fmod (x, float(2*M_PI));
dst[ 2*width*(Row + 1) + 2*(Col + 1)] = m;
dst[ 2*width*(Row + 1) + 2*(Col + 1) + 1] = t;
}
}
void
Sift::GradinGpu(pixel_t* pt, int o, int width, int height)
{
//int S = smax - smin - 2;
int square = width * height;
//unsigned int dst_size = sizeof(pixel_t) * (2*S*width*height);
// unsigned int src_size = sizeof(pixel_t) * (S*width*height);
unsigned int dst_size = sizeof(pixel_t) * (2*width*height);
unsigned int src_size = sizeof(pixel_t) * (width*height);
pixel_t* src_d = NULL;
pixel_t* dst_d = NULL;
CUDA_SAFE_CALL( cudaMalloc( (void**) &src_d, src_size));
CUDA_SAFE_CALL( cudaMalloc( (void**) &dst_d, dst_size));
dim3 dimBlock, dimGrid;
dimBlock.x = G_BLOCK_SIZE;
dimBlock.y = G_BLOCK_SIZE;
dimGrid.x = ((width-2) / G_TILE_SIZE) + (((width-2) % G_TILE_SIZE) ? 1:0 );
dimGrid.y = ((height-2) / G_TILE_SIZE) + (((height-2) % G_TILE_SIZE) ? 1:0 );
//dimGrid.z = S;
dimGrid.z = 1;
for(int s = smin + 1 ; s <= smax-2 ; ++s)
{
pixel_t* src = getLevel(o, s);
CUDA_SAFE_CALL( cudaMemcpy(src_d, src, src_size, cudaMemcpyHostToDevice) );
GradKernel<<<dimGrid, dimBlock>>>(src_d, dst_d, width, height, square);
CUDA_SAFE_CALL(cudaMemcpy(pt, dst_d, dst_size, cudaMemcpyDeviceToHost));
pt = pt + 2*width*height;
}
cudaFree(src_d);
cudaFree(dst_d);
}
void
Sift::prepareGrad(int o)
{
int const ow = getOctaveWidth(o) ;
int const oh = getOctaveHeight(o) ;
//int const xo = 1 ;
int const yo = ow ;
//int const so = oh*ow ;
if( ! tempIsGrad || tempOctave != o ) {
/*
// compute dx/dy
for(int s = smin+1 ; s <= smax-2 ; ++s) {
for(int y = 1 ; y < oh-1 ; ++y ) {
pixel_t* src = getLevel(o, s) + xo + yo*y ;
pixel_t* end = src + ow - 1 ;
pixel_t* grad = 2 * (xo + yo*y + (s - smin -1)*so) + temp ;
while(src != end) {
VL::float_t Gx = 0.5 * ( *(src+xo) - *(src-xo) ) ;
VL::float_t Gy = 0.5 * ( *(src+yo) - *(src-yo) ) ;
VL::float_t m = fast_sqrt( Gx*Gx + Gy*Gy ) ;
VL::float_t t = fast_mod_2pi( fast_atan2(Gy, Gx) + VL::float_t(2*M_PI) );
*grad++ = pixel_t( m ) ;
*grad++ = pixel_t( t ) ;
++src ;
}
}
}
*/
pixel_t* grad = temp;
GradinGpu(grad, o, yo, oh);
}
tempIsGrad = true ;
tempOctave = o ;
}
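// -------------------------------------------------------------------
// Illustrative sketch (added): how to read the cached gradient buffer
// for octave o once prepareGrad(o) has filled it. Each pixel stores the
// interleaved pair (modulus, angle), and only scale indices
// smin+1 .. smax-2 are present, which is why the offsets below match
// those used in computeKeypointOrientations() and
// computeKeypointDescriptor(). The helper name is hypothetical.
// -------------------------------------------------------------------
static inline void exampleReadGrad(pixel_t const* grad, int ow, int oh, int smin,
                                   int x, int y, int s,
                                   VL::float_t& mod, VL::float_t& ang)
{
  int const xo = 2 ;        // two entries (modulus, angle) per pixel
  int const yo = xo * ow ;  // row stride
  int const so = yo * oh ;  // scale-level stride
  pixel_t const* pt = grad + x*xo + y*yo + (s - smin - 1)*so ;
  mod = pt[0] ;
  ang = pt[1] ;
}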
__device__ void normalize_histogram(float* L_begin, float* L_end)
{
float* L_iter ;
float norm = 0.0f ;
for(L_iter = L_begin; L_iter != L_end ; ++L_iter)
norm += (*L_iter) * (*L_iter) ;
norm = sqrt(norm) ;
for(L_iter = L_begin; L_iter != L_end ; ++L_iter)
*L_iter /= norm;
// *L_iter /= (norm + numeric_limits<float>::epsilon() ) ;
}
__global__ void GetkKernel(Sift::Keypoint* Kin, pixel_t* Grad, OKvalue* Kout, int Klength, int width, int height, int smin, float xperiod, int magnif)
{
int i;
int tx = threadIdx.x;
int bx = blockIdx.x;
int index = bx * K_BLOCK_SIZE + tx;
VL::float_t angles [4];
int nbins = 36;
//VL::float_t WinFactor = 1.5f;
VL::float_t hist[36];
int ow = width;
int oh = height;
int xo = 2;
int yo = xo * ow;
int so = yo * oh;
if (index < Klength){
VL::float_t x = Kin[index].x / xperiod;
VL::float_t y = Kin[index].y / xperiod;
VL::float_t sigma = Kin[index].sigma / xperiod;
int xi = ((int) (x+0.5)) ;
int yi = ((int) (y+0.5)) ;
int si = Kin[index].is ;
VL::float_t sigmaw = 1.50f * sigma; //winFactor
int Wo = (int) floor(3.0 * sigmaw);
int NBO = 8;
int NBP = 4;
VL::float_t SBP = magnif*sigma;
int Wd = (int) floor (sqrt(2.0) * SBP * (NBP + 1) / 2.0 + 0.5) ;
int binto = 1;
int binyo = NBO*NBP;
int binxo = NBO;
int bin;
for (i = 0; i < nbins; i++)
hist[i] = 0.0f;
pixel_t* pt = Grad + xi * xo + yi * yo + (si - smin -1) * so ;
for(int ys = max(-Wo, 1-yi) ; ys <= min(+Wo, oh -2 -yi) ; ++ys) {
for(int xs = max(-Wo, 1-xi) ; xs <= min(+Wo, ow -2 -xi) ; ++xs) {
VL::float_t dx = xi + xs - x;
VL::float_t dy = yi + ys - y;
VL::float_t r2 = dx*dx + dy*dy ;
if(r2 >= Wo*Wo+0.5) continue ;
VL::float_t wgt = exp(-(r2 / (2*sigmaw*sigmaw))) ;
VL::float_t mod = *(pt + xs*xo + ys*yo) ;
VL::float_t ang = *(pt + xs*xo + ys*yo + 1) ;
int bin = (int) floor( nbins * ang / (2*M_PI) ) ;
hist[bin] += mod * wgt ;
}
}
#if defined VL_LOWE_STRICT
// Lowe's version apparently has a little issue with orientations
// around + or - pi, which we reproduce here for compatibility
for (int iter = 0; iter < 6; iter++) {
VL::float_t prev = hist[nbins/2] ;
for (int i = nbins/2-1; i >= -nbins/2 ; --i) {
int j = (i + nbins) % nbins ;
int jp = (i - 1 + nbins) % nbins ;
VL::float_t newh = (prev + hist[j] + hist[jp]) / 3.0;
prev = hist[j] ;
hist[j] = newh ;
}
}
#else
// this is slightly more correct
for (int iter = 0; iter < 6; iter++) {
VL::float_t prev = hist[nbins-1] ;
VL::float_t first = hist[0] ;
for (i = 0; i < nbins - 1; i++) {
VL::float_t newh = (prev + hist[i] + hist[(i+1) % nbins]) / 3.0;
prev = hist[i] ;
hist[i] = newh ;
}
hist[i] = (prev + hist[i] + first)/3.0 ;
}
#endif
//VL::float_t maxh = * std::max_element(hist, hist + nbins) ;
VL::float_t maxh = 0;
for (int i = 0; i < nbins; i++)
maxh = max(maxh, hist[i]);
int nangles = 0 ;
for(int i = 0 ; i < nbins ; ++i) {
VL::float_t h0 = hist [i] ;
VL::float_t hm = hist [(i-1+nbins) % nbins] ;
VL::float_t hp = hist [(i+1+nbins) % nbins] ;
// is this a peak?
if( h0 > 0.8*maxh && h0 > hm && h0 > hp ){
VL::float_t di = -0.5 * (hp - hm) / (hp+hm-2*h0) ;
VL::float_t th = 2*M_PI * (i+di+0.5) / nbins ;
angles [ nangles ] = th ;
Kout[index].th[nangles] = th;
nangles++;
if( nangles == 4 )
break;
}
}
Kout[index].nangles = nangles;
////**************descriptor section******************//
for(int a = 0 ; a < nangles ; ++a) {
VL::float_t descr_pt[128];
for (int i = 0; i < 128; i ++)
descr_pt[i] = 0.0f;
VL::float_t* dpt = descr_pt + (NBP/2) * binyo + (NBP/2) * binxo;
VL::float_t st0 = sinf( angles[a] ) ;
VL::float_t ct0 = cosf( angles[a] ) ;
#define atd(dbinx,dbiny,dbint) *(dpt + (dbint)*binto + (dbiny)*binyo + (dbinx)*binxo)
for(int dyi = max(-Wd, 1-yi) ; dyi <= min(+Wd, oh-2-yi) ; ++dyi) {
for(int dxi = max(-Wd, 1-xi) ; dxi <= min(+Wd, ow-2-xi) ; ++dxi) {
VL::float_t mod = *( pt + dxi*xo + dyi*yo + 0 ) ;
VL::float_t angle = *( pt + dxi*xo + dyi*yo + 1 ) ;
//VL::float_t x = (angles[a] - angle) ;
VL::float_t theta = ((angles[a] - angle) >= 0)? fmod ((angles[a] - angle), float(2*M_PI)) : float(2*M_PI) + fmod ((angles[a] - angle), float(2*M_PI)); // lowe compatible ?
VL::float_t dx = xi + dxi - x;
VL::float_t dy = yi + dyi - y;
// get the displacement normalized w.r.t. the keypoint
// orientation and extension.
VL::float_t nx = ( ct0 * dx + st0 * dy) / SBP ;
VL::float_t ny = (-st0 * dx + ct0 * dy) / SBP ;
VL::float_t nt = NBO * theta / (2*M_PI) ;
// Get the gaussian weight of the sample. The gaussian window
// has a standard deviation equal to NBP/2. Note that dx and dy
// are in the normalized frame, so that -NBP/2 <= dx <= NBP/2.
VL::float_t const wsigma = NBP/2 ;
VL::float_t win = exp(-((nx*nx + ny*ny)/(2.0 * wsigma * wsigma))) ;
// The sample will be distributed in 8 adjacent bins.
// We start from the ``lower-left'' bin.
int binx = floor( nx - 0.5 ) ;
int biny = floor( ny - 0.5 ) ;
int bint = floor( nt ) ;
VL::float_t rbinx = nx - (binx+0.5) ;
VL::float_t rbiny = ny - (biny+0.5) ;
VL::float_t rbint = nt - bint ;
int dbinx ;
int dbiny ;
int dbint ;
// Distribute the current sample into the 8 adjacent bins
for(dbinx = 0 ; dbinx < 2 ; ++dbinx)
for(dbiny = 0 ; dbiny < 2 ; ++dbiny)
for(dbint = 0 ; dbint < 2 ; ++dbint)
if( binx+dbinx >= -(NBP/2) && binx+dbinx < (NBP/2) && biny+dbiny >= -(NBP/2) && biny+dbiny < (NBP/2) ) {
VL::float_t weight = win * mod * abs (1 - dbinx - rbinx) * abs (1 - dbiny - rbiny) * abs (1 - dbint - rbint) ;
atd(binx+dbinx, biny+dbiny, (bint+dbint) % NBO) += weight ;
}
}
}
//if( normalizeDescriptor ) {
normalize_histogram(descr_pt, descr_pt + NBO*NBP*NBP) ;
for(bin = 0; bin < NBO*NBP*NBP ; ++bin) {
if (descr_pt[bin] > 0.2) descr_pt[bin] = 0.2;
}
normalize_histogram(descr_pt, descr_pt + NBO*NBP*NBP) ;
//}
for (int i = 0; i < 128; i ++)
Kout[index].descr_pt[a*128 + i ] = descr_pt[i];
}
}
}
/** @brief Compute the orientation(s) of a keypoint
**
** The function computes the orientation of the specified keypoint.
** The function returns up to four different orientations, obtained
** as strong peaks of the histogram of gradient orientations (a
** keypoint can theoretically generate more than four orientations,
** but this is very unlikely).
**
 ** @remark The function needs to compute the gradient modulus and
 ** orientation of the Gaussian scale space octave to which the
 ** keypoint belongs. The result is cached, but discarded if different
 ** octaves are visited. Therefore it is much quicker to evaluate the
** keypoints in their natural octave order.
**
** The keypoint must lie within the scale space. In particular, the
 ** scale index is supposed to be in the range @c smin+1 and @c smax-2
** (this is from the SIFT detector). If this is not the case, the
** computation is silently aborted and no orientations are returned.
**
** @param angles buffers to store the resulting angles.
** @param keypoint keypoint to process.
** @return number of orientations found.
**/
int
Sift::computeKeypointOrientations(VL::float_t angles [4], Keypoint keypoint)
{
int const nbins = 36 ;
VL::float_t const winFactor = 1.5 ;
VL::float_t hist [nbins] ;
// octave
int o = keypoint.o ;
VL::float_t xperiod = getOctaveSamplingPeriod(o) ;
// offsets to move in the Gaussian scale space octave
const int ow = getOctaveWidth(o) ;
const int oh = getOctaveHeight(o) ;
const int xo = 2 ;
const int yo = xo * ow ;
const int so = yo * oh ;
// keypoint fractional geometry
VL::float_t x = keypoint.x / xperiod ;
VL::float_t y = keypoint.y / xperiod ;
VL::float_t sigma = keypoint.sigma / xperiod ;
// shall we use keypoints.ix,iy,is here?
int xi = ((int) (x+0.5)) ;
int yi = ((int) (y+0.5)) ;
int si = keypoint.is ;
VL::float_t const sigmaw = winFactor * sigma ;
int W = (int) floor(3.0 * sigmaw) ;
// skip the keypoint if it is out of bounds
if(o < omin ||
o >=omin+O ||
xi < 0 ||
xi > ow-1 ||
yi < 0 ||
yi > oh-1 ||
si < smin+1 ||
si > smax-2 ) {
std::cerr<<"!"<<std::endl ;
return 0 ;
}
// make sure that the gradient buffer is filled with octave o
prepareGrad(o) ;
// clear the SIFT histogram
std::fill(hist, hist + nbins, 0) ;
// fill the SIFT histogram
pixel_t* pt = temp + xi * xo + yi * yo + (si - smin -1) * so ;
#undef at
#define at(dx,dy) (*(pt + (dx)*xo + (dy)*yo))
for(int ys = std::max(-W, 1-yi) ; ys <= std::min(+W, oh -2 -yi) ; ++ys) {
for(int xs = std::max(-W, 1-xi) ; xs <= std::min(+W, ow -2 -xi) ; ++xs) {
VL::float_t dx = xi + xs - x;
VL::float_t dy = yi + ys - y;
VL::float_t r2 = dx*dx + dy*dy ;
// limit to a circular window
if(r2 >= W*W+0.5) continue ;
VL::float_t wgt = VL::fast_expn( r2 / (2*sigmaw*sigmaw) ) ;
VL::float_t mod = *(pt + xs*xo + ys*yo) ;
VL::float_t ang = *(pt + xs*xo + ys*yo + 1) ;
// int bin = (int) floor( nbins * ang / (2*M_PI) ) ;
int bin = (int) floor( nbins * ang / (2*M_PI) ) ;
hist[bin] += mod * wgt ;
}
}
// smooth the histogram
#if defined VL_LOWE_STRICT
// Lowe's version apparently has a little issue with orientations
// around + or - pi, which we reproduce here for compatibility
for (int iter = 0; iter < 6; iter++) {
VL::float_t prev = hist[nbins/2] ;
for (int i = nbins/2-1; i >= -nbins/2 ; --i) {
int const j = (i + nbins) % nbins ;
int const jp = (i - 1 + nbins) % nbins ;
VL::float_t newh = (prev + hist[j] + hist[jp]) / 3.0;
prev = hist[j] ;
hist[j] = newh ;
}
}
#else
// this is slightly more correct
for (int iter = 0; iter < 6; iter++) {
VL::float_t prev = hist[nbins-1] ;
VL::float_t first = hist[0] ;
int i ;
for (i = 0; i < nbins - 1; i++) {
VL::float_t newh = (prev + hist[i] + hist[(i+1) % nbins]) / 3.0;
prev = hist[i] ;
hist[i] = newh ;
}
hist[i] = (prev + hist[i] + first)/3.0 ;
}
#endif
// find the histogram maximum
VL::float_t maxh = * std::max_element(hist, hist + nbins) ;
// find peaks within 80% from max
int nangles = 0 ;
for(int i = 0 ; i < nbins ; ++i) {
VL::float_t h0 = hist [i] ;
VL::float_t hm = hist [(i-1+nbins) % nbins] ;
VL::float_t hp = hist [(i+1+nbins) % nbins] ;
// is this a peak?
if( h0 > 0.8*maxh && h0 > hm && h0 > hp ) {
// quadratic interpolation
// VL::float_t di = -0.5 * (hp - hm) / (hp+hm-2*h0) ;
VL::float_t di = -0.5 * (hp - hm) / (hp+hm-2*h0) ;
VL::float_t th = 2*M_PI * (i+di+0.5) / nbins ;
angles [ nangles++ ] = th ;
if( nangles == 4 )
goto enough_angles ;
}
}
enough_angles:
return nangles ;
}
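// Note (added): the quadratic interpolation above is the vertex of the
// parabola fitted through the three histogram samples (i-1,hm), (i,h0),
// (i+1,hp): writing f(t) = a t^2 + b t + c with f(-1)=hm, f(0)=h0,
// f(1)=hp gives a = (hp+hm-2*h0)/2 and b = (hp-hm)/2, so the peak lies at
// t* = -b/(2a) = -0.5*(hp-hm)/(hp+hm-2*h0), which is the expression used for di.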
// ===================================================================
// computeKeypointDescriptor()
// -------------------------------------------------------------------
namespace Detail {
/** Normalizes in norm L_2 a descriptor. */
void
normalize_histogram(VL::float_t* L_begin, VL::float_t* L_end)
{
VL::float_t* L_iter ;
VL::float_t norm = 0.0 ;
for(L_iter = L_begin; L_iter != L_end ; ++L_iter)
norm += (*L_iter) * (*L_iter) ;
norm = fast_sqrt(norm) ;
for(L_iter = L_begin; L_iter != L_end ; ++L_iter)
*L_iter /= (norm + std::numeric_limits<VL::float_t>::epsilon() ) ;
}
}
/** @brief SIFT descriptor
**
** The function computes the descriptor of the keypoint @a keypoint.
** The function fills the buffer @a descr_pt which must be large
 ** enough. The function uses @a angle0 as rotation of the keypoint.
** By calling the function multiple times, different orientations can
** be evaluated.
**
 ** @remark The function needs to compute the gradient modulus and
 ** orientation of the Gaussian scale space octave to which the
 ** keypoint belongs. The result is cached, but discarded if different
 ** octaves are visited. Therefore it is much quicker to evaluate the
** keypoints in their natural octave order.
**
 ** The function silently aborts the computation of keypoints outside
 ** the scale space boundaries. See also siftComputeOrientations().
**/
void
Sift::computeKeypointDescriptor
(VL::float_t* descr_pt,
Keypoint keypoint,
VL::float_t angle0)
{
/* The SIFT descriptor is a three dimensional histogram of the position
* and orientation of the gradient. There are NBP bins for each spatial
* dimesions and NBO bins for the orientation dimesion, for a total of
* NBP x NBP x NBO bins.
*
* The support of each spatial bin has an extension of SBP = 3sigma
* pixels, where sigma is the scale of the keypoint. Thus all the bins
* together have a support SBP x NBP pixels wide . Since weighting and
* interpolation of pixel is used, another half bin is needed at both
* ends of the extension. Therefore, we need a square window of SBP x
* (NBP + 1) pixels. Finally, since the patch can be arbitrarly rotated,
* we need to consider a window 2W += sqrt(2) x SBP x (NBP + 1) pixels
* wide.
*/
// octave
int o = keypoint.o ;
VL::float_t xperiod = getOctaveSamplingPeriod(o) ;
// offsets to move in Gaussian scale space octave
const int ow = getOctaveWidth(o) ;
const int oh = getOctaveHeight(o) ;
const int xo = 2 ;
const int yo = xo * ow ;
const int so = yo * oh ;
// keypoint fractional geometry
VL::float_t x = keypoint.x / xperiod;
VL::float_t y = keypoint.y / xperiod ;
VL::float_t sigma = keypoint.sigma / xperiod ;
VL::float_t st0 = sinf( angle0 ) ;
VL::float_t ct0 = cosf( angle0 ) ;
// shall we use keypoints.ix,iy,is here?
int xi = ((int) (x+0.5)) ;
int yi = ((int) (y+0.5)) ;
int si = keypoint.is ;
// const VL::float_t magnif = 3.0f ;
const int NBO = 8 ;
const int NBP = 4 ;
const VL::float_t SBP = magnif * sigma ;
const int W = (int) floor (sqrt(2.0) * SBP * (NBP + 1) / 2.0 + 0.5) ;
/* Offsets to move in the descriptor. */
/* Use Lowe's convention. */
const int binto = 1 ;
const int binyo = NBO * NBP ;
const int binxo = NBO ;
// const int bino = NBO * NBP * NBP ;
int bin ;
// check bounds
if(o < omin ||
o >=omin+O ||
xi < 0 ||
xi > ow-1 ||
yi < 0 ||
yi > oh-1 ||
si < smin+1 ||
si > smax-2 )
return ;
// make sure gradient buffer is up-to-date
prepareGrad(o) ;
std::fill( descr_pt, descr_pt + NBO*NBP*NBP, 0 ) ;
/* Center the scale space and the descriptor on the current keypoint.
* Note that dpt is pointing to the bin of center (SBP/2,SBP/2,0).
*/
pixel_t const * pt = temp + xi*xo + yi*yo + (si - smin - 1)*so ;
VL::float_t * dpt = descr_pt + (NBP/2) * binyo + (NBP/2) * binxo ;
#define atd(dbinx,dbiny,dbint) *(dpt + (dbint)*binto + (dbiny)*binyo + (dbinx)*binxo)
/*
* Process pixels in the intersection of the image rectangle
* (1,1)-(M-1,N-1) and the keypoint bounding box.
*/
for(int dyi = std::max(-W, 1-yi) ; dyi <= std::min(+W, oh-2-yi) ; ++dyi) {
for(int dxi = std::max(-W, 1-xi) ; dxi <= std::min(+W, ow-2-xi) ; ++dxi) {
// retrieve
VL::float_t mod = *( pt + dxi*xo + dyi*yo + 0 ) ;
VL::float_t angle = *( pt + dxi*xo + dyi*yo + 1 ) ;
VL::float_t theta = fast_mod_2pi(-angle + angle0) ; // lowe compatible ?
// fractional displacement
VL::float_t dx = xi + dxi - x;
VL::float_t dy = yi + dyi - y;
// get the displacement normalized w.r.t. the keypoint
// orientation and extension.
VL::float_t nx = ( ct0 * dx + st0 * dy) / SBP ;
VL::float_t ny = (-st0 * dx + ct0 * dy) / SBP ;
VL::float_t nt = NBO * theta / (2*M_PI) ;
// Get the gaussian weight of the sample. The gaussian window
// has a standard deviation equal to NBP/2. Note that dx and dy
// are in the normalized frame, so that -NBP/2 <= dx <= NBP/2.
VL::float_t const wsigma = NBP/2 ;
VL::float_t win = VL::fast_expn((nx*nx + ny*ny)/(2.0 * wsigma * wsigma)) ;
// The sample will be distributed in 8 adjacent bins.
// We start from the ``lower-left'' bin.
int binx = fast_floor( nx - 0.5 ) ;
int biny = fast_floor( ny - 0.5 ) ;
int bint = fast_floor( nt ) ;
VL::float_t rbinx = nx - (binx+0.5) ;
VL::float_t rbiny = ny - (biny+0.5) ;
VL::float_t rbint = nt - bint ;
int dbinx ;
int dbiny ;
int dbint ;
// Distribute the current sample into the 8 adjacent bins
for(dbinx = 0 ; dbinx < 2 ; ++dbinx) {
for(dbiny = 0 ; dbiny < 2 ; ++dbiny) {
for(dbint = 0 ; dbint < 2 ; ++dbint) {
if( binx+dbinx >= -(NBP/2) &&
binx+dbinx < (NBP/2) &&
biny+dbiny >= -(NBP/2) &&
biny+dbiny < (NBP/2) ) {
VL::float_t weight = win
* mod
* fast_abs (1 - dbinx - rbinx)
* fast_abs (1 - dbiny - rbiny)
* fast_abs (1 - dbint - rbint) ;
atd(binx+dbinx, biny+dbiny, (bint+dbint) % NBO) += weight ;
}
}
}
}
}
}
/* Standard SIFT descriptors are normalized, truncated and normalized again */
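  /* Note (added for clarity): clamping each component at 0.2 before the second
     normalization follows Lowe's original SIFT formulation; it limits the
     influence of large gradient magnitudes and improves robustness to
     non-linear illumination changes. */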
if( normalizeDescriptor ) {
/* Normalize the histogram to L2 unit length. */
Detail::normalize_histogram(descr_pt, descr_pt + NBO*NBP*NBP) ;
/* Truncate at 0.2. */
for(bin = 0; bin < NBO*NBP*NBP ; ++bin) {
if (descr_pt[bin] > 0.2) descr_pt[bin] = 0.2;
}
/* Normalize again. */
Detail::normalize_histogram(descr_pt, descr_pt + NBO*NBP*NBP) ;
}
}
// namespace VL
}
|
a807ce7fd5eff9c35b454a46f9988c08274c2aa3.hip | // !!! This is a file automatically generated by hipify!!!
/*!
@file rx_cu_common.cu
 @brief Common CUDA device functions
*/
// FILE --rx_cu_common.cu--
#ifndef _RX_CU_COMMON_CU_
#define _RX_CU_COMMON_CU_
//-----------------------------------------------------------------------------
// Include files
//-----------------------------------------------------------------------------
#include <stdio.h>
#include <math.h>
#include "helper_math.h"
#include <math_constants.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include "rx_cu_common.cuh"
//-----------------------------------------------------------------------------
// Functions
//-----------------------------------------------------------------------------
__device__ __host__
inline uint calUintPow(uint x, uint y)
{
uint x_y = 1;
for(uint i=0; i < y;i++) x_y *= x;
return x_y;
}
/*!
 * Integer division a/b, rounded up
 * @param[in] a,b operands of a/b
 * @return result of the division, rounded up (ceiling)
 */
__device__ __host__
inline uint DivCeil(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
/*!
 * Clamp to the interval [a,b]
 * @param[in] x value to clamp
 * @param[in] a,b clamp bounds
 * @return clamped value
 */
__device__
inline float CuClamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__
inline int CuClamp(int x, int a, int b)
{
return max(a, min(b, x));
}
/*!
 * Zero test for float3
 * @param[in] v value to test
 */
__device__
inline int CuIsZero(float3 v)
{
if(fabsf(v.x) < 1.0e-10 && fabsf(v.y) < 1.0e-10 && fabsf(v.z) < 1.0e-10){
return 1;
}
else{
return 0;
}
}
/*!
 * Matrix-vector product
 * @param[in] m 3x3 matrix
 * @param[in] v 3D vector
 * @return result of the product m*v
 */
__device__
inline float3 CuMulMV(matrix3x3 m, float3 v)
{
return make_float3(dot(m.e[0], v), dot(m.e[1], v), dot(m.e[2], v));
}
//-----------------------------------------------------------------------------
// Atomic functions
//-----------------------------------------------------------------------------
#ifdef RX_USE_ATOMIC_FUNC
/*!
 * atomicAdd for float
*/
__device__
inline void atomicFloatAdd(float *address, float val)
{
int i_val = __float_as_int(val);
int tmp0 = 0;
int tmp1;
while( (tmp1 = atomicCAS((int *)address, tmp0, i_val)) != tmp0)
{
tmp0 = tmp1;
i_val = __float_as_int(val + __int_as_float(tmp1));
}
}
/*!
 * atomicAdd for double
*/
__device__
inline double atomicDoubleAdd(double *address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val+__longlong_as_double(assumed)));
}while(assumed != old);
return __longlong_as_double(old);
}
/*!
 * atomicMin for float
*/
__device__
inline float atomicFloatMin(float *address, float val)
{
int *address_as_int = (int*)address;
int old = atomicMin(address_as_int, __float_as_int(val));
return __int_as_float(old);
}
/*!
 * atomicMax for float
*/
__device__
inline float atomicFloatMax(float *address, float val)
{
int *address_as_int = (int*)address;
int old = atomicMax(address_as_int, __float_as_int(val));
return __int_as_float(old);
}
#endif // #ifdef RX_USE_ATOMIC_FUNC
//-----------------------------------------------------------------------------
// Grid
//-----------------------------------------------------------------------------
/*!
 * Convert a 1D index to a 3D index (arbitrary grid resolution)
 * @param[in] i 1D index
 * @param[in] gridSize number of grid cells per axis
 * @return 3D index
 */
__device__
inline uint3 calcGridPosU(uint i, uint3 ngrid)
{
uint3 gridPos;
uint w = i%(ngrid.x*ngrid.y);
gridPos.x = w%ngrid.x;
gridPos.y = w/ngrid.x;
gridPos.z = i/(ngrid.x*ngrid.y);
return gridPos;
}
/*!
 * Convert a 3D index to a 1D index (arbitrary grid resolution)
 * @param[in] p 3D index
 * @param[in] gridSize number of grid cells per axis
 * @return 1D index
 */
__device__
inline uint calcGridPos3(uint3 p, uint3 ngrid)
{
p.x = min(p.x, ngrid.x-1);
p.y = min(p.y, ngrid.y-1);
p.z = min(p.z, ngrid.z-1);
return (p.z*ngrid.x*ngrid.y)+(p.y*ngrid.x)+p.x;
}
//-----------------------------------------------------------------------------
// CWT (continuous wavelet transform) device functions
//-----------------------------------------------------------------------------
/*!
 * Mexican hat wavelet
 * @param[in] t coordinate
 * @return value of the mother wavelet
 */
__device__
inline float MexicanHat(float t)
{
t = t*t;
return MEXICAN_HAT_C*(1.0-t)*exp(-t/2.0);
}
__device__
inline float MexicanHatIm(float t)
{
return 0.0f;
}
/*!
 * Mexican hat wavelet (wavenumber domain)
 * @param[in] w wavenumber
 * @return value of the mother wavelet
 */
__device__
inline float MexicanHatWave(float w)
{
w = w*w;
return MEXICAN_HAT_C*M_SQRT2PI*w*exp(-w/2.0);
}
inline float MexicanHatWaveIm(float w)
{
return 0.0f;
}
/*!
 * Mexican hat wavelet (2D)
 * @param[in] x,y coordinates
 * @return value of the mother wavelet
 */
__device__
inline float MexicanHat2D(float x, float y)
{
x = x*x;
y = y*y;
return MEXICAN_HAT_C*(x+y-2)*exp(-(x+y)/2.0);
}
__device__
inline float MexicanHat2DIm(float x, float y)
{
return 0.0f;
}
/*!
 * Mexican hat wavelet (3D)
 * @param[in] x,y,z coordinates
 * @return value of the mother wavelet
 */
__device__ __host__
inline float MexicanHat3D(float x, float y, float z)
{
x = x*x;
y = y*y;
z = z*z;
return MEXICAN_HAT_C*(x+y+z-3.0f)*exp(-(x+y+z)/2.0f);
}
__device__ __host__
inline float MexicanHat3DIm(float x, float y)
{
return 0.0f;
}
__device__
inline int Mod(int x, int n)
{
int m = (int)fmodf((float)x, (float)n);
return ((m < 0) ? m+n : m);
}
//-----------------------------------------------------------------------------
// Random numbers
//-----------------------------------------------------------------------------
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
/*!
 * Random number generation with Mersenne Twister (from the CUDA SDK sample)
 * @param[out] d_Random generated random numbers
 * @param[in] NPerRng number of values to generate per RNG
 */
__global__
static void RandomGPU(float *d_Random, int NPerRng)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
// Linear congruential random number generator (same scheme as the C standard library)
__device__ static unsigned int randx = 1;
__device__
inline void Srand(unsigned int s)
{
randx = s;
}
__device__
inline unsigned int Rand()
{
randx = randx*1103515245+12345;
return randx&2147483647;
}
__device__
inline unsigned int Rand2(unsigned int x)
{
x = x*1103515245+12345;
return x&2147483647;
}
#define RAND2_MAX (2147483647)
// XORShift random number generator
__device__ static unsigned long xors_x = 123456789;
__device__ static unsigned long xors_y = 362436069;
__device__ static unsigned long xors_z = 521288629;
__device__ static unsigned long xors_w = 88675123;
/*!
* G. Marsaglia, "Xorshift RNGs", Journal of Statistical Software, Vol. 8(14), pp.1-6, 2003.
* - http://www.jstatsoft.org/v08/i14/
* @param[in]
* @return
*/
__device__
inline unsigned long Xorshift128()
{
unsigned long t;
t = (xors_x^(xors_x<<11));
xors_x = xors_y; xors_y = xors_z; xors_z = xors_w;
return ( xors_w = (xors_w^(xors_w>>19))^(t^(t>>8)) );
}
__device__
inline long Xorshift128(long l, long h)
{
unsigned long t;
t = (xors_x^(xors_x<<11));
xors_x = xors_y; xors_y = xors_z; xors_z = xors_w;
xors_w = (xors_w^(xors_w>>19))^(t^(t>>8));
return l+(xors_w%(h-l));
}
__device__
inline float XorFrand(float l, float h)
{
return l+(h-l)*(Xorshift128(0, 1000000)/1000000.0f);
}
__device__
inline void Random(float2 &x, float a, float b)
{
x.x = XorFrand(a, b);
x.y = XorFrand(a, b);
}
__device__
inline void Random(float3 &x, float a, float b)
{
x.x = XorFrand(a, b);
x.y = XorFrand(a, b);
x.z = XorFrand(a, b);
}
// Gaussian noise (Marsaglia polar / Box-Muller method)
__device__
inline float GaussianNoise(void)
{
float x1, x2;
float ret;
float r2;
do {
x1 = 2.0 * XorFrand(0.0, 1.0-(1e-10)) - 1.0; /* [-1, 1) */
x2 = 2.0 * XorFrand(0.0, 1.0-(1e-10)) - 1.0;
r2 = x1*x1 + x2*x2;
} while ((r2 == 0) || (r2 > 1.0));
ret = x1 * sqrtf((-2.0 * logf(r2))/r2);
ret *= 0.25; // Possibility of ( N(0, 1) < 4.0 ) = 100%
if (ret < -1.0) ret = -1.0; /* Account for loss of precision. */
if (ret > 1.0) ret = 1.0;
return ret;
}
//-----------------------------------------------------------------------------
// Intersection and collision tests
//-----------------------------------------------------------------------------
/*!
 * Segment-circle intersection test (2D)
 * @param[in] A,B endpoints of the segment
 * @param[in] C center of the circle
 * @param[in] r radius of the circle
 * @param[out] P intersection points
 * @return number of intersections
 */
__device__
static int CuLineCircleIntersection(float2 A, float2 B, float2 C, float r, float2 P[2], float t[2])
{
float rr = r*r;
float2 AC = C-A;
float2 BC = C-B;
float2 v = B-A;
float l = length(v);
v /= l;
float td = dot(v, AC);
float2 D = A+td*v;
float dd = dot(D-C, D-C);
if(dd < rr){
float dt = sqrtf(rr-dd);
float da = rr-dot(AC, AC);
float db = rr-dot(BC, BC);
int inter = 0;
float t1 = td-dt;
float t2 = td+dt;
if(t1 >= 0 && t1 <= l){
P[inter] = A+t1*v;
t[inter] = t1;
inter++;
}
if(t2 >= 0 && t2 <= l){
P[inter] = A+t2*v;
t[inter] = t2;
inter++;
}
return inter;
}
else{
return 0;
}
}
/*!
 * Distance between a sphere and an AABB
 * @param[in] spos sphere center
 * @param[in] r sphere radius
 * @param[in] sgn +1 if the AABB is a container (box), -1 if it is an obstacle (object)
 * @param[in] box_min,box_max minimum and maximum corners of the AABB
 * @param[out] cp closest point on the AABB surface
 * @param[out] d distance between the sphere and the AABB
 * @param[out] n unit normal at the contact point
 */
__device__
inline int collisionSphereAABB(float3 spos, float r, int sgn, float3 box_min, float3 box_max, float3 &cp, float &d, float3 &n)
{
float3 dist_min; // box_min
float3 dist_max; // box_max
float d0 = 0.0f;
float3 n0 = make_float3(0.0f, 0.0f, 0.0f);
int bout = 0;
int count = 0;
//
if((dist_min.x = (spos.x-r)-box_min.x) < 0.0){ bout |= 0x0001; count++; d0 = dist_min.x; n0 = make_float3( 1.0, 0.0, 0.0);}
if((dist_min.y = (spos.y-r)-box_min.y) < 0.0){ bout |= 0x0002; count++; d0 = dist_min.y; n0 = make_float3( 0.0, 1.0, 0.0);}
if((dist_min.z = (spos.z-r)-box_min.z) < 0.0){ bout |= 0x0004; count++; d0 = dist_min.z; n0 = make_float3( 0.0, 0.0, 1.0);}
if((dist_max.x = box_max.x-(spos.x+r)) < 0.0){ bout |= 0x0008; count++; d0 = dist_max.x; n0 = make_float3(-1.0, 0.0, 0.0);}
if((dist_max.y = box_max.y-(spos.y+r)) < 0.0){ bout |= 0x0010; count++; d0 = dist_max.y; n0 = make_float3( 0.0, -1.0, 0.0);}
if((dist_max.z = box_max.z-(spos.z+r)) < 0.0){ bout |= 0x0020; count++; d0 = dist_max.z; n0 = make_float3( 0.0, 0.0, -1.0);}
// ()
if(bout == 0){
float min_d = 1e10;
if(dist_min.x < min_d){ min_d = dist_min.x; n = make_float3( 1.0, 0.0, 0.0); }
if(dist_min.y < min_d){ min_d = dist_min.y; n = make_float3( 0.0, 1.0, 0.0); }
if(dist_min.z < min_d){ min_d = dist_min.z; n = make_float3( 0.0, 0.0, 1.0); }
if(dist_max.x < min_d){ min_d = dist_max.x; n = make_float3(-1.0, 0.0, 0.0); }
if(dist_max.y < min_d){ min_d = dist_max.y; n = make_float3( 0.0, -1.0, 0.0); }
if(dist_max.z < min_d){ min_d = dist_max.z; n = make_float3( 0.0, 0.0, -1.0); }
d = (float)sgn*min_d;
n *= (float)sgn;
cp = spos+n*fabs(d);
return 1;
}
//
	// sgn = 1: box (container), -1: object (obstacle)
if(count == 1){
//
d = (float)sgn*d0;
n = (float)sgn*n0;
cp = spos+n*fabs(d);
}
else{
// /
float3 x = make_float3(0.0f, 0.0f, 0.0f);
if(bout & 0x0001) x.x = dist_min.x;
if(bout & 0x0002) x.y = dist_min.y;
if(bout & 0x0004) x.z = dist_min.z;
if(bout & 0x0008) x.x = -dist_max.x;
if(bout & 0x0010) x.y = -dist_max.y;
if(bout & 0x0020) x.z = -dist_max.z;
d = length(x);
n = normalize(x);
d *= -(float)sgn;
n *= -(float)sgn;
cp = spos+n*fabs(d);
float3 disp = make_float3(0.00001);
//Random(disp, 0, 0.00001);
disp = disp*n;
cp += disp;
}
return 0;
}
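/* Minimal usage sketch (not part of the original source): a caller keeping a
 * particle of radius r inside a box could respond to the returned contact
 * data roughly as follows, assuming its own `pos`/`vel` state and a
 * restitution factor `res`:
 *
 *   float3 cp, n; float d;
 *   collisionSphereAABB(pos, r, 1, box_min, box_max, cp, d, n);
 *   if(d < 0.0f){                         // penetrating the boundary
 *       pos  = cp;                        // push back onto the surface
 *       vel -= (1.0f+res)*dot(vel, n)*n;  // reflect the normal component
 *   }
 */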
/*!
 * Distance between a point and an AABB
 * @param[in] p point position
 * @param[in] box_cen center of the AABB
 * @param[in] box_ext half edge lengths of the AABB
 * @param[out] cp closest point on the AABB surface
 * @param[out] d distance between the point and the AABB
 * @param[out] n unit normal at the contact point
 */
__device__
inline int collisionPointAABB(float3 p, float3 box_cen, float3 box_ext, float3 &cp, float &d, float3 &n)
{
cp = p-box_cen;
float3 tmp = fabs(cp)-box_ext;
float res = ((tmp.x > tmp.y && tmp.x > tmp.z) ? tmp.x : (tmp.y > tmp.z ? tmp.y : tmp.z));
float sgn = (res > 0.0) ? -1.0 : 1.0;
int coli = 0;
n = make_float3(0.0f);
if(cp.x > box_ext.x){
cp.x = box_ext.x;
n.x -= 1.0;
coli++;
}
else if(cp.x < -box_ext.x){
cp.x = -box_ext.x;
n.x += 1.0;
coli++;
}
if(cp.y > box_ext.y){
cp.y = box_ext.y;
n.y -= 1.0;
coli++;
}
else if(cp.y < -box_ext.y){
cp.y = -box_ext.y;
n.y += 1.0;
coli++;
}
if(cp.z > box_ext.z){
cp.z = box_ext.z;
n.z -= 1.0;
coli++;
}
else if(cp.z < -box_ext.z){
cp.z = -box_ext.z;
n.z += 1.0;
coli++;
}
n = normalize(n);
//if(coli > 1){
// float3 disp;
// Random(disp, 0, 0.00001);
// disp = disp*n;
// cp += disp;
//}
cp += box_cen;
d = sgn*length(cp-p);
return 0;
}
/*!
 * Distance between a point and an oriented box
 * @param[in] p point position
 * @param[in] box_cen center of the box
 * @param[in] box_ext half edge lengths of the box
 * @param[in] box_rot orientation of the box (3x3 rotation matrix)
 * @param[in] box_inv_rot inverse of the box orientation matrix (3x3)
 * @param[out] cp closest point on the box surface
 * @param[out] d distance between the point and the box
 * @param[out] n unit normal at the contact point
 */
__device__
inline int collisionPointBox(float3 p, float3 box_cen, float3 box_ext, matrix3x3 box_rot, matrix3x3 box_inv_rot, float3 &cp, float &d, float3 &n)
{
cp = p-box_cen;
cp = CuMulMV(box_rot, cp);
float3 tmp = fabs(cp)-box_ext;
int coli = 0;
n = make_float3(0.0f);
if(tmp.x < 0.0 && tmp.y < 0.0 && tmp.z < 0.0){
tmp = fabs(tmp);
if(tmp.x <= tmp.y && tmp.x <= tmp.z){ // x
if(cp.x > 0){
cp.x = box_ext.x;
n.x += 1.0;
}
else{
cp.x = -box_ext.x;
n.x -= 1.0;
}
}
else if(tmp.y <= tmp.x && tmp.y <= tmp.z){ // y
if(cp.y > 0){
cp.y = box_ext.y;
n.y += 1.0;
}
else{
cp.y = -box_ext.y;
n.y -= 1.0;
}
}
else{ // z
if(cp.z > 0){
cp.z = box_ext.z;
n.z += 1.0;
}
else{
cp.z = -box_ext.z;
n.z -= 1.0;
}
}
coli++;
}
cp = CuMulMV(box_inv_rot, cp);
n = CuMulMV(box_inv_rot, n);
n = normalize(n);
cp += box_cen;
float sgn = (coli) ? -1.0 : 1.0;
d = sgn*(length(cp-p));
return 0;
}
/*!
 * Distance between a point and a sphere
 * @param[in] p point position
 * @param[in] sphere_cen center of the sphere
 * @param[in] sphere_rad radius of the sphere
 * @param[out] cp intersection of the sphere surface with the segment joining the point and the sphere center
 * @param[out] d distance between the point and the sphere surface
 * @param[out] n unit vector from the sphere center towards the point
 */
__device__
inline int collisionPointSphere(float3 p, float3 sphere_cen, float sphere_rad, float3 &cp, float &d, float3 &n)
{
n = make_float3(0.0f);
float3 l = p-sphere_cen;
float ll = length(l);
d = ll-sphere_rad;
if(d < 0.0){
n = normalize(p-sphere_cen);
cp = sphere_cen+n*sphere_rad;
}
return 0;
}
/*!
 * Distance between a point and a plane
 * @param[in] v point position
 * @param[in] px a point on the plane
 * @param[in] pn plane normal
 * @return signed distance
 */
__device__
inline float distPointPlane(float3 v, float3 px, float3 pn)
{
return dot((v-px), pn)/length(pn);
}
/*!
 * Distance between a point and a triangle, with the closest point
 * @param[in] v0,v1,v2 triangle vertices
 * @param[in] n triangle normal
 * @param[in] p point
 * @return 1 if the projected point lies inside the triangle, 0 otherwise
 */
__device__
inline int distPointTriangle(float3 v0, float3 v1, float3 v2, float3 n, float3 p, float &dist, float3 &p0)
{
//
float l = distPointPlane(p, v0, n);
//
float3 np = p-l*n;
//
float3 n1 = cross((v0-p), (v1-p));
float3 n2 = cross((v1-p), (v2-p));
float3 n3 = cross((v2-p), (v0-p));
if(dot(n1, n2) > 0 && dot(n2, n3) > 0){
//
dist = l;
p0 = np;
return 1;
}
else{
//
return 0;
}
}
/*!
 * Ray/segment - triangle intersection
 * @param[in] P0,P1 segment endpoints (or points on the ray)
 * @param[in] V0,V1,V2 triangle vertex positions
 * @param[out] I intersection point
 * @retval 1 intersects at point I
 * @retval 0 no intersection
 * @retval 2 segment lies in the triangle plane
 * @retval -1 the triangle is degenerate (zero area, i.e. a segment or a point)
 */
inline __device__
int intersectSegmentTriangle(float3 P0, float3 P1,
float3 V0, float3 V1, float3 V2,
float3 &I, float3 &n, float rp = 0.01)
{
//
float3 u = V1-V0;
float3 v = V2-V0;
n = normalize(cross(u, v));
if(CuIsZero(n)){
return -1; // "degenerate"(0)
}
//
float3 dir = P1-P0;
float a = dot(n, P0-V0);
float b = dot(n, dir);
if(fabs(b) < 1e-10){ //
if(a == 0){
return 2; //
}
else{
return 0; //
}
}
//
// 2
float r = -a/b;
if(r < 0.0 || fabs(a) > fabs(b) || b > 0){
return 0;
}
//if(r < 0.0){
// return 0;
//}
//else{
// if(fabs(a) > fabs(b)){
// return 0;
// }
// else{
// if(b > 0){
// return 0;
// }
// }
//}
//
I = P0+r*dir;
//
float uu, uv, vv, wu, wv, D;
uu = dot(u, u);
uv = dot(u, v);
vv = dot(v, v);
float3 w = I-V0;
wu = dot(w, u);
wv = dot(w, v);
D = uv*uv-uu*vv;
float s, t;
s = (uv*wv-vv*wu)/D;
if(s < 0.0 || s > 1.0){
return 0;
}
t = (uv*wu-uu*wv)/D;
if(t < 0.0 || (s+t) > 1.0){
return 0;
}
return 1;
}
#endif // #ifndef _RX_CU_COMMON_CU_
| a807ce7fd5eff9c35b454a46f9988c08274c2aa3.cu | /*!
@file rx_cu_common.cu
 @brief Common CUDA device functions
*/
// FILE --rx_cu_common.cu--
#ifndef _RX_CU_COMMON_CU_
#define _RX_CU_COMMON_CU_
//-----------------------------------------------------------------------------
// Include files
//-----------------------------------------------------------------------------
#include <stdio.h>
#include <math.h>
#include "helper_math.h"
#include <math_constants.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include "rx_cu_common.cuh"
//-----------------------------------------------------------------------------
// Functions
//-----------------------------------------------------------------------------
__device__ __host__
inline uint calUintPow(uint x, uint y)
{
uint x_y = 1;
for(uint i=0; i < y;i++) x_y *= x;
return x_y;
}
/*!
 * Integer division a/b, rounded up
 * @param[in] a,b operands of a/b
 * @return result of the division, rounded up (ceiling)
 */
__device__ __host__
inline uint DivCeil(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
/*!
 * Clamp to the interval [a,b]
 * @param[in] x value to clamp
 * @param[in] a,b clamp bounds
 * @return clamped value
 */
__device__
inline float CuClamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__
inline int CuClamp(int x, int a, int b)
{
return max(a, min(b, x));
}
/*!
 * Zero test for float3
 * @param[in] v value to test
 */
__device__
inline int CuIsZero(float3 v)
{
if(fabsf(v.x) < 1.0e-10 && fabsf(v.y) < 1.0e-10 && fabsf(v.z) < 1.0e-10){
return 1;
}
else{
return 0;
}
}
/*!
 * Matrix-vector product
 * @param[in] m 3x3 matrix
 * @param[in] v 3D vector
 * @return result of the product m*v
 */
__device__
inline float3 CuMulMV(matrix3x3 m, float3 v)
{
return make_float3(dot(m.e[0], v), dot(m.e[1], v), dot(m.e[2], v));
}
//-----------------------------------------------------------------------------
// Atomic functions
//-----------------------------------------------------------------------------
#ifdef RX_USE_ATOMIC_FUNC
/*!
 * atomicAdd for float
*/
__device__
inline void atomicFloatAdd(float *address, float val)
{
int i_val = __float_as_int(val);
int tmp0 = 0;
int tmp1;
while( (tmp1 = atomicCAS((int *)address, tmp0, i_val)) != tmp0)
{
tmp0 = tmp1;
i_val = __float_as_int(val + __int_as_float(tmp1));
}
}
/*!
 * atomicAdd for double
*/
__device__
inline double atomicDoubleAdd(double *address, double val)
{
unsigned long long int *address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do{
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val+__longlong_as_double(assumed)));
}while(assumed != old);
return __longlong_as_double(old);
}
/*!
 * atomicMin for float
*/
__device__
inline float atomicFloatMin(float *address, float val)
{
int *address_as_int = (int*)address;
int old = atomicMin(address_as_int, __float_as_int(val));
return __int_as_float(old);
}
/*!
 * atomicMax for float
*/
__device__
inline float atomicFloatMax(float *address, float val)
{
int *address_as_int = (int*)address;
int old = atomicMax(address_as_int, __float_as_int(val));
return __int_as_float(old);
}
#endif // #ifdef RX_USE_ATOMIC_FUNC
//-----------------------------------------------------------------------------
// Grid
//-----------------------------------------------------------------------------
/*!
 * Convert a 1D index to a 3D index (arbitrary grid resolution)
 * @param[in] i 1D index
 * @param[in] gridSize number of grid cells per axis
 * @return 3D index
 */
__device__
inline uint3 calcGridPosU(uint i, uint3 ngrid)
{
uint3 gridPos;
uint w = i%(ngrid.x*ngrid.y);
gridPos.x = w%ngrid.x;
gridPos.y = w/ngrid.x;
gridPos.z = i/(ngrid.x*ngrid.y);
return gridPos;
}
/*!
 * Convert a 3D index to a 1D index (arbitrary grid resolution)
 * @param[in] p 3D index
 * @param[in] gridSize number of grid cells per axis
 * @return 1D index
 */
__device__
inline uint calcGridPos3(uint3 p, uint3 ngrid)
{
p.x = min(p.x, ngrid.x-1);
p.y = min(p.y, ngrid.y-1);
p.z = min(p.z, ngrid.z-1);
return (p.z*ngrid.x*ngrid.y)+(p.y*ngrid.x)+p.x;
}
//-----------------------------------------------------------------------------
// CWT (continuous wavelet transform) device functions
//-----------------------------------------------------------------------------
/*!
 * Mexican hat wavelet
 * @param[in] t coordinate
 * @return value of the mother wavelet
 */
__device__
inline float MexicanHat(float t)
{
t = t*t;
return MEXICAN_HAT_C*(1.0-t)*exp(-t/2.0);
}
__device__
inline float MexicanHatIm(float t)
{
return 0.0f;
}
/*!
 * Mexican hat wavelet (wavenumber domain)
 * @param[in] w wavenumber
 * @return value of the mother wavelet
 */
__device__
inline float MexicanHatWave(float w)
{
w = w*w;
return MEXICAN_HAT_C*M_SQRT2PI*w*exp(-w/2.0);
}
inline float MexicanHatWaveIm(float w)
{
return 0.0f;
}
/*!
 * Mexican hat wavelet (2D)
 * @param[in] x,y coordinates
 * @return value of the mother wavelet
 */
__device__
inline float MexicanHat2D(float x, float y)
{
x = x*x;
y = y*y;
return MEXICAN_HAT_C*(x+y-2)*exp(-(x+y)/2.0);
}
__device__
inline float MexicanHat2DIm(float x, float y)
{
return 0.0f;
}
/*!
 * Mexican hat wavelet (3D)
 * @param[in] x,y,z coordinates
 * @return value of the mother wavelet
 */
__device__ __host__
inline float MexicanHat3D(float x, float y, float z)
{
x = x*x;
y = y*y;
z = z*z;
return MEXICAN_HAT_C*(x+y+z-3.0f)*exp(-(x+y+z)/2.0f);
}
__device__ __host__
inline float MexicanHat3DIm(float x, float y)
{
return 0.0f;
}
__device__
inline int Mod(int x, int n)
{
int m = (int)fmodf((float)x, (float)n);
return ((m < 0) ? m+n : m);
}
//-----------------------------------------------------------------------------
// Random numbers
//-----------------------------------------------------------------------------
__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT];
static mt_struct_stripped h_MT[MT_RNG_COUNT];
/*!
 * Random number generation with Mersenne Twister (from the CUDA SDK sample)
 * @param[out] d_Random generated random numbers
 * @param[in] NPerRng number of values to generate per RNG
 */
__global__
static void RandomGPU(float *d_Random, int NPerRng)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int THREAD_N = blockDim.x * gridDim.x;
int iState, iState1, iStateM, iOut;
unsigned int mti, mti1, mtiM, x;
unsigned int mt[MT_NN];
for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){
//Load bit-vector Mersenne Twister parameters
mt_struct_stripped config = ds_MT[iRng];
//Initialize current state
mt[0] = config.seed;
for(iState = 1; iState < MT_NN; iState++)
mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK;
iState = 0;
mti1 = mt[0];
for(iOut = 0; iOut < NPerRng; iOut++){
//iState1 = (iState + 1) % MT_NN
//iStateM = (iState + MT_MM) % MT_NN
iState1 = iState + 1;
iStateM = iState + MT_MM;
if(iState1 >= MT_NN) iState1 -= MT_NN;
if(iStateM >= MT_NN) iStateM -= MT_NN;
mti = mti1;
mti1 = mt[iState1];
mtiM = mt[iStateM];
x = (mti & MT_UMASK) | (mti1 & MT_LMASK);
x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0);
mt[iState] = x;
iState = iState1;
//Tempering transformation
x ^= (x >> MT_SHIFT0);
x ^= (x << MT_SHIFTB) & config.mask_b;
x ^= (x << MT_SHIFTC) & config.mask_c;
x ^= (x >> MT_SHIFT1);
//Convert to (0, 1] float and write to global memory
d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f;
}
}
}
// Linear congruential random number generator (same scheme as the C standard library)
__device__ static unsigned int randx = 1;
__device__
inline void Srand(unsigned int s)
{
randx = s;
}
__device__
inline unsigned int Rand()
{
randx = randx*1103515245+12345;
return randx&2147483647;
}
__device__
inline unsigned int Rand2(unsigned int x)
{
x = x*1103515245+12345;
return x&2147483647;
}
#define RAND2_MAX (2147483647)
// XORShift random number generator
__device__ static unsigned long xors_x = 123456789;
__device__ static unsigned long xors_y = 362436069;
__device__ static unsigned long xors_z = 521288629;
__device__ static unsigned long xors_w = 88675123;
/*!
* G. Marsaglia, "Xorshift RNGs", Journal of Statistical Software, Vol. 8(14), pp.1-6, 2003.
* - http://www.jstatsoft.org/v08/i14/
* @param[in]
* @return
*/
__device__
inline unsigned long Xorshift128()
{
unsigned long t;
t = (xors_x^(xors_x<<11));
xors_x = xors_y; xors_y = xors_z; xors_z = xors_w;
return ( xors_w = (xors_w^(xors_w>>19))^(t^(t>>8)) );
}
__device__
inline long Xorshift128(long l, long h)
{
unsigned long t;
t = (xors_x^(xors_x<<11));
xors_x = xors_y; xors_y = xors_z; xors_z = xors_w;
xors_w = (xors_w^(xors_w>>19))^(t^(t>>8));
return l+(xors_w%(h-l));
}
__device__
inline float XorFrand(float l, float h)
{
return l+(h-l)*(Xorshift128(0, 1000000)/1000000.0f);
}
__device__
inline void Random(float2 &x, float a, float b)
{
x.x = XorFrand(a, b);
x.y = XorFrand(a, b);
}
__device__
inline void Random(float3 &x, float a, float b)
{
x.x = XorFrand(a, b);
x.y = XorFrand(a, b);
x.z = XorFrand(a, b);
}
// Gaussian noise (Marsaglia polar / Box-Muller method)
__device__
inline float GaussianNoise(void)
{
float x1, x2;
float ret;
float r2;
do {
x1 = 2.0 * XorFrand(0.0, 1.0-(1e-10)) - 1.0; /* [-1, 1) */
x2 = 2.0 * XorFrand(0.0, 1.0-(1e-10)) - 1.0;
r2 = x1*x1 + x2*x2;
} while ((r2 == 0) || (r2 > 1.0));
ret = x1 * sqrtf((-2.0 * logf(r2))/r2);
ret *= 0.25; // Possibility of ( N(0, 1) < 4.0 ) = 100%
if (ret < -1.0) ret = -1.0; /* Account for loss of precision. */
if (ret > 1.0) ret = 1.0;
return ret;
}
//-----------------------------------------------------------------------------
// Intersection and collision tests
//-----------------------------------------------------------------------------
/*!
 * Segment-circle intersection test (2D)
 * @param[in] A,B endpoints of the segment
 * @param[in] C center of the circle
 * @param[in] r radius of the circle
 * @param[out] P intersection points
 * @return number of intersections
 */
__device__
static int CuLineCircleIntersection(float2 A, float2 B, float2 C, float r, float2 P[2], float t[2])
{
float rr = r*r;
float2 AC = C-A;
float2 BC = C-B;
float2 v = B-A;
float l = length(v);
v /= l;
float td = dot(v, AC);
float2 D = A+td*v;
float dd = dot(D-C, D-C);
if(dd < rr){
float dt = sqrtf(rr-dd);
float da = rr-dot(AC, AC);
float db = rr-dot(BC, BC);
int inter = 0;
float t1 = td-dt;
float t2 = td+dt;
if(t1 >= 0 && t1 <= l){
P[inter] = A+t1*v;
t[inter] = t1;
inter++;
}
if(t2 >= 0 && t2 <= l){
P[inter] = A+t2*v;
t[inter] = t2;
inter++;
}
return inter;
}
else{
return 0;
}
}
/*!
 * Distance between a sphere and an AABB
 * @param[in] spos sphere center
 * @param[in] r sphere radius
 * @param[in] sgn +1 if the AABB is a container (box), -1 if it is an obstacle (object)
 * @param[in] box_min,box_max minimum and maximum corners of the AABB
 * @param[out] cp closest point on the AABB surface
 * @param[out] d distance between the sphere and the AABB
 * @param[out] n unit normal at the contact point
 */
__device__
inline int collisionSphereAABB(float3 spos, float r, int sgn, float3 box_min, float3 box_max, float3 &cp, float &d, float3 &n)
{
float3 dist_min; // box_minとの距離
float3 dist_max; // box_maxとの距離
float d0 = 0.0f;
float3 n0 = make_float3(0.0f, 0.0f, 0.0f);
int bout = 0;
int count = 0;
// 各軸ごとに最小と最大境界外になっていないか調べる
if((dist_min.x = (spos.x-r)-box_min.x) < 0.0){ bout |= 0x0001; count++; d0 = dist_min.x; n0 = make_float3( 1.0, 0.0, 0.0);}
if((dist_min.y = (spos.y-r)-box_min.y) < 0.0){ bout |= 0x0002; count++; d0 = dist_min.y; n0 = make_float3( 0.0, 1.0, 0.0);}
if((dist_min.z = (spos.z-r)-box_min.z) < 0.0){ bout |= 0x0004; count++; d0 = dist_min.z; n0 = make_float3( 0.0, 0.0, 1.0);}
if((dist_max.x = box_max.x-(spos.x+r)) < 0.0){ bout |= 0x0008; count++; d0 = dist_max.x; n0 = make_float3(-1.0, 0.0, 0.0);}
if((dist_max.y = box_max.y-(spos.y+r)) < 0.0){ bout |= 0x0010; count++; d0 = dist_max.y; n0 = make_float3( 0.0, -1.0, 0.0);}
if((dist_max.z = box_max.z-(spos.z+r)) < 0.0){ bout |= 0x0020; count++; d0 = dist_max.z; n0 = make_float3( 0.0, 0.0, -1.0);}
// 立方体内(全軸で境界内)
if(bout == 0){
float min_d = 1e10;
if(dist_min.x < min_d){ min_d = dist_min.x; n = make_float3( 1.0, 0.0, 0.0); }
if(dist_min.y < min_d){ min_d = dist_min.y; n = make_float3( 0.0, 1.0, 0.0); }
if(dist_min.z < min_d){ min_d = dist_min.z; n = make_float3( 0.0, 0.0, 1.0); }
if(dist_max.x < min_d){ min_d = dist_max.x; n = make_float3(-1.0, 0.0, 0.0); }
if(dist_max.y < min_d){ min_d = dist_max.y; n = make_float3( 0.0, -1.0, 0.0); }
if(dist_max.z < min_d){ min_d = dist_max.z; n = make_float3( 0.0, 0.0, -1.0); }
d = (float)sgn*min_d;
n *= (float)sgn;
cp = spos+n*fabs(d);
return 1;
}
// 立方体外
	// sgn = 1: box (container), -1: object (obstacle)
if(count == 1){
// 平面近傍
d = (float)sgn*d0;
n = (float)sgn*n0;
cp = spos+n*fabs(d);
}
else{
// エッジ/コーナー近傍
float3 x = make_float3(0.0f, 0.0f, 0.0f);
if(bout & 0x0001) x.x = dist_min.x;
if(bout & 0x0002) x.y = dist_min.y;
if(bout & 0x0004) x.z = dist_min.z;
if(bout & 0x0008) x.x = -dist_max.x;
if(bout & 0x0010) x.y = -dist_max.y;
if(bout & 0x0020) x.z = -dist_max.z;
d = length(x);
n = normalize(x);
d *= -(float)sgn;
n *= -(float)sgn;
cp = spos+n*fabs(d);
float3 disp = make_float3(0.00001);
//Random(disp, 0, 0.00001);
disp = disp*n;
cp += disp;
}
return 0;
}
/*!
 * Distance between a point and an AABB
 * @param[in] p point position
 * @param[in] box_cen center of the AABB
 * @param[in] box_ext half edge lengths of the AABB
 * @param[out] cp closest point on the AABB surface
 * @param[out] d distance between the point and the AABB
 * @param[out] n unit normal at the contact point
 */
__device__
inline int collisionPointAABB(float3 p, float3 box_cen, float3 box_ext, float3 &cp, float &d, float3 &n)
{
cp = p-box_cen;
float3 tmp = fabs(cp)-box_ext;
float res = ((tmp.x > tmp.y && tmp.x > tmp.z) ? tmp.x : (tmp.y > tmp.z ? tmp.y : tmp.z));
float sgn = (res > 0.0) ? -1.0 : 1.0;
int coli = 0;
n = make_float3(0.0f);
if(cp.x > box_ext.x){
cp.x = box_ext.x;
n.x -= 1.0;
coli++;
}
else if(cp.x < -box_ext.x){
cp.x = -box_ext.x;
n.x += 1.0;
coli++;
}
if(cp.y > box_ext.y){
cp.y = box_ext.y;
n.y -= 1.0;
coli++;
}
else if(cp.y < -box_ext.y){
cp.y = -box_ext.y;
n.y += 1.0;
coli++;
}
if(cp.z > box_ext.z){
cp.z = box_ext.z;
n.z -= 1.0;
coli++;
}
else if(cp.z < -box_ext.z){
cp.z = -box_ext.z;
n.z += 1.0;
coli++;
}
n = normalize(n);
//if(coli > 1){
// float3 disp;
// Random(disp, 0, 0.00001);
// disp = disp*n;
// cp += disp;
//}
cp += box_cen;
d = sgn*length(cp-p);
return 0;
}
/*!
 * Distance between a point and an oriented box
 * @param[in] p point position
 * @param[in] box_cen center of the box
 * @param[in] box_ext half edge lengths of the box
 * @param[in] box_rot orientation of the box (3x3 rotation matrix)
 * @param[in] box_inv_rot inverse of the box orientation matrix (3x3)
 * @param[out] cp closest point on the box surface
 * @param[out] d distance between the point and the box
 * @param[out] n unit normal at the contact point
 */
__device__
inline int collisionPointBox(float3 p, float3 box_cen, float3 box_ext, matrix3x3 box_rot, matrix3x3 box_inv_rot, float3 &cp, float &d, float3 &n)
{
cp = p-box_cen;
cp = CuMulMV(box_rot, cp);
float3 tmp = fabs(cp)-box_ext;
int coli = 0;
n = make_float3(0.0f);
if(tmp.x < 0.0 && tmp.y < 0.0 && tmp.z < 0.0){
tmp = fabs(tmp);
if(tmp.x <= tmp.y && tmp.x <= tmp.z){ // x平面に近い
if(cp.x > 0){
cp.x = box_ext.x;
n.x += 1.0;
}
else{
cp.x = -box_ext.x;
n.x -= 1.0;
}
}
else if(tmp.y <= tmp.x && tmp.y <= tmp.z){ // y平面に近い
if(cp.y > 0){
cp.y = box_ext.y;
n.y += 1.0;
}
else{
cp.y = -box_ext.y;
n.y -= 1.0;
}
}
else{ // z平面に近い
if(cp.z > 0){
cp.z = box_ext.z;
n.z += 1.0;
}
else{
cp.z = -box_ext.z;
n.z -= 1.0;
}
}
coli++;
}
cp = CuMulMV(box_inv_rot, cp);
n = CuMulMV(box_inv_rot, n);
n = normalize(n);
cp += box_cen;
float sgn = (coli) ? -1.0 : 1.0;
d = sgn*(length(cp-p));
return 0;
}
/*!
 * Distance between a point and a sphere
 * @param[in] p point position
 * @param[in] sphere_cen center of the sphere
 * @param[in] sphere_rad radius of the sphere
 * @param[out] cp intersection of the sphere surface with the segment joining the point and the sphere center
 * @param[out] d distance between the point and the sphere surface
 * @param[out] n unit vector from the sphere center towards the point
 */
__device__
inline int collisionPointSphere(float3 p, float3 sphere_cen, float sphere_rad, float3 &cp, float &d, float3 &n)
{
n = make_float3(0.0f);
float3 l = p-sphere_cen;
float ll = length(l);
d = ll-sphere_rad;
if(d < 0.0){
n = normalize(p-sphere_cen);
cp = sphere_cen+n*sphere_rad;
}
return 0;
}
/*!
 * Distance between a point and a plane
 * @param[in] v point position
 * @param[in] px a point on the plane
 * @param[in] pn plane normal
 * @return signed distance
 */
__device__
inline float distPointPlane(float3 v, float3 px, float3 pn)
{
return dot((v-px), pn)/length(pn);
}
/*!
 * Distance between a point and a triangle, with the closest point
 * @param[in] v0,v1,v2 triangle vertices
 * @param[in] n triangle normal
 * @param[in] p point
 * @return 1 if the projected point lies inside the triangle, 0 otherwise
 */
__device__
inline int distPointTriangle(float3 v0, float3 v1, float3 v2, float3 n, float3 p, float &dist, float3 &p0)
{
// ポリゴンを含む平面と点の距離
float l = distPointPlane(p, v0, n);
// 平面との最近傍点座標
float3 np = p-l*n;
// 近傍点が三角形内かどうかの判定
float3 n1 = cross((v0-p), (v1-p));
float3 n2 = cross((v1-p), (v2-p));
float3 n3 = cross((v2-p), (v0-p));
if(dot(n1, n2) > 0 && dot(n2, n3) > 0){
// 三角形内
dist = l;
p0 = np;
return 1;
}
else{
// 三角形外
return 0;
}
}
/*!
 * Ray/segment - triangle intersection
 * @param[in] P0,P1 segment endpoints (or points on the ray)
 * @param[in] V0,V1,V2 triangle vertex positions
 * @param[out] I intersection point
 * @retval 1 intersects at point I
 * @retval 0 no intersection
 * @retval 2 segment lies in the triangle plane
 * @retval -1 the triangle is degenerate (zero area, i.e. a segment or a point)
 */
inline __device__
int intersectSegmentTriangle(float3 P0, float3 P1,
float3 V0, float3 V1, float3 V2,
float3 &I, float3 &n, float rp = 0.01)
{
// 三角形のエッジベクトルと法線
float3 u = V1-V0;
float3 v = V2-V0;
n = normalize(cross(u, v));
if(CuIsZero(n)){
return -1; // 三角形が"degenerate"である(面積が0)
}
// 線分
float3 dir = P1-P0;
float a = dot(n, P0-V0);
float b = dot(n, dir);
if(fabs(b) < 1e-10){ // 線分と三角形平面が平行
if(a == 0){
return 2; // 線分が平面上
}
else{
return 0; // 交点なし
}
}
// 交点計算
// 2端点がそれぞれ異なる面にあるかどうかを判定
float r = -a/b;
if(r < 0.0 || fabs(a) > fabs(b) || b > 0){
return 0;
}
//if(r < 0.0){
// return 0;
//}
//else{
// if(fabs(a) > fabs(b)){
// return 0;
// }
// else{
// if(b > 0){
// return 0;
// }
// }
//}
// 線分と平面の交点
I = P0+r*dir;
// 交点が三角形内にあるかどうかの判定
float uu, uv, vv, wu, wv, D;
uu = dot(u, u);
uv = dot(u, v);
vv = dot(v, v);
float3 w = I-V0;
wu = dot(w, u);
wv = dot(w, v);
D = uv*uv-uu*vv;
float s, t;
s = (uv*wv-vv*wu)/D;
if(s < 0.0 || s > 1.0){
return 0;
}
t = (uv*wu-uu*wv)/D;
if(t < 0.0 || (s+t) > 1.0){
return 0;
}
return 1;
}
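/* Minimal usage sketch (not part of the original source): testing a particle
 * path P0->P1 against one triangle and reflecting the motion at the hit point
 * might look like this, with `vel` and a restitution factor `res` assumed:
 *
 *   float3 I, n;
 *   if(intersectSegmentTriangle(P0, P1, V0, V1, V2, I, n) == 1){
 *       P1   = I;                          // stop at the intersection point
 *       vel -= (1.0f+res)*dot(vel, n)*n;   // reflect off the triangle plane
 *   }
 */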
#endif // #ifndef _RX_CU_COMMON_CU_
|
3216b8b055795d1ed2c3e528264958a348aa33e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
//
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
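// How the kernel below works (summary added for clarity, not in the original
// source): boxes are processed in 64-wide tiles (threadsPerBlock = 64, the
// number of bits in an unsigned long long). Block (col, row) compares the 64
// "row" boxes against the 64 "col" boxes held in shared memory and records,
// for every row box, a 64-bit mask whose i-th bit is set when the IoU with
// the i-th col box exceeds nms_overlap_thresh. The host later walks these
// masks to keep a box only if no earlier box (boxes are assumed pre-sorted by
// score) has already suppressed it.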
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
  CUDA_CHECK(hipGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to hipSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(hipSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(hipMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(hipMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
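  // Greedy suppression on the host (comment added for clarity): scan boxes in
  // order; a box is kept only if none of the previously kept boxes has set its
  // bit in `remv`, and when a box is kept its own mask row is OR-ed into
  // `remv` so that everything it overlaps too strongly with gets discarded.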
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(hipFree(boxes_dev));
CUDA_CHECK(hipFree(mask_dev));
}
| 3216b8b055795d1ed2c3e528264958a348aa33e4.cu | // ------------------------------------------------------------------
//
// Licensed under The MIT License
// Modified from MATLAB Faster R-CNN (https://github.com/shaoqingren/faster_rcnn)
// ------------------------------------------------------------------
#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
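// Summary added for clarity (not in the original source): nms_kernel tiles the
// boxes into groups of 64 (threadsPerBlock = bits in an unsigned long long).
// Each (col, row) block computes, per row box, a 64-bit suppression mask over
// the col boxes using devIoU and the overlap threshold; the host-side scan in
// _nms then keeps a box only if no earlier, presumably higher-scoring box has
// marked it as suppressed.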
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _set_device(int device_id) {
int current_device;
  CUDA_CHECK(cudaGetDevice(&current_device));
if (current_device == device_id) {
return;
}
// The call to cudaSetDevice must come before any calls to Get, which
// may perform initialization using the GPU.
CUDA_CHECK(cudaSetDevice(device_id));
}
void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
int boxes_dim, float nms_overlap_thresh, int device_id) {
_set_device(device_id);
float* boxes_dev = NULL;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
CUDA_CHECK(cudaMalloc(&boxes_dev,
boxes_num * boxes_dim * sizeof(float)));
CUDA_CHECK(cudaMemcpy(boxes_dev,
boxes_host,
boxes_num * boxes_dim * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
CUDA_CHECK(cudaFree(boxes_dev));
CUDA_CHECK(cudaFree(mask_dev));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.