hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
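Each row pairs a hipified source file with the CUDA original it was generated from. A minimal sketch of loading and inspecting one pair with the Hugging Face `datasets` library follows; the repo id used here is a placeholder, not the actual path of this dataset:

```python
from datasets import load_dataset

# Placeholder repo id -- substitute the real dataset path for this card.
ds = load_dataset("example-org/hip-cuda-pairs", split="train")

row = ds[0]
print(row["hip_filename"], "<->", row["cuda_filename"])
print(len(row["hip_content"]), "chars of HIP,", len(row["cuda_content"]), "chars of CUDA")
```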
165d710a7e14228bfdb5654095766a9de791f03d.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/hip/HIPBlas.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
return legacy::cuda::_th_addmm(b_self, mat1, mat2, beta, alpha);
}
Tensor& addmm_cuda_out(Tensor &result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return legacy::cuda::_th_addmm_out(result, b_self, mat1, mat2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor addbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
return legacy::cuda::_th_addbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& addbmm_cuda_out(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm_out");
return legacy::cuda::_th_addbmm_out(result, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
if ((tensor_strides[0] == 1) && (tensor_strides[1] != 0)) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] != 0)) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
// Check https://github.com/pytorch/pytorch/issues/22078
// for information about the bug. We don't know the exact conditions that trigger it,
// but using Sgemm or Hgemm on Maxwell or Pascal seems to be a
// necessary condition.
static void checkCuda90Bug(int i_m, int i_n, int i_k)
{
#if TORCH_HIP_VERSION < 9200 && TORCH_HIP_VERSION >= 9000
static std::once_flag alreadyWarned;
const int LIMIT = 1 << 21;
if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) {
hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major == 5 || prop->major == 6) {
std::call_once(alreadyWarned, []() {
TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. Please consider upgrading to CUDA 9.2 or later.");
});
}
}
#endif
}
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(
(mat1.dim() == 2) && (mat2.dim() == 2) &&
(self.dim() == 2) && (result.dim() == 2),
"tensors must be 2-D"
);
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self_sizes = self.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self_sizes[0] == mat1_sizes[0], "self dim 0 must match mat1 dim 0");
TORCH_CHECK(self_sizes[1] == mat2_sizes[1], "self dim 1 must match mat2 dim 1");
// If self and result either point to the same data or if beta is zero,
// we can avoid copying self into result. Otherwise, we need to copy.
if (beta.to<double>() != 0.0) {
if ((result.data_ptr() != self.data_ptr()) || (result.strides() != self.strides())) {
result.copy_(self);
}
}
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self.scalar_type();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
if (scalar_type == at::ScalarType::Half || scalar_type == at::ScalarType::Float) {
checkCuda90Bug(static_cast<int>(m), static_cast<int>(n), static_cast<int>(k));
}
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
} }
| 165d710a7e14228bfdb5654095766a9de791f03d.cu | #include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/cuda/CUDABlas.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
return legacy::cuda::_th_addmm(b_self, mat1, mat2, beta, alpha);
}
Tensor& addmm_cuda_out(Tensor &result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return legacy::cuda::_th_addmm_out(result, b_self, mat1, mat2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor addbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
return legacy::cuda::_th_addbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& addbmm_cuda_out(Tensor& result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm_out");
return legacy::cuda::_th_addbmm_out(result, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
if ((tensor_strides[0] == 1) && (tensor_strides[1] != 0)) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] != 0)) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
// Check https://github.com/pytorch/pytorch/issues/22078
// for information about the bug. We don't know the exact conditions that trigger it,
// but using Sgemm or Hgemm on Maxwell or Pascal seems to be a
// necessary condition.
static void checkCuda90Bug(int i_m, int i_n, int i_k)
{
#if CUDA_VERSION < 9200 && CUDA_VERSION >= 9000
static std::once_flag alreadyWarned;
const int LIMIT = 1 << 21;
if (i_m > LIMIT || i_n > LIMIT || i_k > LIMIT) {
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major == 5 || prop->major == 6) {
std::call_once(alreadyWarned, []() {
TORCH_WARN("Matrix multiplication for dimensions larger than 2^21 has known bugs on your combination of CUDA version and device type. Please consider upgrading to CUDA 9.2 or later.");
});
}
}
#endif
}
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(
(mat1.dim() == 2) && (mat2.dim() == 2) &&
(self.dim() == 2) && (result.dim() == 2),
"tensors must be 2-D"
);
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self_sizes = self.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self_sizes[0] == mat1_sizes[0], "self dim 0 must match mat1 dim 0");
TORCH_CHECK(self_sizes[1] == mat2_sizes[1], "self dim 1 must match mat2 dim 1");
// If self and result either point to the same data or if beta is zero,
// we can avoid copying self into result. Otherwise, we need to copy.
if (beta.to<double>() != 0.0) {
if ((result.data_ptr() != self.data_ptr()) || (result.strides() != self.strides())) {
result.copy_(self);
}
}
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self.scalar_type();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
if (scalar_type == at::ScalarType::Half || scalar_type == at::ScalarType::Float) {
checkCuda90Bug(static_cast<int>(m), static_cast<int>(n), static_cast<int>(k));
}
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
} }
|
c95368f354997d77a551897b8be7c23b50ef0d9a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include <iostream>
#include "utils.h"
#define Block_Dim 32
#define NUM_THREADS Block_Dim * Block_Dim
using namespace std;
void memcopy_gpu(FP *dst, const FP *src, int size)
{
hipMemcpy(dst, src, size, hipMemcpyHostToHost);
}
__global__ void matmul_tiled_kernel(const FP *a, const FP *b, FP *c, int n, int m, int p) {
int TW = blockDim.x;
extern __shared__ FP bigarray[];
FP *atile = &bigarray[0], *btile = &bigarray[TW * TW];
int t, k, i, j, aIndex, bIndex, atIndex, btIndex;
int tx = threadIdx.x, ty = threadIdx.y;
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
FP cvalue = 0.;
aIndex = row * p + tx;
atIndex = ty * TW + tx;
bIndex = ty * m + col;
btIndex = ty * TW + tx;
for (t = 0; t * TW < p; t++, aIndex += TW, bIndex += TW * m) {
if (row < n && t * TW + tx < p) {
atile[atIndex] = a[aIndex];
}
if (col < m && t * TW + ty < p) {
btile[btIndex] = b[bIndex];
}
__syncthreads();
if(col < m && row < n) {
for (k = 0, i = ty * TW, j = tx; k < min(p - t * TW, TW); k++, i++, j+=TW)
cvalue += atile[i] * btile[j];
}
__syncthreads();
}
if(col < m && row < n) {
c[row * m + col] = cvalue;
}
}
__global__ void atrans_matmul_tiled_kernel(const FP *a, const FP *b, FP *c, int n, int m, int p) {
int TW = blockDim.x;
extern __shared__ FP bigarray[];
FP *atile = &bigarray[0], *btile = &bigarray[TW * TW];
int t, k, i, j, aIndex, bIndex, atIndex, btIndex;
int tx = threadIdx.x, ty = threadIdx.y;
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
FP cvalue = 0.;
aIndex = tx * n + row;
atIndex = ty * TW + tx;
bIndex = ty * m + col;
btIndex = ty * TW + tx;
for (t = 0; t * TW < p; t++, aIndex += TW * n, bIndex += TW * m) {
if (row < n && t * TW + tx < p) {
atile[atIndex] = a[aIndex];
}
if (col < m && t * TW + ty < p) {
btile[btIndex] = b[bIndex];
}
__syncthreads();
if(col < m && row < n) {
for (k = 0, i = ty * TW, j = tx; k < min(p - t * TW, TW); k++, i++, j+=TW)
cvalue += atile[i] * btile[j];
}
__syncthreads();
}
if(col < m && row < n) {
c[row * m + col] = cvalue;
}
}
__global__ void btrans_matmul_tiled_kernel(const FP *a, const FP *b, FP *c, int n, int m, int p) {
int TW = blockDim.x;
extern __shared__ FP bigarray[];
FP *atile = &bigarray[0], *btile = &bigarray[TW * TW];
int t, k, i, j, aIndex, bIndex, atIndex, btIndex;
int tx = threadIdx.x, ty = threadIdx.y;
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
FP cvalue = 0.;
aIndex = row * p + tx;
atIndex = ty * TW + tx;
bIndex = col * p + ty;
btIndex = ty * TW + tx;
for (t = 0; t * TW < p; t++, aIndex += TW, bIndex += TW) {
if (row < n && t * TW + tx < p) {
atile[atIndex] = a[aIndex];
}
if (col < m && t * TW + ty < p) {
btile[btIndex] = b[bIndex];
}
__syncthreads();
if(col < m && row < n) {
for (k = 0, i = ty * TW, j = tx; k < min(p - t * TW, TW); k++, i++, j+=TW)
cvalue += atile[i] * btile[j];
}
__syncthreads();
}
if(col < m && row < n) {
c[row * m + col] = cvalue;
}
}
void matrixmult_gpu(bool aTrans, bool bTrans, const FP *a, const FP *b, FP *c, int n, int m, int p,
FP* dev_a, FP* dev_b, FP* dev_c) {
hipMemcpy(dev_a, a , n * p * sizeof(FP) ,hipMemcpyHostToDevice);
hipMemcpy(dev_b, b , p * m * sizeof(FP),hipMemcpyHostToDevice);
int Grid_Dim = ceil(max(n, m) / float(Block_Dim));
dim3 Grid(Grid_Dim, Grid_Dim); //Grid structure
dim3 Block(Block_Dim, Block_Dim); //Block structure
//size of dynamic shared memory
int Ns = 2 * Block_Dim * Block_Dim * sizeof(FP);
if (aTrans && !bTrans) {
hipLaunchKernelGGL(( atrans_matmul_tiled_kernel), dim3(Grid), dim3(Block), Ns, 0, dev_a, dev_b, dev_c, n, m, p);
} else if (!aTrans && bTrans) {
hipLaunchKernelGGL(( btrans_matmul_tiled_kernel), dim3(Grid), dim3(Block), Ns, 0, dev_a, dev_b, dev_c, n, m, p);
} else if (!aTrans && !bTrans) {
hipLaunchKernelGGL(( matmul_tiled_kernel), dim3(Grid), dim3(Block), Ns, 0, dev_a, dev_b, dev_c, n, m, p);
}
hipMemcpy(c,dev_c, n * m * sizeof(FP),hipMemcpyDeviceToHost);
}
__global__ void im2col_gpu_kernel(const int n, const FP* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
FP* data_col) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
FP* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const FP* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i;
int w_im = w_offset + j;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(const int channels, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const FP* data_im, FP* dev_data_im, FP* data_col,
FP* dev_data_col, const int data_im_size, const int data_col_size,
const int height_col, const int width_col,
const int num_kernels, const int num_blocks) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
hipMemcpy(dev_data_im, data_im, data_im_size ,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( im2col_gpu_kernel), dim3(num_blocks), dim3(NUM_THREADS), 0, 0,
num_kernels, dev_data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col, width_col, dev_data_col);
hipMemcpy(data_col, dev_data_col, data_col_size, hipMemcpyDeviceToHost);
}
__global__ void col2im_gpu_kernel(const int n, const FP* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
FP* data_im) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x) {
FP val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_w) ? 0 : (w_im - kernel_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_h) ? 0 : (h_im - kernel_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
data_im[index] = val;
}
}
void col2im_gpu(const int channels, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const FP* data_col, FP* dev_data_col, FP* data_im,
FP* dev_data_im, const int data_im_size, const int data_col_size,
const int height_col, const int width_col,
const int num_kernels, const int num_blocks) {
hipMemcpy(dev_data_col, data_col , data_col_size ,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( col2im_gpu_kernel), dim3(num_blocks), dim3(NUM_THREADS), 0, 0,
num_kernels, dev_data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, height_col, width_col, dev_data_im);
hipMemcpy(data_im, dev_data_im, data_im_size, hipMemcpyDeviceToHost);
}
| c95368f354997d77a551897b8be7c23b50ef0d9a.cu | #include <cuda.h>
#include <math.h>
#include <iostream>
#include "utils.h"
#define Block_Dim 32
#define NUM_THREADS Block_Dim * Block_Dim
using namespace std;
void memcopy_gpu(FP *dst, const FP *src, int size)
{
cudaMemcpy(dst, src, size, cudaMemcpyHostToHost);
}
__global__ void matmul_tiled_kernel(const FP *a, const FP *b, FP *c, int n, int m, int p) {
int TW = blockDim.x;
extern __shared__ FP bigarray[];
FP *atile = &bigarray[0], *btile = &bigarray[TW * TW];
int t, k, i, j, aIndex, bIndex, atIndex, btIndex;
int tx = threadIdx.x, ty = threadIdx.y;
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
FP cvalue = 0.;
aIndex = row * p + tx;
atIndex = ty * TW + tx;
bIndex = ty * m + col;
btIndex = ty * TW + tx;
for (t = 0; t * TW < p; t++, aIndex += TW, bIndex += TW * m) {
if (row < n && t * TW + tx < p) {
atile[atIndex] = a[aIndex];
}
if (col < m && t * TW + ty < p) {
btile[btIndex] = b[bIndex];
}
__syncthreads();
if(col < m && row < n) {
for (k = 0, i = ty * TW, j = tx; k < min(p - t * TW, TW); k++, i++, j+=TW)
cvalue += atile[i] * btile[j];
}
__syncthreads();
}
if(col < m && row < n) {
c[row * m + col] = cvalue;
}
}
__global__ void atrans_matmul_tiled_kernel(const FP *a, const FP *b, FP *c, int n, int m, int p) {
int TW = blockDim.x;
extern __shared__ FP bigarray[];
FP *atile = &bigarray[0], *btile = &bigarray[TW * TW];
int t, k, i, j, aIndex, bIndex, atIndex, btIndex;
int tx = threadIdx.x, ty = threadIdx.y;
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
FP cvalue = 0.;
aIndex = tx * n + row;
atIndex = ty * TW + tx;
bIndex = ty * m + col;
btIndex = ty * TW + tx;
for (t = 0; t * TW < p; t++, aIndex += TW * n, bIndex += TW * m) {
if (row < n && t * TW + tx < p) {
atile[atIndex] = a[aIndex];
}
if (col < m && t * TW + ty < p) {
btile[btIndex] = b[bIndex];
}
__syncthreads();
if(col < m && row < n) {
for (k = 0, i = ty * TW, j = tx; k < min(p - t * TW, TW); k++, i++, j+=TW)
cvalue += atile[i] * btile[j];
}
__syncthreads();
}
if(col < m && row < n) {
c[row * m + col] = cvalue;
}
}
__global__ void btrans_matmul_tiled_kernel(const FP *a, const FP *b, FP *c, int n, int m, int p) {
int TW = blockDim.x;
extern __shared__ FP bigarray[];
FP *atile = &bigarray[0], *btile = &bigarray[TW * TW];
int t, k, i, j, aIndex, bIndex, atIndex, btIndex;
int tx = threadIdx.x, ty = threadIdx.y;
int col = tx + blockDim.x * blockIdx.x;
int row = ty + blockDim.y * blockIdx.y;
FP cvalue = 0.;
aIndex = row * p + tx;
atIndex = ty * TW + tx;
bIndex = col * p + ty;
btIndex = ty * TW + tx;
for (t = 0; t * TW < p; t++, aIndex += TW, bIndex += TW) {
if (row < n && t * TW + tx < p) {
atile[atIndex] = a[aIndex];
}
if (col < m && t * TW + ty < p) {
btile[btIndex] = b[bIndex];
}
__syncthreads();
if(col < m && row < n) {
for (k = 0, i = ty * TW, j = tx; k < min(p - t * TW, TW); k++, i++, j+=TW)
cvalue += atile[i] * btile[j];
}
__syncthreads();
}
if(col < m && row < n) {
c[row * m + col] = cvalue;
}
}
void matrixmult_gpu(bool aTrans, bool bTrans, const FP *a, const FP *b, FP *c, int n, int m, int p,
FP* dev_a, FP* dev_b, FP* dev_c) {
cudaMemcpy(dev_a, a , n * p * sizeof(FP) ,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b , p * m * sizeof(FP),cudaMemcpyHostToDevice);
int Grid_Dim = ceil(max(n, m) / float(Block_Dim));
dim3 Grid(Grid_Dim, Grid_Dim); //Grid structure
dim3 Block(Block_Dim, Block_Dim); //Block structure
//size of dynamic shared memory
int Ns = 2 * Block_Dim * Block_Dim * sizeof(FP);
if (aTrans && !bTrans) {
atrans_matmul_tiled_kernel<<<Grid, Block, Ns>>>(dev_a, dev_b, dev_c, n, m, p);
} else if (!aTrans && bTrans) {
btrans_matmul_tiled_kernel<<<Grid, Block, Ns>>>(dev_a, dev_b, dev_c, n, m, p);
} else if (!aTrans && !bTrans) {
matmul_tiled_kernel<<<Grid, Block, Ns>>>(dev_a, dev_b, dev_c, n, m, p);
}
cudaMemcpy(c,dev_c, n * m * sizeof(FP),cudaMemcpyDeviceToHost);
}
__global__ void im2col_gpu_kernel(const int n, const FP* data_im,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
FP* data_col) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x) {
const int h_index = index / width_col;
const int h_col = h_index % height_col;
const int w_col = index % width_col;
const int c_im = h_index / height_col;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col * stride_h - pad_h;
const int w_offset = w_col * stride_w - pad_w;
FP* data_col_ptr = data_col;
data_col_ptr += (c_col * height_col + h_col) * width_col + w_col;
const FP* data_im_ptr = data_im;
data_im_ptr += (c_im * height + h_offset) * width + w_offset;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
int h_im = h_offset + i;
int w_im = w_offset + j;
*data_col_ptr =
(h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ?
data_im_ptr[i * width + j] : 0;
data_col_ptr += height_col * width_col;
}
}
}
}
void im2col_gpu(const int channels, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const FP* data_im, FP* dev_data_im, FP* data_col,
FP* dev_data_col, const int data_im_size, const int data_col_size,
const int height_col, const int width_col,
const int num_kernels, const int num_blocks) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
cudaMemcpy(dev_data_im, data_im, data_im_size ,cudaMemcpyHostToDevice);
im2col_gpu_kernel<<<num_blocks, NUM_THREADS>>>(
num_kernels, dev_data_im, height, width, kernel_h, kernel_w, pad_h,
pad_w, stride_h, stride_w, height_col, width_col, dev_data_col);
cudaMemcpy(data_col, dev_data_col, data_col_size, cudaMemcpyDeviceToHost);
}
__global__ void col2im_gpu_kernel(const int n, const FP* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int height_col, const int width_col,
FP* data_im) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x) {
FP val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_w) ? 0 : (w_im - kernel_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_h) ? 0 : (h_im - kernel_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
data_im[index] = val;
}
}
void col2im_gpu(const int channels, const int height, const int width, const int kernel_h,
const int kernel_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const FP* data_col, FP* dev_data_col, FP* data_im,
FP* dev_data_im, const int data_im_size, const int data_col_size,
const int height_col, const int width_col,
const int num_kernels, const int num_blocks) {
cudaMemcpy(dev_data_col, data_col , data_col_size ,cudaMemcpyHostToDevice);
col2im_gpu_kernel<<<num_blocks, NUM_THREADS>>>(
num_kernels, dev_data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, height_col, width_col, dev_data_im);
cudaMemcpy(data_im, dev_data_im, data_im_size, cudaMemcpyDeviceToHost);
}
|
c670748125c97eb764938a172f7e70bab9695d16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.cuh"
#include "ops_copy.cuh"
#include "spatial_deform.cuh"
#include "interpolate.cuh"
__global__ void set_coords_2D(float* coords, size_t y, size_t x){
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t id_x = index % x;
size_t id_y = index / x;
if(index < x * y){
coords[id_x + id_y * x] = id_y - (float)y/2.0;
coords[id_x + id_y * x + x*y] = id_x - (float)x/2.0;
}
__syncthreads();
}
__global__ void set_coords_3D(float* coords, size_t z, size_t y, size_t x){
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t id_x = index % x;
size_t id_y = (index / x) % y;
size_t id_z = index / (x * y);
if(index < x * y * z){
coords[index] = id_z - (float)z/2.0;
coords[index + x * y * z] = id_y - (float)y/2.0;
coords[index + 2 * x * y * z] = id_x -(float)x/2.0;
}
__syncthreads();
}
void Handle::set_2D(size_t y, size_t x){
is_3D = false;
dim_x = x;
dim_y = y;
total_size = dim_x * dim_y;
coords_size = total_size * 2;
std::cout<<"Malloc for 2D image ----------\n"
<<" dim_x : "<<dim_x
<<" dim_y : "<<dim_y
<<" total : "<<total_size<<std::endl;
std::cout<<"Malloc "<< 6 * total_size * sizeof(float)/1024/1024
<< "MB"<<std::endl;
checkCudaErrors(hipMalloc((void **)&img,
total_size * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&output,
total_size * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&pin_img,
total_size * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&pin_output,
total_size * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&random,
coords_size * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&coords,
coords_size * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&pin_coords,
coords_size * sizeof(float)));
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( set_coords_2D), dim3(blocks), dim3(threads), 0, stream, coords, dim_y, dim_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
void Handle::set_3D(size_t z, size_t y, size_t x){
is_3D = true;
dim_x = x;
dim_y = y;
dim_z = z;
total_size = dim_x * dim_y * dim_z;
coords_size = total_size * 3;
std::cout<<"Malloc for 3D image ----------\n"
<<" dim_x : "<<dim_x
<<" dim_y : "<<dim_y
<<" dim_z : "<<dim_z
<<" total : "<<total_size<<std::endl;
std::cout<<"Malloc "<< 8 * total_size * sizeof(float)/1024/1024
<< "MB"<<std::endl;
checkCudaErrors(hipMalloc((void **)&gpu_rot_matrix, 9 * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&img,
total_size * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&output,
total_size * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&pin_img,
total_size * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&pin_output,
total_size * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&random,
coords_size * sizeof(float)));
checkCudaErrors(hipMalloc((void **)&coords,
coords_size * sizeof(float)));
checkCudaErrors(hipHostMalloc((void **)&pin_coords,
coords_size * sizeof(float)));
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( set_coords_3D), dim3(blocks), dim3(threads), 0, stream, coords, dim_z, dim_y, dim_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
void Handle::scale(float scale){
assert(scale <= 1.0 && scale > 0.0);
dim3 threads(min(coords_size, (long)512), 1, 1);
dim3 blocks(coords_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( device_apply_scale), dim3(blocks), dim3(threads), 0, stream, coords, scale, coords_size);
}
void Handle::flip(int do_x, int do_y, int do_z){
if(is_3D){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( flip_3D), dim3(blocks), dim3(threads), 0, stream, coords, dim_z, dim_y, dim_x,
do_z, do_y, do_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
else{
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( flip_2D), dim3(blocks), dim3(threads), 0, stream, coords, dim_y, dim_x, do_y, do_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
}
void Handle::host_rotate_2D(float angle){
float cos_angle = cos(angle);
float sin_angle = sin(angle);
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( rotate_2D), dim3(blocks), dim3(threads), 0, stream, coords, dim_y, dim_x, cos_angle, sin_angle);
checkCudaErrors(hipStreamSynchronize(stream));
}
void Handle::host_rotate_3D(float* rot_matrix){
checkCudaErrors(hipMemcpyAsync(gpu_rot_matrix,
rot_matrix,
9 * sizeof(float),
hipMemcpyHostToDevice,
stream));
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( rotate_3D), dim3(blocks), dim3(threads), 0, stream, coords, dim_z, dim_y, dim_x, gpu_rot_matrix);
checkCudaErrors(hipStreamSynchronize(stream));
}
void Handle::elastic(float sigma, float alpha, float truncate,
int mode_type, float c_val){
// generate random offset by coords.size
checkCudaErrors(hiprandGenerateUniform(gen, random, coords_size));
// make the radius of the filter equal to truncate standard deviations
int lw = int(sigma * truncate + 0.5);
float sigma2 = sigma * sigma;
// generate kernel
float total = 0;
for(int i = -lw; i < lw + 1; i++){
kernel_pin[i + lw] = exp(i * i * -0.5 / sigma2);
total += kernel_pin[i + lw];
}
for(int i = -lw; i < lw + 1; i++){
kernel_pin[i + lw] = kernel_pin[i + lw] / total;
}
// Copy kernel
checkCudaErrors(hipMemcpyAsync(kernel,
kernel_pin,
(2 * lw + 1) * sizeof(float),
hipMemcpyHostToDevice,
stream));
checkCudaErrors(hipStreamSynchronize(stream));
if(is_3D){
dim3 threads(min(coords_size, (long)512), 1, 1);
dim3 blocks(coords_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( scale_random), dim3(blocks), dim3(threads), 0, stream, random, coords_size);
hipLaunchKernelGGL(( gussain_filter_x), dim3(blocks), dim3(threads), 0, stream, random, kernel, lw, dim_z,
dim_y, dim_x, mode_type, c_val);
hipLaunchKernelGGL(( gussain_filter_y), dim3(blocks), dim3(threads), 0, stream, random, kernel, lw, dim_z,
dim_y, dim_x, mode_type, c_val);
hipLaunchKernelGGL(( gussain_filter_z), dim3(blocks), dim3(threads), 0, stream, random, kernel, lw, dim_z,
dim_y, dim_x, mode_type, c_val);
hipLaunchKernelGGL(( plus_offsets), dim3(blocks), dim3(threads), 0, stream, coords, random, coords_size, alpha);
checkCudaErrors(hipStreamSynchronize(stream));
}
else{
dim3 threads(min(coords_size, (long)512), 1, 1);
dim3 blocks(coords_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( scale_random), dim3(blocks), dim3(threads), 0, stream, random, coords_size);
hipLaunchKernelGGL(( gussain_filter_x), dim3(blocks), dim3(threads), 0, stream, random, kernel, lw, 1,
dim_y, dim_x, mode_type, c_val);
hipLaunchKernelGGL(( gussain_filter_y), dim3(blocks), dim3(threads), 0, stream, random, kernel, lw, 1,
dim_y, dim_x, mode_type, c_val);
hipLaunchKernelGGL(( plus_offsets), dim3(blocks), dim3(threads), 0, stream, coords, random, coords_size, alpha);
checkCudaErrors(hipStreamSynchronize(stream));
}
}
void Handle::translate(float seg_x, float seg_y, float seg_z){
if(is_3D){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( translate_3D), dim3(blocks), dim3(threads), 0, stream, coords, dim_z, dim_y, dim_x,
seg_z, seg_y, seg_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
else{
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( translate_2D), dim3(blocks), dim3(threads), 0, stream, coords, dim_y, dim_x, seg_y, seg_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
}
void Handle::copy_input(float* input){
memcpy(pin_img, input, total_size * sizeof(float));
checkCudaErrors(hipMemcpyAsync(img, pin_img, total_size * sizeof(float),
hipMemcpyHostToDevice, stream));
}
void Handle::do_nothing(){
only_copy(output, img, total_size);
}
void Handle::copy_output(float* ret){
checkCudaErrors(hipMemcpyAsync(pin_output, output, total_size * sizeof(float),
hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
memcpy(ret, pin_output, total_size * sizeof(float));
}
void Handle::check_coords(float* output){
checkCudaErrors(hipMemcpyAsync(pin_coords, coords, coords_size * sizeof(float),
hipMemcpyDeviceToHost, stream));
checkCudaErrors(hipStreamSynchronize(stream));
memcpy(output, pin_coords, coords_size * sizeof(float));
}
void Handle::interpolate(int order){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
if(is_3D){
hipLaunchKernelGGL(( interplate_3D), dim3(blocks), dim3(threads), 0, stream, coords, img, output, order,
dim_z, dim_y, dim_x, mode_type, c_val);
}
else{
hipLaunchKernelGGL(( interplate_2D), dim3(blocks), dim3(threads), 0, stream, coords, img, output, order,
dim_y, dim_x, mode_type, c_val);
}
}
void Handle::reset(){
if(is_3D){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( set_coords_3D), dim3(blocks), dim3(threads), 0, stream, coords, dim_z, dim_y, dim_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
else{
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( set_coords_2D), dim3(blocks), dim3(threads), 0, stream, coords, dim_y, dim_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
}
void Handle::recenter(){
if(is_3D){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( recenter_3D), dim3(blocks), dim3(threads), 0, stream, coords, dim_z, dim_y, dim_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
else{
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
hipLaunchKernelGGL(( recenter_2D), dim3(blocks), dim3(threads), 0, stream, coords, dim_y, dim_x);
checkCudaErrors(hipStreamSynchronize(stream));
}
}
| c670748125c97eb764938a172f7e70bab9695d16.cu | #include "utils.cuh"
#include "ops_copy.cuh"
#include "spatial_deform.cuh"
#include "interpolate.cuh"
__global__ void set_coords_2D(float* coords, size_t y, size_t x){
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t id_x = index % x;
size_t id_y = index / x;
if(index < x * y){
coords[id_x + id_y * x] = id_y - (float)y/2.0;
coords[id_x + id_y * x + x*y] = id_x - (float)x/2.0;
}
__syncthreads();
}
__global__ void set_coords_3D(float* coords, size_t z, size_t y, size_t x){
size_t index = blockIdx.x * blockDim.x + threadIdx.x;
size_t id_x = index % x;
size_t id_y = (index / x) % y;
size_t id_z = index / (x * y);
if(index < x * y * z){
coords[index] = id_z - (float)z/2.0;
coords[index + x * y * z] = id_y - (float)y/2.0;
coords[index + 2 * x * y * z] = id_x -(float)x/2.0;
}
__syncthreads();
}
void Handle::set_2D(size_t y, size_t x){
is_3D = false;
dim_x = x;
dim_y = y;
total_size = dim_x * dim_y;
coords_size = total_size * 2;
std::cout<<"Malloc for 2D image ----------\n"
<<" dim_x : "<<dim_x
<<" dim_y : "<<dim_y
<<" total : "<<total_size<<std::endl;
std::cout<<"Malloc "<< 6 * total_size * sizeof(float)/1024/1024
<< "MB"<<std::endl;
checkCudaErrors(cudaMalloc((void **)&img,
total_size * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&output,
total_size * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&pin_img,
total_size * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&pin_output,
total_size * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&random,
coords_size * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&coords,
coords_size * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&pin_coords,
coords_size * sizeof(float)));
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
set_coords_2D<<<blocks, threads, 0, stream>>>(coords, dim_y, dim_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
void Handle::set_3D(size_t z, size_t y, size_t x){
is_3D = true;
dim_x = x;
dim_y = y;
dim_z = z;
total_size = dim_x * dim_y * dim_z;
coords_size = total_size * 3;
std::cout<<"Malloc for 3D image ----------\n"
<<" dim_x : "<<dim_x
<<" dim_y : "<<dim_y
<<" dim_z : "<<dim_z
<<" total : "<<total_size<<std::endl;
std::cout<<"Malloc "<< 8 * total_size * sizeof(float)/1024/1024
<< "MB"<<std::endl;
checkCudaErrors(cudaMalloc((void **)&gpu_rot_matrix, 9 * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&img,
total_size * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&output,
total_size * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&pin_img,
total_size * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&pin_output,
total_size * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&random,
coords_size * sizeof(float)));
checkCudaErrors(cudaMalloc((void **)&coords,
coords_size * sizeof(float)));
checkCudaErrors(cudaMallocHost((void **)&pin_coords,
coords_size * sizeof(float)));
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
set_coords_3D<<<blocks, threads, 0, stream>>>(coords, dim_z, dim_y, dim_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
void Handle::scale(float scale){
assert(scale <= 1.0 && scale > 0.0);
dim3 threads(min(coords_size, (long)512), 1, 1);
dim3 blocks(coords_size/512 + 1, 1, 1);
device_apply_scale<<<blocks, threads, 0, stream>>>(coords, scale, coords_size);
}
void Handle::flip(int do_x, int do_y, int do_z){
if(is_3D){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
flip_3D<<<blocks, threads, 0, stream>>>(coords, dim_z, dim_y, dim_x,
do_z, do_y, do_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
else{
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
flip_2D<<<blocks, threads, 0, stream>>>(coords, dim_y, dim_x, do_y, do_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
}
void Handle::host_rotate_2D(float angle){
float cos_angle = cos(angle);
float sin_angle = sin(angle);
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
rotate_2D<<<blocks, threads, 0, stream>>>(coords, dim_y, dim_x, cos_angle, sin_angle);
checkCudaErrors(cudaStreamSynchronize(stream));
}
void Handle::host_rotate_3D(float* rot_matrix){
checkCudaErrors(cudaMemcpyAsync(gpu_rot_matrix,
rot_matrix,
9 * sizeof(float),
cudaMemcpyHostToDevice,
stream));
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
rotate_3D<<<blocks, threads, 0, stream>>>(coords, dim_z, dim_y, dim_x, gpu_rot_matrix);
checkCudaErrors(cudaStreamSynchronize(stream));
}
void Handle::elastic(float sigma, float alpha, float truncate,
int mode_type, float c_val){
// generate random offset by coords.size
checkCudaErrors(curandGenerateUniform(gen, random, coords_size));
// make the radius of the filter equal to truncate standard deviations
int lw = int(sigma * truncate + 0.5);
float sigma2 = sigma * sigma;
// generate kernel
float total = 0;
for(int i = -lw; i < lw + 1; i++){
kernel_pin[i + lw] = exp(i * i * -0.5 / sigma2);
total += kernel_pin[i + lw];
}
for(int i = -lw; i < lw + 1; i++){
kernel_pin[i + lw] = kernel_pin[i + lw] / total;
}
// Copy kernel
checkCudaErrors(cudaMemcpyAsync(kernel,
kernel_pin,
(2 * lw + 1) * sizeof(float),
cudaMemcpyHostToDevice,
stream));
checkCudaErrors(cudaStreamSynchronize(stream));
if(is_3D){
dim3 threads(min(coords_size, (long)512), 1, 1);
dim3 blocks(coords_size/512 + 1, 1, 1);
scale_random<<<blocks, threads, 0, stream>>>(random, coords_size);
gussain_filter_x<<<blocks, threads, 0, stream>>>(random, kernel, lw, dim_z,
dim_y, dim_x, mode_type, c_val);
gussain_filter_y<<<blocks, threads, 0, stream>>>(random, kernel, lw, dim_z,
dim_y, dim_x, mode_type, c_val);
gussain_filter_z<<<blocks, threads, 0, stream>>>(random, kernel, lw, dim_z,
dim_y, dim_x, mode_type, c_val);
plus_offsets<<<blocks, threads, 0, stream>>>(coords, random, coords_size, alpha);
checkCudaErrors(cudaStreamSynchronize(stream));
}
else{
dim3 threads(min(coords_size, (long)512), 1, 1);
dim3 blocks(coords_size/512 + 1, 1, 1);
scale_random<<<blocks, threads, 0, stream>>>(random, coords_size);
gussain_filter_x<<<blocks, threads, 0, stream>>>(random, kernel, lw, 1,
dim_y, dim_x, mode_type, c_val);
gussain_filter_y<<<blocks, threads, 0, stream>>>(random, kernel, lw, 1,
dim_y, dim_x, mode_type, c_val);
plus_offsets<<<blocks, threads, 0, stream>>>(coords, random, coords_size, alpha);
checkCudaErrors(cudaStreamSynchronize(stream));
}
}
void Handle::translate(float seg_x, float seg_y, float seg_z){
if(is_3D){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
translate_3D<<<blocks, threads, 0, stream>>>(coords, dim_z, dim_y, dim_x,
seg_z, seg_y, seg_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
else{
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
translate_2D<<<blocks, threads, 0, stream>>>(coords, dim_y, dim_x, seg_y, seg_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
}
void Handle::copy_input(float* input){
memcpy(pin_img, input, total_size * sizeof(float));
checkCudaErrors(cudaMemcpyAsync(img, pin_img, total_size * sizeof(float),
cudaMemcpyHostToDevice, stream));
}
void Handle::do_nothing(){
only_copy(output, img, total_size);
}
void Handle::copy_output(float* ret){
checkCudaErrors(cudaMemcpyAsync(pin_output, output, total_size * sizeof(float),
cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
memcpy(ret, pin_output, total_size * sizeof(float));
}
void Handle::check_coords(float* output){
checkCudaErrors(cudaMemcpyAsync(pin_coords, coords, coords_size * sizeof(float),
cudaMemcpyDeviceToHost, stream));
checkCudaErrors(cudaStreamSynchronize(stream));
memcpy(output, pin_coords, coords_size * sizeof(float));
}
void Handle::interpolate(int order){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
if(is_3D){
interplate_3D<<<blocks, threads, 0, stream>>>(coords, img, output, order,
dim_z, dim_y, dim_x, mode_type, c_val);
}
else{
interplate_2D<<<blocks, threads, 0, stream>>>(coords, img, output, order,
dim_y, dim_x, mode_type, c_val);
}
}
void Handle::reset(){
if(is_3D){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
set_coords_3D<<<blocks, threads, 0, stream>>>(coords, dim_z, dim_y, dim_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
else{
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
set_coords_2D<<<blocks, threads, 0, stream>>>(coords, dim_y, dim_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
}
void Handle::recenter(){
if(is_3D){
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
recenter_3D<<<blocks, threads, 0, stream>>>(coords, dim_z, dim_y, dim_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
else{
dim3 threads(min(total_size, (long)512), 1, 1);
dim3 blocks(total_size/512 + 1, 1, 1);
recenter_2D<<<blocks, threads, 0, stream>>>(coords, dim_y, dim_x);
checkCudaErrors(cudaStreamSynchronize(stream));
}
}
|
8ca787ed7ac0cf68d6517904d46dc5def0a33944.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// see LICENSE_mxnet_permutohedral
#include "permutohedral_ops.h"
#include "cu_hash_table.h"
#include "caffe/util/device_alternate.hpp"
//#define CUDABLOCKSIZE 64
#define CUDABLOCKSIZE 256
#define PARAM_NORMALIZE_TRUE true
template <typename Dtype>
__global__ void computeSpatialCoords_1D(const int n_elements, float* output_buf) {
CUDA_KERNEL_LOOP(p, n_elements) {
output_buf[p] = static_cast<float>(p);
}
}
template <typename Dtype>
__global__ void computeSpatialCoords_2D(const int n_elements, float* output_buf,
const int width_dim1) {
CUDA_KERNEL_LOOP(p, n_elements) {
output_buf[p ] = static_cast<float>(p / width_dim1);
output_buf[p + n_elements] = static_cast<float>(p % width_dim1);
}
}
template <typename Dtype>
__global__ void computeSpatialCoords_3D(const int n_elements, float* output_buf,
const int width_dim1, const int width_dim2) {
int a;
CUDA_KERNEL_LOOP(p, n_elements) {
a = (p / width_dim2);
output_buf[p ] = static_cast<float>(a / width_dim1);
output_buf[p + n_elements] = static_cast<float>(a % width_dim1);
output_buf[p + 2*n_elements] = static_cast<float>(p % width_dim2);
}
}
namespace permutohedral {
template<int key_size>
__global__ void init(CuHashTable<key_size> table,
const int n_elements,
const float *pos1, const int n_dim_pos1, const float *pos2,
const float *scale,
Pair *matrix) {
float elevated[key_size+1];
int greedy[key_size+1];
int rank[key_size+1];
float barycentric[key_size+2];
short key[key_size];
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n_elements) return;
float sm = 0;
for (int i = key_size; i > 0; i--) {
float cf = (i <= n_dim_pos1 ? pos1[(i-1)*n_elements + idx] : pos2[(i-1-n_dim_pos1)*n_elements + idx])*scale[i-1];
elevated[i] = sm - i*cf;
sm += cf;
}
elevated[0] = sm;
// find the closest zero-colored lattice point
// greedily search for the closest zero-colored lattice point
short sum = 0;
for (int i = 0; i <= key_size; i++) {
float v = elevated[i]*(1.0f/(key_size+1));
float up = ceilf(v) * (key_size+1);
float down = floorf(v) * (key_size+1);
if (up - elevated[i] < elevated[i] - down) {
greedy[i] = static_cast<short>(up);
} else {
greedy[i] = static_cast<short>(down);
}
sum += greedy[i];
}
sum /= key_size+1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= key_size; i++) {
rank[i] = 0;
for (int j = 0; j <= key_size; j++) {
if (elevated[i] - greedy[i] < elevated[j] - greedy[j] ||
(elevated[i] - greedy[i] == elevated[j] - greedy[j]
&& i > j)) {
rank[i]++;
}
}
}
if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential
for (int i = 0; i <= key_size; i++) {
if (rank[i] >= key_size + 1 - sum) {
greedy[i] -= key_size+1;
rank[i] += sum - (key_size+1);
} else {
rank[i] += sum;
}
}
} else if (sum < 0) { // sum too small, need to bring up the ones with largest differential
for (int i = 0; i <= key_size; i++) {
if (rank[i] < -sum) {
greedy[i] += key_size+1;
rank[i] += (key_size+1) + sum;
} else {
rank[i] += sum;
}
}
}
// turn delta into barycentric coords
for (int i = 0; i <= key_size+1; i++) {
barycentric[i] = 0;
}
for (int i = 0; i <= key_size; i++) {
float delta = (elevated[i] - greedy[i]) * (1.0f/(key_size+1));
barycentric[key_size-rank[i]] += delta;
barycentric[key_size+1-rank[i]] -= delta;
}
barycentric[0] += 1.0f + barycentric[key_size+1];
for (int color = 0; color <= key_size; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
for (int i = 0; i < key_size; i++) {
key[i] = greedy[i] + color;
if (rank[i] > key_size-color) key[i] -= (key_size+1);
}
Pair r;
r.index = table.insert(key, idx*(key_size+1)+color);
r.weight = barycentric[color];
matrix[idx*(key_size+1) + color] = r;
}
}
template<int key_size, bool normalize>
__global__ void splat(CuHashTable<key_size> table,
const int32_t n_elements,
const int32_t val_size,
const float *data,
float *val,
const Pair *matrix) {
const int idx = threadIdx.y + blockIdx.y * blockDim.y;
if (idx >= n_elements) return;
const int color = threadIdx.x;
Pair r = matrix[idx*(key_size+1)+color];
float *dst = val + r.index*val_size;
if (!normalize) {
for (int j = 0; j < val_size; j++) {
atomicAdd(dst+j, data[j*n_elements + idx]*r.weight);
}
} else {
for (int j = 0; j < val_size-1; j++) {
atomicAdd(dst+j, data[j*n_elements + idx]*r.weight);
}
atomicAdd(dst+val_size-1, 1.f*r.weight);
}
}
template<int key_size>
__global__ static void blur(CuHashTable<key_size> table,
const int32_t val_size,
const int32_t color,
const float *val,
float *new_val,
const Pair *matrix) {
short key[key_size+1];
short np[key_size+1];
short nm[key_size+1];
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= table.n_keys_) return;
// Check if I'm valid
if (matrix[idx].index != idx) return;
// find my key and the keys of my neighbours
for (int i = 0; i < key_size; i++) {
key[i] = table.keys_[idx*key_size+i];
np[i] = key[i]+1;
nm[i] = key[i]-1;
}
np[color] -= key_size+1;
nm[color] += key_size+1;
int offNp = table.find(np);
int offNm = table.find(nm);
const float *valMe = val + val_size*idx;
const float *valNp = val + val_size*offNp;
const float *valNm = val + val_size*offNm;
float *valOut = new_val + val_size*idx;
for (int i = 0; i < val_size; i++) {
float o = valMe[i];
if (offNp >= 0) o += 0.5f*valNp[i];
if (offNm >= 0) o += 0.5f*valNm[i];
valOut[i] = o;
}
}
template<int key_size, bool normalize, bool save>
__global__ void slice(CuHashTable<key_size> table,
const int32_t n_elements,
const int32_t val_size,
const float *val,
float *out,
const Pair *matrix,
float *norm) {
const float alpha = 1.0f / (1+powf(2, -key_size-1));
int32_t index[key_size+1];
float weight[key_size+1];
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
for (int i = 0; i <= key_size; ++i) {
Pair r = matrix[idx*(key_size+1) + i];
index[i] = r.index;
weight[i] = r.weight;
}
if (!normalize) {
for (int j = 0; j < val_size; ++j) {
float v = 0.0f;
for (int i = 0; i <= key_size; ++i) {
v += weight[i]*val[index[i]*val_size + j];
}
out[j*n_elements + idx] = v * alpha;
}
} else {
float n = 0.0f;
for (int i = 0; i <= key_size; ++i) {
n += weight[i]*val[index[i]*val_size + val_size - 1];
}
n = 1.0f/n;
for (int j = 0; j < val_size-1; ++j) {
float v = 0.0f;
for (int i = 0; i <= key_size; ++i) {
v += weight[i]*val[index[i]*val_size + j];
}
out[j*n_elements + idx] = v * n;
}
if(save)
norm[idx] = n;
}
}
template<int key_size, bool normalize>
__global__ void pos_grad_init(const int32_t n_elements, const int32_t val_size,
const float *ograd,
const float *pos1, const int n_dim_pos1, const float *pos2,
const float *data, const float *out,
const float *norm, float *buf) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
float *f1 = buf;
float *f2 = f1 + key_size*val_size*n_elements;
float *f3 = f2 + val_size*n_elements;
float *f4 = f3 + key_size*val_size*n_elements;
float p[key_size];
for (int i = 0; i < key_size; ++i)
p[i] = (i < n_dim_pos1 ? pos1[i*n_elements + idx] : pos2[(i-n_dim_pos1)*n_elements + idx]);
float n;
if (normalize)
n = norm[idx];
float deltan = 0.f;
for (int j = 0; j < (normalize ? val_size - 1 : val_size); ++j) {
const int idx24 = j*n_elements + idx;
const float vj = data[idx24];
const float deltaj = normalize ? ograd[idx24]*n : ograd[idx24];
f2[idx24] = vj;
f4[idx24] = deltaj;
if (normalize)
deltan -= out[idx24]*deltaj;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + j)*n_elements + idx;
f1[idx13] = p[i]*vj;
f3[idx13] = p[i]*deltaj;
}
}
if (normalize) {
const int idx24 = (val_size-1)*n_elements + idx;
const float vj = 1.f;
f2[idx24] = vj;
f4[idx24] = deltan;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + val_size-1)*n_elements + idx;
f1[idx13] = p[i]*vj;
f3[idx13] = p[i]*deltan;
}
}
}
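// Backward pass w.r.t. the lattice positions, step 2: with F() denoting the
// filtered buffers built from f1..f4, accumulate per lattice dimension
//   pg_i = sum_j [ delta_j*(F(p_i*v)_j - p_i*F(v)_j) + v_j*(F(p_i*delta)_j - p_i*F(delta)_j) ]
// and write one gradient value per dimension and element.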
template<int key_size, bool normalize>
__global__ void pos_grad_reduce(const int32_t n_elements, const int32_t val_size,
const float *ograd,
const float *pos1, const int n_dim_pos1, const float *pos2,
const float *data, const float *out,
const float *norm, float *buf, float *pgrad) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
float *f1 = buf;
float *f2 = f1 + key_size*val_size*n_elements;
float *f3 = f2 + val_size*n_elements;
float *f4 = f3 + key_size*val_size*n_elements;
float p[key_size];
float pg[key_size];
for (int i = 0; i < key_size; ++i) {
p[i] = (i < n_dim_pos1 ? pos1[i*n_elements + idx] : pos2[(i-n_dim_pos1)*n_elements + idx]);
pg[i] = 0;
}
float n;
if (normalize)
n = norm[idx];
float deltan = 0.f;
for (int j = 0; j < (normalize ? val_size - 1 : val_size); ++j) {
const int idx24 = j*n_elements + idx;
const float vj = data[idx24];
const float deltaj = normalize ? ograd[idx24]*n : ograd[idx24];
if (normalize)
deltan -= out[idx24]*deltaj;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + j)*n_elements + idx;
pg[i] += deltaj*f1[idx13] - deltaj*p[i]*f2[idx24]
+ vj*f3[idx13] - vj*p[i]*f4[idx24];
}
}
if (normalize) {
const int idx24 = (val_size-1)*n_elements + idx;
const float vj = 1.f;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + val_size-1)*n_elements + idx;
pg[i] += deltan*f1[idx13] - deltan*p[i]*f2[idx24]
+ vj*f3[idx13] - vj*p[i]*f4[idx24];
}
}
for (int i = 0; i < key_size; ++i) {
pgrad[i*n_elements + idx] = pg[i];
}
}
} // namespace permutohedral
//##############################################################################
//##############################################################################
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::FreeTempSpace() {
if(cudadevice_init_ >= 0) { CUDA_CHECK(hipSetDevice(cudadevice_init_)); }
if(entries_ != NULL) {
CUDA_CHECK(hipFree(static_cast<void*>(entries_)));
keys_ = NULL;
entries_ = NULL;
matrix_ = NULL;
scale_ = new_vals_ = vals_ = NULL;
}
}
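// All temporary buffers (hash-table entries and keys, optional spatial features,
// two ping-pong value arrays, the (index, weight) matrix and the per-dimension
// scale vector) are carved out of a single hipMalloc allocation below, so
// FreeTempSpace can release everything with one hipFree.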
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::GetTempSpace(int val_size) {
using namespace permutohedral;
CHECK(init_);
FreeTempSpace();
const int requestedsize =
n_keys_*2*sizeof(int32_t) +
n_keys_*key_size*sizeof(int16_t) +
n_elements_*spatialposdim_*sizeof(Dtype) +
n_keys_*val_size*sizeof(float) +
n_keys_*val_size*sizeof(float) +
n_keys_*sizeof(Pair) +
key_size*sizeof(float);
uint8_t* ptr;
if(cudadevice_init_ >= 0) { CUDA_CHECK(hipSetDevice(cudadevice_init_)); }
CUDA_CHECK(hipMalloc((void**)&ptr, requestedsize));
entries_ = (int32_t*)ptr;
ptr += n_keys_*2*sizeof(int32_t);
keys_ = (int16_t*)ptr;
ptr += n_keys_*key_size*sizeof(int16_t);
CHECK_EQ(spatialposdim_ > 0, create_spatial_dimension_features_);
if(create_spatial_dimension_features_) {
spatialposfeats_ = (Dtype*)ptr;
ptr += n_elements_*spatialposdim_*sizeof(Dtype);
}
vals_ = (float*)ptr;
ptr += n_keys_*val_size*sizeof(float);
new_vals_ = (float*)ptr;
ptr += n_keys_*val_size*sizeof(float);
matrix_ = (Pair*)ptr;
ptr += n_keys_*sizeof(Pair);
scale_ = (float*)ptr;
ptr += key_size*sizeof(float);
CHECK_EQ(ptr - static_cast<uint8_t*>(static_cast<void*>(entries_)), requestedsize);
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::do_init(hipStream_t stream,
int cudadevice,
caffe::Blob<Dtype> const* input_tosmooth,
caffe::Blob<Dtype> const* input_featswrt) {
if (init_) {
CHECK_EQ(cudadevice_init_, cudadevice);
} else {
cudadevice_init_ = cudadevice;
batch_size_ = input_tosmooth->shape(0);
data_size_ = input_tosmooth->shape(1);
//if (PARAM_NORMALIZE_TRUE) {
val_size_ = data_size_ + 1;
//} else {
// val_size_ = data_size_;
//}
n_elements_ = input_tosmooth->count()/batch_size_/data_size_;
n_keys_ = n_elements_*(key_size+1);
CHECK_EQ(n_elements_*batch_size_*data_size_, input_tosmooth->count());
CHECK_EQ(input_featswrt->count()/(input_featswrt->shape(0)*input_featswrt->shape(1)), n_elements_);
CHECK_GE(input_featswrt->shape(1), 1);
// number of spatial dimensions is num_axes() - (batchsize==dim0) - (nchannels==dim1)
if(create_spatial_dimension_features_) {
spatialposdim_ = input_featswrt->num_axes() - 2;
} else {
spatialposdim_ = 0;
}
CHECK(spatialposdim_ >= 0);
CHECK_EQ(input_featswrt->shape(1), key_size - spatialposdim_);
lblock_ = CUDABLOCKSIZE;
nblock_ = (n_elements_-1)/lblock_+1;
init_ = true;
}
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::scale_init_host_to_device(hipStream_t* stream,
caffe::Blob<Dtype> const* input_featswrt) {
CHECK(init_ && scale_ != NULL);
float cpu_scale[key_size];
for (int i = 0; i < key_size; i++) {
cpu_scale[i] = static_cast<float>(key_size+1) *
sqrtf(static_cast<float>(2.0/3.0) / static_cast<float>((i+1)*(i+2)))
/ (this->stdv_widths_host_[i]);
}
if(cudadevice_init_ >= 0) { CUDA_CHECK(hipSetDevice(cudadevice_init_)); }
//CUDA_CHECK(hipMemcpy((void*)scale_, (void*)cpu_scale, key_size*sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpyAsync((void*)scale_, (void*)cpu_scale, key_size*sizeof(float), hipMemcpyHostToDevice, *stream));
if(create_spatial_dimension_features_) {
switch(spatialposdim_) {
case 1:hipLaunchKernelGGL(( computeSpatialCoords_1D<Dtype>), dim3(caffe::CAFFE_GET_BLOCKS(n_elements_)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, *stream,
n_elements_, spatialposfeats_
); break;
case 2:hipLaunchKernelGGL(( computeSpatialCoords_2D<Dtype>), dim3(caffe::CAFFE_GET_BLOCKS(n_elements_)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, *stream,
n_elements_, spatialposfeats_, input_featswrt->shape(3)
); break;
case 3:hipLaunchKernelGGL(( computeSpatialCoords_3D<Dtype>), dim3(caffe::CAFFE_GET_BLOCKS(n_elements_)), dim3(caffe::CAFFE_CUDA_NUM_THREADS), 0, *stream,
n_elements_, spatialposfeats_, input_featswrt->shape(3), input_featswrt->shape(4)
); break;
default: LOG(FATAL)<<"unsupported number of spatial dimensions "<<spatialposdim_; break;
}
}
}
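// Full filtering pipeline for one batch item: zero the lattice values, splat the
// inputs onto the lattice, run key_size+1 blur passes (ping-ponging between
// vals_ and new_vals_), then slice back to the elements, optionally normalising
// by the homogeneous channel and saving the normalisation factors.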
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::Filter(hipStream_t stream, permutohedral::CuHashTable<key_size> * table, bool normalize, int val_size,
const float *data, float *out, float *norm) {
using namespace permutohedral;
CUDA_CHECK(hipMemsetAsync(vals_, 0, n_keys_*val_size*sizeof(float), stream));
if (normalize) {
hipLaunchKernelGGL(( splat<key_size, true>), dim3(dim3(1, (n_elements_-1)/(lblock_/(key_size+1))+1, 1)), dim3(dim3(key_size+1, lblock_/(key_size+1), 1)), 0, stream,
*table, n_elements_, val_size, data, vals_, matrix_);
} else {
hipLaunchKernelGGL(( splat<key_size, false>), dim3(dim3(1, (n_elements_-1)/(lblock_/(key_size+1))+1, 1)), dim3(dim3(key_size+1, lblock_/(key_size+1), 1)), 0, stream,
*table, n_elements_, val_size, data, vals_, matrix_);
}
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(hipGetLastError(), hipSuccess);
float *pval = vals_;
float *pnew_val = new_vals_;
for (int j = 0; j <= key_size; ++j) {
hipLaunchKernelGGL(( blur<key_size>), dim3(dim3((n_keys_-1)/lblock_+1, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
*table, val_size, j, pval, pnew_val, matrix_);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(hipGetLastError(), hipSuccess);
std::swap(pval, pnew_val);
}
if (normalize) {
if (norm == NULL) {
hipLaunchKernelGGL(( slice<key_size, true, false>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
*table, n_elements_, val_size, pval, out, matrix_, NULL);
} else {
hipLaunchKernelGGL(( slice<key_size, true, true>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
*table, n_elements_, val_size, pval, out, matrix_, norm);
}
} else {
hipLaunchKernelGGL(( slice<key_size, false, false>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, stream,
*table, n_elements_, val_size, pval, out, matrix_, NULL);
}
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(hipGetLastError(), hipSuccess);
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::Forward(hipStream_t* stream,
int cudadevice,
caffe::Blob<Dtype> const* input_tosmooth,
caffe::Blob<Dtype> const* input_featswrt,
caffe::Blob<Dtype> * output_bilat) {
using namespace permutohedral;
do_init(*stream, cudadevice, input_tosmooth, input_featswrt);
GetTempSpace(val_size_);
scale_init_host_to_device(stream, input_featswrt);
const Dtype* in = input_tosmooth->gpu_data();
const Dtype* pos = input_featswrt->gpu_data();
Dtype* out = output_bilat->mutable_gpu_data();
const int batchstep = (key_size - spatialposdim_) * n_elements_;
CuHashTable<key_size> table(n_keys_, entries_, keys_);
for (int i = 0; i < batch_size_; ++i) {
CUDA_CHECK(hipMemsetAsync(entries_, -1, n_keys_*2*sizeof(int32_t), *stream));
hipLaunchKernelGGL(( init<key_size>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_,1,1)), 0, *stream,
table, n_elements_, spatialposfeats_, spatialposdim_, pos + i*batchstep, scale_, matrix_);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(hipGetLastError(), hipSuccess);
Filter(*stream, &table, PARAM_NORMALIZE_TRUE, val_size_,
in + i*data_size_*n_elements_,
out + i*data_size_*n_elements_,
NULL);//norm + i*n_elements_);
}
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::Backward(hipStream_t* stream,
int cudadevice,
bool require_tosmooth_grad,
bool require_featswrt_grad,
caffe::Blob<Dtype> * input_tosmooth,
caffe::Blob<Dtype> * input_featswrt,
caffe::Blob<Dtype> * output_bilat) {
using namespace permutohedral;
if(!require_tosmooth_grad && !require_featswrt_grad) return;
if(require_featswrt_grad) {
CHECK(require_tosmooth_grad) <<
"currently, if require_featswrt_grad, also must require_tosmooth_grad";
}
do_init(*stream, cudadevice, input_tosmooth, input_featswrt);
GetTempSpace(require_featswrt_grad ? (2*(key_size+1)*val_size_) : val_size_);
scale_init_host_to_device(stream, input_featswrt);
float* norm;
CUDA_CHECK(hipMalloc((void**)&norm, batch_size_*n_elements_*sizeof(float)));
const Dtype* out = output_bilat->gpu_data();
const Dtype* ograd = output_bilat->gpu_diff();
const Dtype* data = input_tosmooth->gpu_data();
Dtype* data_grad = input_tosmooth->mutable_gpu_diff();
const Dtype* pos = input_featswrt->gpu_data();
Dtype* pos_grad = input_featswrt->mutable_gpu_diff();
const int batchstep = (key_size - spatialposdim_) * n_elements_;
CuHashTable<key_size> table(n_keys_, entries_, keys_);
for (int i = 0; i < batch_size_; ++i) {
CUDA_CHECK(hipMemsetAsync(entries_, -1, n_keys_*2*sizeof(int32_t), *stream));
hipLaunchKernelGGL(( init<key_size>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_,1,1)), 0, *stream,
table, n_elements_, spatialposfeats_, spatialposdim_, pos + i*batchstep, scale_, matrix_);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(hipGetLastError(), hipSuccess);
if (require_tosmooth_grad) {
//CHECK(req[kData] != kAddTo);
Filter(*stream, &table, PARAM_NORMALIZE_TRUE, val_size_,
ograd + i*data_size_*n_elements_,
data_grad + i*data_size_*n_elements_,
norm + i*n_elements_);
}
if (require_featswrt_grad) {
//CHECK(req[kData] != kAddTo);
hipLaunchKernelGGL(( pos_grad_init<key_size, true>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, *stream,
n_elements_, val_size_,
ograd + i*data_size_*n_elements_,
spatialposfeats_, spatialposdim_, pos + i*batchstep,
data + i*data_size_*n_elements_,
out + i*data_size_*n_elements_,
norm + i*n_elements_,
new_vals_);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(hipGetLastError(), hipSuccess);
Filter(*stream, &table, false, 2*(key_size+1)*val_size_,
new_vals_,
key_size%2 ? new_vals_ : vals_,
NULL);
hipLaunchKernelGGL(( pos_grad_reduce<key_size, true>), dim3(dim3(nblock_, 1, 1)), dim3(dim3(lblock_, 1, 1)), 0, *stream,
n_elements_, val_size_,
ograd + i*data_size_*n_elements_,
spatialposfeats_, spatialposdim_, pos + i*batchstep,
data + i*data_size_*n_elements_,
out + i*data_size_*n_elements_,
norm + i*n_elements_,
key_size%2 ? new_vals_ : vals_,
pos_grad + i*batchstep);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(hipGetLastError(), hipSuccess);
}
}
CUDA_CHECK(hipFree(static_cast<void*>(norm)));
}
#define RET_NEW_PERMUTO_TEMPLATE(THEKEYSIZE) case THEKEYSIZE: return new PermutohedralOp_template_GPU<Dtype,THEKEYSIZE>(stdv_widths_host, \
create_spatial_dimension_features)
template <typename Dtype>
PermutohedralOp_GPU<Dtype>* new_permutohedral_gpu_op(int keysize,
const std::vector<float> & stdv_widths_host,
bool create_spatial_dimension_features) {
switch (keysize) {
RET_NEW_PERMUTO_TEMPLATE(2);
RET_NEW_PERMUTO_TEMPLATE(3);
RET_NEW_PERMUTO_TEMPLATE(4);
RET_NEW_PERMUTO_TEMPLATE(5);
RET_NEW_PERMUTO_TEMPLATE(6);
#if 1
RET_NEW_PERMUTO_TEMPLATE(7);
RET_NEW_PERMUTO_TEMPLATE(8);
RET_NEW_PERMUTO_TEMPLATE(9);
RET_NEW_PERMUTO_TEMPLATE(10);
RET_NEW_PERMUTO_TEMPLATE(11);
RET_NEW_PERMUTO_TEMPLATE(12);
RET_NEW_PERMUTO_TEMPLATE(13);
RET_NEW_PERMUTO_TEMPLATE(14);
RET_NEW_PERMUTO_TEMPLATE(15);
RET_NEW_PERMUTO_TEMPLATE(16);
#endif
default:
LOG(FATAL) << "GPU op with dimension "<<keysize<<" not supported";
return NULL;
}
}
// Instantiate certain expected uses.
// Will cause "undefined reference" errors if you use a type not defined here.
template PermutohedralOp_GPU<float>* new_permutohedral_gpu_op(int keysize,
const std::vector<float> & stdv_widths_host,
bool create_spatial_dimension_features);
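// Illustrative host-side usage (a sketch only: the blob names are hypothetical
// and it assumes Forward() is reachable through the PermutohedralOp_GPU base
// class declared in permutohedral_ops.h):
//
//   // 5-D lattice: 2 channels in input_featswrt plus 3 spatial dimensions
//   std::vector<float> stdv(5, 1.0f);  // filter bandwidth per lattice dimension
//   PermutohedralOp_GPU<float>* op =
//       new_permutohedral_gpu_op<float>(5, stdv, /*create_spatial_dimension_features=*/true);
//   op->Forward(&stream, device_id, &blob_tosmooth, &blob_featswrt, &blob_out);
//   delete op;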
| 8ca787ed7ac0cf68d6517904d46dc5def0a33944.cu | // see LICENSE_mxnet_permutohedral
#include "permutohedral_ops.h"
#include "cu_hash_table.h"
#include "caffe/util/device_alternate.hpp"
//#define CUDABLOCKSIZE 64
#define CUDABLOCKSIZE 256
#define PARAM_NORMALIZE_TRUE true
template <typename Dtype>
__global__ void computeSpatialCoords_1D(const int n_elements, float* output_buf) {
CUDA_KERNEL_LOOP(p, n_elements) {
output_buf[p] = static_cast<float>(p);
}
}
template <typename Dtype>
__global__ void computeSpatialCoords_2D(const int n_elements, float* output_buf,
const int width_dim1) {
CUDA_KERNEL_LOOP(p, n_elements) {
output_buf[p ] = static_cast<float>(p / width_dim1);
output_buf[p + n_elements] = static_cast<float>(p % width_dim1);
}
}
template <typename Dtype>
__global__ void computeSpatialCoords_3D(const int n_elements, float* output_buf,
const int width_dim1, const int width_dim2) {
int a;
CUDA_KERNEL_LOOP(p, n_elements) {
a = (p / width_dim2);
output_buf[p ] = static_cast<float>(a / width_dim1);
output_buf[p + n_elements] = static_cast<float>(a % width_dim1);
output_buf[p + 2*n_elements] = static_cast<float>(p % width_dim2);
}
}
namespace permutohedral {
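// init: embed every input point into the permutohedral lattice. Each thread
// elevates its scaled feature vector onto the lattice hyperplane, rounds to the
// nearest zero-colored remainder point, ranks the differentials to identify the
// enclosing simplex, derives barycentric weights, and inserts the key_size+1
// surrounding lattice keys into the hash table, storing one (index, weight)
// Pair per vertex in matrix.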
template<int key_size>
__global__ void init(CuHashTable<key_size> table,
const int n_elements,
const float *pos1, const int n_dim_pos1, const float *pos2,
const float *scale,
Pair *matrix) {
float elevated[key_size+1];
int greedy[key_size+1];
int rank[key_size+1];
float barycentric[key_size+2];
short key[key_size];
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= n_elements) return;
float sm = 0;
for (int i = key_size; i > 0; i--) {
float cf = (i <= n_dim_pos1 ? pos1[(i-1)*n_elements + idx] : pos2[(i-1-n_dim_pos1)*n_elements + idx])*scale[i-1];
elevated[i] = sm - i*cf;
sm += cf;
}
elevated[0] = sm;
  // greedily search for the closest zero-colored lattice point
short sum = 0;
for (int i = 0; i <= key_size; i++) {
float v = elevated[i]*(1.0f/(key_size+1));
float up = ceilf(v) * (key_size+1);
float down = floorf(v) * (key_size+1);
if (up - elevated[i] < elevated[i] - down) {
greedy[i] = static_cast<short>(up);
} else {
greedy[i] = static_cast<short>(down);
}
sum += greedy[i];
}
sum /= key_size+1;
// sort differential to find the permutation between this simplex and the canonical one
for (int i = 0; i <= key_size; i++) {
rank[i] = 0;
for (int j = 0; j <= key_size; j++) {
if (elevated[i] - greedy[i] < elevated[j] - greedy[j] ||
(elevated[i] - greedy[i] == elevated[j] - greedy[j]
&& i > j)) {
rank[i]++;
}
}
}
if (sum > 0) { // sum too large, need to bring down the ones with the smallest differential
for (int i = 0; i <= key_size; i++) {
if (rank[i] >= key_size + 1 - sum) {
greedy[i] -= key_size+1;
rank[i] += sum - (key_size+1);
} else {
rank[i] += sum;
}
}
} else if (sum < 0) { // sum too small, need to bring up the ones with largest differential
for (int i = 0; i <= key_size; i++) {
if (rank[i] < -sum) {
greedy[i] += key_size+1;
rank[i] += (key_size+1) + sum;
} else {
rank[i] += sum;
}
}
}
// turn delta into barycentric coords
for (int i = 0; i <= key_size+1; i++) {
barycentric[i] = 0;
}
for (int i = 0; i <= key_size; i++) {
float delta = (elevated[i] - greedy[i]) * (1.0f/(key_size+1));
barycentric[key_size-rank[i]] += delta;
barycentric[key_size+1-rank[i]] -= delta;
}
barycentric[0] += 1.0f + barycentric[key_size+1];
for (int color = 0; color <= key_size; color++) {
// Compute the location of the lattice point explicitly (all but
// the last coordinate - it's redundant because they sum to zero)
for (int i = 0; i < key_size; i++) {
key[i] = greedy[i] + color;
if (rank[i] > key_size-color) key[i] -= (key_size+1);
}
Pair r;
r.index = table.insert(key, idx*(key_size+1)+color);
r.weight = barycentric[color];
matrix[idx*(key_size+1) + color] = r;
}
}
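// splat: scatter-add each element's values, scaled by the corresponding
// barycentric weight, onto its enclosing lattice points (one thread per
// (element, vertex) pair). With normalize set, the last channel accumulates the
// bare weights and serves as a homogeneous coordinate for slice.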
template<int key_size, bool normalize>
__global__ void splat(CuHashTable<key_size> table,
const int32_t n_elements,
const int32_t val_size,
const float *data,
float *val,
const Pair *matrix) {
const int idx = threadIdx.y + blockIdx.y * blockDim.y;
if (idx >= n_elements) return;
const int color = threadIdx.x;
Pair r = matrix[idx*(key_size+1)+color];
float *dst = val + r.index*val_size;
if (!normalize) {
for (int j = 0; j < val_size; j++) {
atomicAdd(dst+j, data[j*n_elements + idx]*r.weight);
}
} else {
for (int j = 0; j < val_size-1; j++) {
atomicAdd(dst+j, data[j*n_elements + idx]*r.weight);
}
atomicAdd(dst+val_size-1, 1.f*r.weight);
}
}
template<int key_size>
__global__ static void blur(CuHashTable<key_size> table,
const int32_t val_size,
const int32_t color,
const float *val,
float *new_val,
const Pair *matrix) {
short key[key_size+1];
short np[key_size+1];
short nm[key_size+1];
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= table.n_keys_) return;
// Check if I'm valid
if (matrix[idx].index != idx) return;
// find my key and the keys of my neighbours
for (int i = 0; i < key_size; i++) {
key[i] = table.keys_[idx*key_size+i];
np[i] = key[i]+1;
nm[i] = key[i]-1;
}
np[color] -= key_size+1;
nm[color] += key_size+1;
int offNp = table.find(np);
int offNm = table.find(nm);
const float *valMe = val + val_size*idx;
const float *valNp = val + val_size*offNp;
const float *valNm = val + val_size*offNm;
float *valOut = new_val + val_size*idx;
for (int i = 0; i < val_size; i++) {
float o = valMe[i];
if (offNp >= 0) o += 0.5f*valNp[i];
if (offNm >= 0) o += 0.5f*valNm[i];
valOut[i] = o;
}
}
template<int key_size, bool normalize, bool save>
__global__ void slice(CuHashTable<key_size> table,
const int32_t n_elements,
const int32_t val_size,
const float *val,
float *out,
const Pair *matrix,
float *norm) {
const float alpha = 1.0f / (1+powf(2, -key_size-1));
int32_t index[key_size+1];
float weight[key_size+1];
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
for (int i = 0; i <= key_size; ++i) {
Pair r = matrix[idx*(key_size+1) + i];
index[i] = r.index;
weight[i] = r.weight;
}
if (!normalize) {
for (int j = 0; j < val_size; ++j) {
float v = 0.0f;
for (int i = 0; i <= key_size; ++i) {
v += weight[i]*val[index[i]*val_size + j];
}
out[j*n_elements + idx] = v * alpha;
}
} else {
float n = 0.0f;
for (int i = 0; i <= key_size; ++i) {
n += weight[i]*val[index[i]*val_size + val_size - 1];
}
n = 1.0f/n;
for (int j = 0; j < val_size-1; ++j) {
float v = 0.0f;
for (int i = 0; i <= key_size; ++i) {
v += weight[i]*val[index[i]*val_size + j];
}
out[j*n_elements + idx] = v * n;
}
if(save)
norm[idx] = n;
}
}
template<int key_size, bool normalize>
__global__ void pos_grad_init(const int32_t n_elements, const int32_t val_size,
const float *ograd,
const float *pos1, const int n_dim_pos1, const float *pos2,
const float *data, const float *out,
const float *norm, float *buf) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
float *f1 = buf;
float *f2 = f1 + key_size*val_size*n_elements;
float *f3 = f2 + val_size*n_elements;
float *f4 = f3 + key_size*val_size*n_elements;
float p[key_size];
for (int i = 0; i < key_size; ++i)
p[i] = (i < n_dim_pos1 ? pos1[i*n_elements + idx] : pos2[(i-n_dim_pos1)*n_elements + idx]);
float n;
if (normalize)
n = norm[idx];
float deltan = 0.f;
for (int j = 0; j < (normalize ? val_size - 1 : val_size); ++j) {
const int idx24 = j*n_elements + idx;
const float vj = data[idx24];
const float deltaj = normalize ? ograd[idx24]*n : ograd[idx24];
f2[idx24] = vj;
f4[idx24] = deltaj;
if (normalize)
deltan -= out[idx24]*deltaj;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + j)*n_elements + idx;
f1[idx13] = p[i]*vj;
f3[idx13] = p[i]*deltaj;
}
}
if (normalize) {
const int idx24 = (val_size-1)*n_elements + idx;
const float vj = 1.f;
f2[idx24] = vj;
f4[idx24] = deltan;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + val_size-1)*n_elements + idx;
f1[idx13] = p[i]*vj;
f3[idx13] = p[i]*deltan;
}
}
}
template<int key_size, bool normalize>
__global__ void pos_grad_reduce(const int32_t n_elements, const int32_t val_size,
const float *ograd,
const float *pos1, const int n_dim_pos1, const float *pos2,
const float *data, const float *out,
const float *norm, float *buf, float *pgrad) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elements) return;
float *f1 = buf;
float *f2 = f1 + key_size*val_size*n_elements;
float *f3 = f2 + val_size*n_elements;
float *f4 = f3 + key_size*val_size*n_elements;
float p[key_size];
float pg[key_size];
for (int i = 0; i < key_size; ++i) {
p[i] = (i < n_dim_pos1 ? pos1[i*n_elements + idx] : pos2[(i-n_dim_pos1)*n_elements + idx]);
pg[i] = 0;
}
float n;
if (normalize)
n = norm[idx];
float deltan = 0.f;
for (int j = 0; j < (normalize ? val_size - 1 : val_size); ++j) {
const int idx24 = j*n_elements + idx;
const float vj = data[idx24];
const float deltaj = normalize ? ograd[idx24]*n : ograd[idx24];
if (normalize)
deltan -= out[idx24]*deltaj;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + j)*n_elements + idx;
pg[i] += deltaj*f1[idx13] - deltaj*p[i]*f2[idx24]
+ vj*f3[idx13] - vj*p[i]*f4[idx24];
}
}
if (normalize) {
const int idx24 = (val_size-1)*n_elements + idx;
const float vj = 1.f;
for (int i = 0; i < key_size; ++i) {
const int idx13 = (i*val_size + val_size-1)*n_elements + idx;
pg[i] += deltan*f1[idx13] - deltan*p[i]*f2[idx24]
+ vj*f3[idx13] - vj*p[i]*f4[idx24];
}
}
for (int i = 0; i < key_size; ++i) {
pgrad[i*n_elements + idx] = pg[i];
}
}
} // namespace permutohedral
//##############################################################################
//##############################################################################
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::FreeTempSpace() {
if(cudadevice_init_ >= 0) { CUDA_CHECK(cudaSetDevice(cudadevice_init_)); }
if(entries_ != NULL) {
CUDA_CHECK(cudaFree(static_cast<void*>(entries_)));
keys_ = NULL;
entries_ = NULL;
matrix_ = NULL;
scale_ = new_vals_ = vals_ = NULL;
}
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::GetTempSpace(int val_size) {
using namespace permutohedral;
CHECK(init_);
FreeTempSpace();
const int requestedsize =
n_keys_*2*sizeof(int32_t) +
n_keys_*key_size*sizeof(int16_t) +
n_elements_*spatialposdim_*sizeof(Dtype) +
n_keys_*val_size*sizeof(float) +
n_keys_*val_size*sizeof(float) +
n_keys_*sizeof(Pair) +
key_size*sizeof(float);
uint8_t* ptr;
if(cudadevice_init_ >= 0) { CUDA_CHECK(cudaSetDevice(cudadevice_init_)); }
CUDA_CHECK(cudaMalloc((void**)&ptr, requestedsize));
entries_ = (int32_t*)ptr;
ptr += n_keys_*2*sizeof(int32_t);
keys_ = (int16_t*)ptr;
ptr += n_keys_*key_size*sizeof(int16_t);
CHECK_EQ(spatialposdim_ > 0, create_spatial_dimension_features_);
if(create_spatial_dimension_features_) {
spatialposfeats_ = (Dtype*)ptr;
ptr += n_elements_*spatialposdim_*sizeof(Dtype);
}
vals_ = (float*)ptr;
ptr += n_keys_*val_size*sizeof(float);
new_vals_ = (float*)ptr;
ptr += n_keys_*val_size*sizeof(float);
matrix_ = (Pair*)ptr;
ptr += n_keys_*sizeof(Pair);
scale_ = (float*)ptr;
ptr += key_size*sizeof(float);
CHECK_EQ(ptr - static_cast<uint8_t*>(static_cast<void*>(entries_)), requestedsize);
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::do_init(cudaStream_t stream,
int cudadevice,
caffe::Blob<Dtype> const* input_tosmooth,
caffe::Blob<Dtype> const* input_featswrt) {
if (init_) {
CHECK_EQ(cudadevice_init_, cudadevice);
} else {
cudadevice_init_ = cudadevice;
batch_size_ = input_tosmooth->shape(0);
data_size_ = input_tosmooth->shape(1);
//if (PARAM_NORMALIZE_TRUE) {
val_size_ = data_size_ + 1;
//} else {
// val_size_ = data_size_;
//}
n_elements_ = input_tosmooth->count()/batch_size_/data_size_;
n_keys_ = n_elements_*(key_size+1);
CHECK_EQ(n_elements_*batch_size_*data_size_, input_tosmooth->count());
CHECK_EQ(input_featswrt->count()/(input_featswrt->shape(0)*input_featswrt->shape(1)), n_elements_);
CHECK_GE(input_featswrt->shape(1), 1);
// number of spatial dimensions is num_axes() - (batchsize==dim0) - (nchannels==dim1)
if(create_spatial_dimension_features_) {
spatialposdim_ = input_featswrt->num_axes() - 2;
} else {
spatialposdim_ = 0;
}
CHECK(spatialposdim_ >= 0);
CHECK_EQ(input_featswrt->shape(1), key_size - spatialposdim_);
lblock_ = CUDABLOCKSIZE;
nblock_ = (n_elements_-1)/lblock_+1;
init_ = true;
}
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::scale_init_host_to_device(cudaStream_t* stream,
caffe::Blob<Dtype> const* input_featswrt) {
CHECK(init_ && scale_ != NULL);
float cpu_scale[key_size];
for (int i = 0; i < key_size; i++) {
cpu_scale[i] = static_cast<float>(key_size+1) *
sqrtf(static_cast<float>(2.0/3.0) / static_cast<float>((i+1)*(i+2)))
/ (this->stdv_widths_host_[i]);
}
if(cudadevice_init_ >= 0) { CUDA_CHECK(cudaSetDevice(cudadevice_init_)); }
//CUDA_CHECK(cudaMemcpy((void*)scale_, (void*)cpu_scale, key_size*sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpyAsync((void*)scale_, (void*)cpu_scale, key_size*sizeof(float), cudaMemcpyHostToDevice, *stream));
if(create_spatial_dimension_features_) {
switch(spatialposdim_) {
case 1: computeSpatialCoords_1D<Dtype><<<caffe::CAFFE_GET_BLOCKS(n_elements_), caffe::CAFFE_CUDA_NUM_THREADS, 0, *stream>>>(
n_elements_, spatialposfeats_
); break;
case 2: computeSpatialCoords_2D<Dtype><<<caffe::CAFFE_GET_BLOCKS(n_elements_), caffe::CAFFE_CUDA_NUM_THREADS, 0, *stream>>>(
n_elements_, spatialposfeats_, input_featswrt->shape(3)
); break;
case 3: computeSpatialCoords_3D<Dtype><<<caffe::CAFFE_GET_BLOCKS(n_elements_), caffe::CAFFE_CUDA_NUM_THREADS, 0, *stream>>>(
n_elements_, spatialposfeats_, input_featswrt->shape(3), input_featswrt->shape(4)
); break;
default: LOG(FATAL)<<"unsupported number of spatial dimensions "<<spatialposdim_; break;
}
}
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::Filter(cudaStream_t stream, permutohedral::CuHashTable<key_size> * table, bool normalize, int val_size,
const float *data, float *out, float *norm) {
using namespace permutohedral;
CUDA_CHECK(cudaMemsetAsync(vals_, 0, n_keys_*val_size*sizeof(float), stream));
if (normalize) {
splat<key_size, true><<<dim3(1, (n_elements_-1)/(lblock_/(key_size+1))+1, 1), dim3(key_size+1, lblock_/(key_size+1), 1), 0, stream>>>(
*table, n_elements_, val_size, data, vals_, matrix_);
} else {
splat<key_size, false><<<dim3(1, (n_elements_-1)/(lblock_/(key_size+1))+1, 1), dim3(key_size+1, lblock_/(key_size+1), 1), 0, stream>>>(
*table, n_elements_, val_size, data, vals_, matrix_);
}
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(cudaGetLastError(), cudaSuccess);
float *pval = vals_;
float *pnew_val = new_vals_;
for (int j = 0; j <= key_size; ++j) {
blur<key_size><<<dim3((n_keys_-1)/lblock_+1, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
*table, val_size, j, pval, pnew_val, matrix_);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(cudaGetLastError(), cudaSuccess);
std::swap(pval, pnew_val);
}
if (normalize) {
if (norm == NULL) {
slice<key_size, true, false><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
*table, n_elements_, val_size, pval, out, matrix_, NULL);
} else {
slice<key_size, true, true><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
*table, n_elements_, val_size, pval, out, matrix_, norm);
}
} else {
slice<key_size, false, false><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, stream>>>(
*table, n_elements_, val_size, pval, out, matrix_, NULL);
}
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(cudaGetLastError(), cudaSuccess);
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::Forward(cudaStream_t* stream,
int cudadevice,
caffe::Blob<Dtype> const* input_tosmooth,
caffe::Blob<Dtype> const* input_featswrt,
caffe::Blob<Dtype> * output_bilat) {
using namespace permutohedral;
do_init(*stream, cudadevice, input_tosmooth, input_featswrt);
GetTempSpace(val_size_);
scale_init_host_to_device(stream, input_featswrt);
const Dtype* in = input_tosmooth->gpu_data();
const Dtype* pos = input_featswrt->gpu_data();
Dtype* out = output_bilat->mutable_gpu_data();
const int batchstep = (key_size - spatialposdim_) * n_elements_;
CuHashTable<key_size> table(n_keys_, entries_, keys_);
for (int i = 0; i < batch_size_; ++i) {
CUDA_CHECK(cudaMemsetAsync(entries_, -1, n_keys_*2*sizeof(int32_t), *stream));
init<key_size><<<dim3(nblock_, 1, 1), dim3(lblock_,1,1), 0, *stream>>>(
table, n_elements_, spatialposfeats_, spatialposdim_, pos + i*batchstep, scale_, matrix_);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(cudaGetLastError(), cudaSuccess);
Filter(*stream, &table, PARAM_NORMALIZE_TRUE, val_size_,
in + i*data_size_*n_elements_,
out + i*data_size_*n_elements_,
NULL);//norm + i*n_elements_);
}
}
template<typename Dtype, int key_size>
void PermutohedralOp_template_GPU<Dtype,key_size>::Backward(cudaStream_t* stream,
int cudadevice,
bool require_tosmooth_grad,
bool require_featswrt_grad,
caffe::Blob<Dtype> * input_tosmooth,
caffe::Blob<Dtype> * input_featswrt,
caffe::Blob<Dtype> * output_bilat) {
using namespace permutohedral;
if(!require_tosmooth_grad && !require_featswrt_grad) return;
if(require_featswrt_grad) {
CHECK(require_tosmooth_grad) <<
"currently, if require_featswrt_grad, also must require_tosmooth_grad";
}
do_init(*stream, cudadevice, input_tosmooth, input_featswrt);
GetTempSpace(require_featswrt_grad ? (2*(key_size+1)*val_size_) : val_size_);
scale_init_host_to_device(stream, input_featswrt);
float* norm;
CUDA_CHECK(cudaMalloc((void**)&norm, batch_size_*n_elements_*sizeof(float)));
const Dtype* out = output_bilat->gpu_data();
const Dtype* ograd = output_bilat->gpu_diff();
const Dtype* data = input_tosmooth->gpu_data();
Dtype* data_grad = input_tosmooth->mutable_gpu_diff();
const Dtype* pos = input_featswrt->gpu_data();
Dtype* pos_grad = input_featswrt->mutable_gpu_diff();
const int batchstep = (key_size - spatialposdim_) * n_elements_;
CuHashTable<key_size> table(n_keys_, entries_, keys_);
for (int i = 0; i < batch_size_; ++i) {
CUDA_CHECK(cudaMemsetAsync(entries_, -1, n_keys_*2*sizeof(int32_t), *stream));
init<key_size><<<dim3(nblock_, 1, 1), dim3(lblock_,1,1), 0, *stream>>>(
table, n_elements_, spatialposfeats_, spatialposdim_, pos + i*batchstep, scale_, matrix_);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(cudaGetLastError(), cudaSuccess);
if (require_tosmooth_grad) {
//CHECK(req[kData] != kAddTo);
Filter(*stream, &table, PARAM_NORMALIZE_TRUE, val_size_,
ograd + i*data_size_*n_elements_,
data_grad + i*data_size_*n_elements_,
norm + i*n_elements_);
}
if (require_featswrt_grad) {
//CHECK(req[kData] != kAddTo);
pos_grad_init<key_size, true><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, *stream>>>(
n_elements_, val_size_,
ograd + i*data_size_*n_elements_,
spatialposfeats_, spatialposdim_, pos + i*batchstep,
data + i*data_size_*n_elements_,
out + i*data_size_*n_elements_,
norm + i*n_elements_,
new_vals_);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(cudaGetLastError(), cudaSuccess);
Filter(*stream, &table, false, 2*(key_size+1)*val_size_,
new_vals_,
key_size%2 ? new_vals_ : vals_,
NULL);
pos_grad_reduce<key_size, true><<<dim3(nblock_, 1, 1), dim3(lblock_, 1, 1), 0, *stream>>>(
n_elements_, val_size_,
ograd + i*data_size_*n_elements_,
spatialposfeats_, spatialposdim_, pos + i*batchstep,
data + i*data_size_*n_elements_,
out + i*data_size_*n_elements_,
norm + i*n_elements_,
key_size%2 ? new_vals_ : vals_,
pos_grad + i*batchstep);
CUDA_POST_KERNEL_CHECK;
CHECK_EQ(cudaGetLastError(), cudaSuccess);
}
}
CUDA_CHECK(cudaFree(static_cast<void*>(norm)));
}
#define RET_NEW_PERMUTO_TEMPLATE(THEKEYSIZE) case THEKEYSIZE: return new PermutohedralOp_template_GPU<Dtype,THEKEYSIZE>(stdv_widths_host, \
create_spatial_dimension_features)
template <typename Dtype>
PermutohedralOp_GPU<Dtype>* new_permutohedral_gpu_op(int keysize,
const std::vector<float> & stdv_widths_host,
bool create_spatial_dimension_features) {
switch (keysize) {
RET_NEW_PERMUTO_TEMPLATE(2);
RET_NEW_PERMUTO_TEMPLATE(3);
RET_NEW_PERMUTO_TEMPLATE(4);
RET_NEW_PERMUTO_TEMPLATE(5);
RET_NEW_PERMUTO_TEMPLATE(6);
#if 1
RET_NEW_PERMUTO_TEMPLATE(7);
RET_NEW_PERMUTO_TEMPLATE(8);
RET_NEW_PERMUTO_TEMPLATE(9);
RET_NEW_PERMUTO_TEMPLATE(10);
RET_NEW_PERMUTO_TEMPLATE(11);
RET_NEW_PERMUTO_TEMPLATE(12);
RET_NEW_PERMUTO_TEMPLATE(13);
RET_NEW_PERMUTO_TEMPLATE(14);
RET_NEW_PERMUTO_TEMPLATE(15);
RET_NEW_PERMUTO_TEMPLATE(16);
#endif
default:
LOG(FATAL) << "GPU op with dimension "<<keysize<<" not supported";
return NULL;
}
}
// Instantiate certain expected uses.
// Will cause "undefined reference" errors if you use a type not defined here.
template PermutohedralOp_GPU<float>* new_permutohedral_gpu_op(int keysize,
const std::vector<float> & stdv_widths_host,
bool create_spatial_dimension_features);
|
6350e66c830a577d09ead42dcfb6464de245a277.hip | // !!! This is a file automatically generated by hipify!!!
//
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <hip/hip_runtime.h>
#include "optixRaycastingKernels.h"
#include <sutil/vec_math.h>
inline int idivCeil( int x, int y )
{
return ( x + y - 1 ) / y;
}
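// One thread per ray: rays are laid out on a regular width x height grid in the
// XY plane (spacing dx, dy starting at x0, y0), all originating at depth z and
// pointing along +Z with an effectively unbounded tmax.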
__global__ void createRaysOrthoKernel( Ray* rays, int width, int height, float x0, float y0, float z, float dx, float dy )
{
const int rayx = threadIdx.x + blockIdx.x * blockDim.x;
const int rayy = threadIdx.y + blockIdx.y * blockDim.y;
if( rayx >= width || rayy >= height )
return;
const int idx = rayx + rayy * width;
rays[idx].origin = make_float3( x0 + rayx * dx, y0 + rayy * dy, z );
rays[idx].tmin = 0.0f;
rays[idx].dir = make_float3( 0, 0, 1 );
rays[idx].tmax = 1e34f;
}
// Note: uses left handed coordinate system
void createRaysOrthoOnDevice( Ray* rays_device, int width, int height, float3 bbmin, float3 bbmax, float padding )
{
const float3 bbspan = bbmax - bbmin;
float dx = bbspan.x * ( 1 + 2 * padding ) / width;
float dy = bbspan.y * ( 1 + 2 * padding ) / height;
float x0 = bbmin.x - bbspan.x * padding + dx / 2;
float y0 = bbmin.y - bbspan.y * padding + dy / 2;
float z = bbmin.z - fmaxf( bbspan.z, 1.0f ) * .001f;
dim3 blockSize( 32, 16 );
dim3 gridSize( idivCeil( width, blockSize.x ), idivCeil( height, blockSize.y ) );
hipLaunchKernelGGL(( createRaysOrthoKernel), dim3(gridSize), dim3(blockSize), 0, 0, rays_device, width, height, x0, y0, z, dx, dy );
}
__global__ void translateRaysKernel( Ray* rays, int count, float3 offset )
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( idx >= count )
return;
rays[idx].origin = rays[idx].origin + offset;
}
void translateRaysOnDevice( Ray* rays_device, int count, float3 offset )
{
const int blockSize = 512;
const int blockCount = idivCeil( count, blockSize );
hipLaunchKernelGGL(( translateRaysKernel), dim3(blockCount), dim3(blockSize), 0, 0, rays_device, count, offset );
}
__global__ void shadeHitsKernel( float3* image, int count, const Hit* hits )
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( idx >= count )
return;
const float3 backgroundColor = make_float3( 0.2f, 0.2f, 0.2f );
if( hits[idx].t < 0.0f )
{
image[idx] = backgroundColor;
}
else
{
image[idx] = 0.5f * hits[idx].geom_normal + make_float3( 0.5f, 0.5f, 0.5f );
}
}
void shadeHitsOnDevice( float3* image_device, int count, const Hit* hits_device )
{
const int blockSize = 512;
const int blockCount = idivCeil( count, blockSize );
hipLaunchKernelGGL(( shadeHitsKernel), dim3(blockCount), dim3(blockSize), 0, 0, image_device, count, hits_device );
}
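// Typical host-side flow in the optixRaycasting sample (sketch only; the OptiX
// launch that consumes the rays and fills the Hit buffer lives in the host code
// and is omitted here):
//
//   createRaysOrthoOnDevice( rays_device, width, height, bbmin, bbmax, 0.05f );
//   // ... optixLaunch traces rays_device and writes one Hit per ray ...
//   shadeHitsOnDevice( image_device, width * height, hits_device );
//   // translateRaysOnDevice() can offset the origins for a second, shifted pass.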
| 6350e66c830a577d09ead42dcfb6464de245a277.cu | //
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of NVIDIA CORPORATION nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <cuda_runtime.h>
#include "optixRaycastingKernels.h"
#include <sutil/vec_math.h>
inline int idivCeil( int x, int y )
{
return ( x + y - 1 ) / y;
}
__global__ void createRaysOrthoKernel( Ray* rays, int width, int height, float x0, float y0, float z, float dx, float dy )
{
const int rayx = threadIdx.x + blockIdx.x * blockDim.x;
const int rayy = threadIdx.y + blockIdx.y * blockDim.y;
if( rayx >= width || rayy >= height )
return;
const int idx = rayx + rayy * width;
rays[idx].origin = make_float3( x0 + rayx * dx, y0 + rayy * dy, z );
rays[idx].tmin = 0.0f;
rays[idx].dir = make_float3( 0, 0, 1 );
rays[idx].tmax = 1e34f;
}
// Note: uses left handed coordinate system
void createRaysOrthoOnDevice( Ray* rays_device, int width, int height, float3 bbmin, float3 bbmax, float padding )
{
const float3 bbspan = bbmax - bbmin;
float dx = bbspan.x * ( 1 + 2 * padding ) / width;
float dy = bbspan.y * ( 1 + 2 * padding ) / height;
float x0 = bbmin.x - bbspan.x * padding + dx / 2;
float y0 = bbmin.y - bbspan.y * padding + dy / 2;
float z = bbmin.z - fmaxf( bbspan.z, 1.0f ) * .001f;
dim3 blockSize( 32, 16 );
dim3 gridSize( idivCeil( width, blockSize.x ), idivCeil( height, blockSize.y ) );
createRaysOrthoKernel<<<gridSize, blockSize>>>( rays_device, width, height, x0, y0, z, dx, dy );
}
__global__ void translateRaysKernel( Ray* rays, int count, float3 offset )
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( idx >= count )
return;
rays[idx].origin = rays[idx].origin + offset;
}
void translateRaysOnDevice( Ray* rays_device, int count, float3 offset )
{
const int blockSize = 512;
const int blockCount = idivCeil( count, blockSize );
translateRaysKernel<<<blockCount, blockSize>>>( rays_device, count, offset );
}
__global__ void shadeHitsKernel( float3* image, int count, const Hit* hits )
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if( idx >= count )
return;
const float3 backgroundColor = make_float3( 0.2f, 0.2f, 0.2f );
if( hits[idx].t < 0.0f )
{
image[idx] = backgroundColor;
}
else
{
image[idx] = 0.5f * hits[idx].geom_normal + make_float3( 0.5f, 0.5f, 0.5f );
}
}
void shadeHitsOnDevice( float3* image_device, int count, const Hit* hits_device )
{
const int blockSize = 512;
const int blockCount = idivCeil( count, blockSize );
shadeHitsKernel<<<blockCount, blockSize>>>( image_device, count, hits_device );
}
|
6d275c46279e1feefcf9a15fb2aec3f27d5d835e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for neighbor list processing on the GPU
*/
#include "NeighborListGPUBinned.cuh"
#include "NeighborListGPU_hip.cuh"
#include <stdio.h>
/*! \param d_result Device pointer to a single uint. Will be set to 1 if an update is needed
\param d_last_pos Particle positions at the time the nlist was last updated
\param d_pos Current particle positions
\param N Number of particles
\param box Box dimensions
\param maxshiftsq The maximum drsq a particle can have before an update is needed
\param lambda Diagonal deformation tensor (for orthorhombic boundaries)
    \param checkn Value written to \a d_result (via atomicMax) when a particle has moved more than sqrt(\a maxshiftsq)
gpu_nlist_needs_update_check_new_kernel() executes one thread per particle. Every particle's current position is
compared to its last position. If the particle has moved a distance more than sqrt(\a maxshiftsq), then *d_result
is set to \a ncheck.
*/
__global__ void gpu_nlist_needs_update_check_new_kernel(unsigned int *d_result,
const Scalar4 *d_last_pos,
const Scalar4 *d_pos,
const unsigned int N,
const BoxDim box,
const Scalar maxshiftsq,
const Scalar3 lambda,
const unsigned int checkn)
{
    // each thread will compare vs its old position to see if the list needs updating
// if that is true, write a 1 to nlist_needs_updating
// it is possible that writes will collide, but at least one will succeed and that is all that matters
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
Scalar4 cur_postype = d_pos[idx];
Scalar3 cur_pos = make_scalar3(cur_postype.x, cur_postype.y, cur_postype.z);
Scalar4 last_postype = d_last_pos[idx];
Scalar3 last_pos = make_scalar3(last_postype.x, last_postype.y, last_postype.z);
Scalar3 dx = cur_pos - lambda*last_pos;
dx = box.minImage(dx);
if (dot(dx, dx) >= maxshiftsq)
atomicMax(d_result, checkn);
}
}
hipError_t gpu_nlist_needs_update_check_new(unsigned int *d_result,
const Scalar4 *d_last_pos,
const Scalar4 *d_pos,
const unsigned int N,
const BoxDim& box,
const Scalar maxshiftsq,
const Scalar3 lambda,
const unsigned int checkn)
{
unsigned int block_size = 128;
int n_blocks = N/block_size+1;
hipLaunchKernelGGL(( gpu_nlist_needs_update_check_new_kernel), dim3(n_blocks), dim3(block_size), 0, 0, d_result,
d_last_pos,
d_pos,
N,
box,
maxshiftsq,
lambda,
checkn);
return hipSuccess;
}
//! Number of elements of the exclusion list to process in each batch
const unsigned int FILTER_BATCH_SIZE = 4;
/*! \param d_n_neigh Number of neighbors for each particle (read/write)
\param d_nlist Neighbor list for each particle (read/write)
\param nli Indexer for indexing into d_nlist
\param d_n_ex Number of exclusions for each particle
\param d_ex_list List of exclusions for each particle
\param exli Indexer for indexing into d_ex_list
\param N Number of particles
\param ex_start Start filtering the nlist from exclusion number \a ex_start
gpu_nlist_filter_kernel() processes the neighbor list \a d_nlist and removes any entries that are excluded. To allow
    for an arbitrarily large number of exclusions, these are processed in batches of FILTER_BATCH_SIZE. The kernel
must be called multiple times in order to fully remove all exclusions from the nlist.
\note The driver gpu_nlist_filter properly makes as many calls as are necessary, it only needs to be called once.
\b Implementation
One thread is run for each particle. Exclusions \a ex_start, \a ex_start + 1, ... are loaded in for that particle
(or the thread returns if there are no exlusions past that point). The thread then loops over the neighbor list,
comparing each entry to the list of exclusions. If the entry is not excluded, it is written back out. \a d_n_neigh
is updated to reflect the current number of particles in the list at the end of the kernel call.
*/
__global__ void gpu_nlist_filter_kernel(unsigned int *d_n_neigh,
unsigned int *d_nlist,
const Index2D nli,
const unsigned int *d_n_ex,
const unsigned int *d_ex_list,
const Index2D exli,
const unsigned int N,
const unsigned int ex_start)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the particle list
if (idx >= N)
return;
const unsigned int n_neigh = d_n_neigh[idx];
const unsigned int n_ex = d_n_ex[idx];
unsigned int new_n_neigh = 0;
// quit now if the ex_start flag is past the end of n_ex
if (ex_start >= n_ex)
return;
// count the number of exclusions to process in this thread
const unsigned int n_ex_process = n_ex - ex_start;
// load the exclusion list into "local" memory - fully unrolled loops should dump this into registers
unsigned int l_ex_list[FILTER_BATCH_SIZE];
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_ex_idx < n_ex_process)
l_ex_list[cur_ex_idx] = d_ex_list[exli(idx, cur_ex_idx + ex_start)];
else
l_ex_list[cur_ex_idx] = 0xffffffff;
}
// loop over the list, regenerating it as we go
for (unsigned int cur_neigh_idx = 0; cur_neigh_idx < n_neigh; cur_neigh_idx++)
{
unsigned int cur_neigh = d_nlist[nli(idx, cur_neigh_idx)];
// test if excluded
bool excluded = false;
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_neigh == l_ex_list[cur_ex_idx])
excluded = true;
}
// add it back to the list if it is not excluded
if (!excluded)
{
if (new_n_neigh != cur_neigh_idx)
d_nlist[nli(idx, new_n_neigh)] = cur_neigh;
new_n_neigh++;
}
}
// update the number of neighbors
d_n_neigh[idx] = new_n_neigh;
}
hipError_t gpu_nlist_filter(unsigned int *d_n_neigh,
unsigned int *d_nlist,
const Index2D& nli,
const unsigned int *d_n_ex,
const unsigned int *d_ex_list,
const Index2D& exli,
const unsigned int N,
const unsigned int block_size)
{
// determine parameters for kernel launch
int n_blocks = N/block_size + 1;
// split the processing of the full exclusion list up into a number of batches
unsigned int n_batches = (unsigned int)ceil(double(exli.getH())/double(FILTER_BATCH_SIZE));
unsigned int ex_start = 0;
for (unsigned int batch = 0; batch < n_batches; batch++)
{
hipLaunchKernelGGL(( gpu_nlist_filter_kernel), dim3(n_blocks), dim3(block_size), 0, 0, d_n_neigh,
d_nlist,
nli,
d_n_ex,
d_ex_list,
exli,
N,
ex_start);
ex_start += FILTER_BATCH_SIZE;
}
return hipSuccess;
}
//! Compile time determined block size for the NSQ neighbor list calculation
const int NLIST_BLOCK_SIZE = 128;
//! Generate the neighbor list on the GPU in O(N^2) time
/*! \param d_nlist Neighbor list to write out
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions will be written here
\param d_conditions Overflow condition flag
\param nli Indexer for indexing into d_nlist
\param d_pos Current particle positions
\param N number of particles
\param box Box dimensions for handling periodic boundary conditions
\param r_maxsq Precalculated value for r_max*r_max
    each thread is to compute the neighbor list for a single particle i
    each block will load a bunch of particles into shared mem and then each thread will compare its particle
to each particle in shmem to see if they are a neighbor. Since all threads in the block access the same
shmem element at the same time, the value is broadcast and there are no bank conflicts
*/
__global__
void gpu_compute_nlist_nsq_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const Index2D nli,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int n_ghost,
const BoxDim box,
const Scalar r_maxsq)
{
// shared data to store all of the particles we compare against
__shared__ Scalar sdata[NLIST_BLOCK_SIZE*4];
// load in the particle
int pidx = blockIdx.x * NLIST_BLOCK_SIZE + threadIdx.x;
// store the max number of neighbors needed for this thread
unsigned int n_neigh_needed = 0;
Scalar4 pos = make_scalar4(0, 0, 0, 0);
if (pidx < N)
pos = d_pos[pidx];
Scalar px = pos.x;
Scalar py = pos.y;
Scalar pz = pos.z;
// track the number of neighbors added so far
int n_neigh = 0;
// each block is going to loop over all N particles (this assumes memory is padded to a multiple of blockDim.x)
// in blocks of blockDim.x
// include ghosts as neighbors
for (int start = 0; start < N + n_ghost; start += NLIST_BLOCK_SIZE)
{
// load data
Scalar4 neigh_pos = make_scalar4(0, 0, 0, 0);
if (start + threadIdx.x < N + n_ghost)
neigh_pos = d_pos[start + threadIdx.x];
// make sure everybody is caught up before we stomp on the memory
__syncthreads();
sdata[threadIdx.x] = neigh_pos.x;
sdata[threadIdx.x + NLIST_BLOCK_SIZE] = neigh_pos.y;
sdata[threadIdx.x + 2*NLIST_BLOCK_SIZE] = neigh_pos.z;
sdata[threadIdx.x + 3*NLIST_BLOCK_SIZE] = neigh_pos.w; //< unused, but try to get compiler to fully coalesce reads
// ensure all data is loaded
__syncthreads();
// now each thread loops over every particle in shmem, but doesn't loop past the end of the particle list (since
// the block might extend that far)
        int end_offset = NLIST_BLOCK_SIZE;
end_offset = min(end_offset, N + n_ghost - start);
if (pidx < N)
{
for (int cur_offset = 0; cur_offset < end_offset; cur_offset++)
{
// calculate dr
Scalar3 dx = make_scalar3(px - sdata[cur_offset],
py - sdata[cur_offset + NLIST_BLOCK_SIZE],
pz - sdata[cur_offset + 2*NLIST_BLOCK_SIZE]);
dx = box.minImage(dx);
// we don't add if we are comparing to ourselves, and we don't add if we are above the cut
if ((dot(dx,dx) <= r_maxsq) && ((start + cur_offset) != pidx))
{
unsigned int j = start + cur_offset;
if (n_neigh < nli.getH())
d_nlist[nli(pidx, n_neigh)] = j;
else
n_neigh_needed = n_neigh+1;
n_neigh++;
}
}
}
}
// now that we are done: update the first row that lists the number of neighbors
if (pidx < N)
{
d_n_neigh[pidx] = n_neigh;
d_last_updated_pos[pidx] = d_pos[pidx];
if (n_neigh_needed > 0)
atomicMax(&d_conditions[0], n_neigh_needed);
}
}
//! GPU kernel to update the exclusions list
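//! One thread per particle index: the particle's tag is looked up, the tag-based
//! exclusion count is copied, and every excluded tag is translated to its current
//! particle index through the reverse-lookup table (rtags).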
__global__ void gpu_update_exclusion_list_kernel(const unsigned int *tags,
const unsigned int *rtags,
const unsigned int *n_ex_tag,
const unsigned int *ex_list_tag,
const Index2D ex_list_tag_indexer,
unsigned int *n_ex_idx,
unsigned int *ex_list_idx,
const Index2D ex_list_indexer,
const unsigned int N)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
unsigned int tag = tags[idx];
unsigned int n = n_ex_tag[tag];
// copy over number of exclusions
n_ex_idx[idx] = n;
for (unsigned int offset = 0; offset < n; offset++)
{
unsigned int ex_tag = ex_list_tag[ex_list_tag_indexer(tag, offset)];
unsigned int ex_idx = rtags[ex_tag];
ex_list_idx[ex_list_indexer(idx, offset)] = ex_idx;
}
}
//! GPU function to update the exclusion list on the device
/*! \param d_tag Array of particle tags
\param d_rtag Array of reverse-lookup tag->idx
\param d_n_ex_tag List of number of exclusions per tag
\param d_ex_list_tag 2D Exclusion list per tag
\param ex_list_tag_indexer Indexer for per-tag exclusion list
\param d_n_ex_idx List of number of exclusions per idx
\param d_ex_list_idx Exclusion list per idx
\param ex_list_indexer Indexer for per-idx exclusion list
\param N number of particles
*/
hipError_t gpu_update_exclusion_list(const unsigned int *d_tag,
const unsigned int *d_rtag,
const unsigned int *d_n_ex_tag,
const unsigned int *d_ex_list_tag,
const Index2D& ex_list_tag_indexer,
unsigned int *d_n_ex_idx,
unsigned int *d_ex_list_idx,
const Index2D& ex_list_indexer,
const unsigned int N)
{
unsigned int block_size = 512;
hipLaunchKernelGGL(( gpu_update_exclusion_list_kernel), dim3(N/block_size + 1), dim3(block_size), 0, 0, d_tag,
d_rtag,
d_n_ex_tag,
d_ex_list_tag,
ex_list_tag_indexer,
d_n_ex_idx,
d_ex_list_idx,
ex_list_indexer,
N);
return hipSuccess;
}
//! Generate the neighbor list on the GPU in O(N^2) time
hipError_t gpu_compute_nlist_nsq(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const Index2D& nli,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int n_ghost,
const BoxDim& box,
const Scalar r_maxsq)
{
// setup the grid to run the kernel
int block_size = NLIST_BLOCK_SIZE;
dim3 grid( (N/block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL(( gpu_compute_nlist_nsq_kernel), dim3(grid), dim3(threads) , 0, 0, d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
nli,
d_pos,
N,
n_ghost,
box,
r_maxsq);
return hipSuccess;
}
| 6d275c46279e1feefcf9a15fb2aec3f27d5d835e.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
/*! \file NeighborListGPUBinned.cu
\brief Defines GPU kernel code for neighbor list processing on the GPU
*/
#include "NeighborListGPUBinned.cuh"
#include "NeighborListGPU.cuh"
#include <stdio.h>
/*! \param d_result Device pointer to a single uint. Will be set to \a checkn if an update is needed
\param d_last_pos Particle positions at the time the nlist was last updated
\param d_pos Current particle positions
\param N Number of particles
\param box Box dimensions
\param maxshiftsq The maximum drsq a particle can have before an update is needed
\param lambda Diagonal deformation tensor (for orthorhombic boundaries)
\param checkn Value written (via atomicMax) to \a d_result when an update is needed
gpu_nlist_needs_update_check_new_kernel() executes one thread per particle. Every particle's current position is
compared to its last position. If the particle has moved a distance more than sqrt(\a maxshiftsq), then *d_result
is set to \a checkn.
*/
__global__ void gpu_nlist_needs_update_check_new_kernel(unsigned int *d_result,
const Scalar4 *d_last_pos,
const Scalar4 *d_pos,
const unsigned int N,
const BoxDim box,
const Scalar maxshiftsq,
const Scalar3 lambda,
const unsigned int checkn)
{
// each thread will compare against its old position to see if the list needs updating
// if that is true, write a 1 to nlist_needs_updating
// it is possible that writes will collide, but at least one will succeed and that is all that matters
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
Scalar4 cur_postype = d_pos[idx];
Scalar3 cur_pos = make_scalar3(cur_postype.x, cur_postype.y, cur_postype.z);
Scalar4 last_postype = d_last_pos[idx];
Scalar3 last_pos = make_scalar3(last_postype.x, last_postype.y, last_postype.z);
Scalar3 dx = cur_pos - lambda*last_pos;
dx = box.minImage(dx);
if (dot(dx, dx) >= maxshiftsq)
atomicMax(d_result, checkn);
}
}
cudaError_t gpu_nlist_needs_update_check_new(unsigned int *d_result,
const Scalar4 *d_last_pos,
const Scalar4 *d_pos,
const unsigned int N,
const BoxDim& box,
const Scalar maxshiftsq,
const Scalar3 lambda,
const unsigned int checkn)
{
unsigned int block_size = 128;
int n_blocks = N/block_size+1;
gpu_nlist_needs_update_check_new_kernel<<<n_blocks, block_size>>>(d_result,
d_last_pos,
d_pos,
N,
box,
maxshiftsq,
lambda,
checkn);
return cudaSuccess;
}
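// Note on the launch configuration above: n_blocks = N/block_size + 1 rounds up with integer
// division, but launches one extra (fully idle) block whenever N is an exact multiple of the
// block size. The idle threads are harmless because of the idx < N guard in the kernel. A sketch
// of the exact ceiling division, if one wanted to avoid the spare block, would be:
//   int n_blocks = (N + block_size - 1) / block_size;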
//! Number of elements of the exclusion list to process in each batch
const unsigned int FILTER_BATCH_SIZE = 4;
/*! \param d_n_neigh Number of neighbors for each particle (read/write)
\param d_nlist Neighbor list for each particle (read/write)
\param nli Indexer for indexing into d_nlist
\param d_n_ex Number of exclusions for each particle
\param d_ex_list List of exclusions for each particle
\param exli Indexer for indexing into d_ex_list
\param N Number of particles
\param ex_start Start filtering the nlist from exclusion number \a ex_start
gpu_nlist_filter_kernel() processes the neighbor list \a d_nlist and removes any entries that are excluded. To allow
for an arbitrarily large number of exclusions, these are processed in batches of FILTER_BATCH_SIZE. The kernel
must be called multiple times in order to fully remove all exclusions from the nlist.
\note The driver gpu_nlist_filter makes as many kernel calls as are necessary; it only needs to be called once.
\b Implementation
One thread is run for each particle. Exclusions \a ex_start, \a ex_start + 1, ... are loaded in for that particle
(or the thread returns if there are no exclusions past that point). The thread then loops over the neighbor list,
comparing each entry to the list of exclusions. If the entry is not excluded, it is written back out. \a d_n_neigh
is updated to reflect the current number of particles in the list at the end of the kernel call.
*/
__global__ void gpu_nlist_filter_kernel(unsigned int *d_n_neigh,
unsigned int *d_nlist,
const Index2D nli,
const unsigned int *d_n_ex,
const unsigned int *d_ex_list,
const Index2D exli,
const unsigned int N,
const unsigned int ex_start)
{
// compute the particle index this thread operates on
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
// quit now if this thread is processing past the end of the particle list
if (idx >= N)
return;
const unsigned int n_neigh = d_n_neigh[idx];
const unsigned int n_ex = d_n_ex[idx];
unsigned int new_n_neigh = 0;
// quit now if the ex_start flag is past the end of n_ex
if (ex_start >= n_ex)
return;
// count the number of exclusions to process in this thread
const unsigned int n_ex_process = n_ex - ex_start;
// load the exclusion list into "local" memory - fully unrolled loops should dump this into registers
unsigned int l_ex_list[FILTER_BATCH_SIZE];
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_ex_idx < n_ex_process)
l_ex_list[cur_ex_idx] = d_ex_list[exli(idx, cur_ex_idx + ex_start)];
else
l_ex_list[cur_ex_idx] = 0xffffffff;
}
// loop over the list, regenerating it as we go
for (unsigned int cur_neigh_idx = 0; cur_neigh_idx < n_neigh; cur_neigh_idx++)
{
unsigned int cur_neigh = d_nlist[nli(idx, cur_neigh_idx)];
// test if excluded
bool excluded = false;
#pragma unroll
for (unsigned int cur_ex_idx = 0; cur_ex_idx < FILTER_BATCH_SIZE; cur_ex_idx++)
{
if (cur_neigh == l_ex_list[cur_ex_idx])
excluded = true;
}
// add it back to the list if it is not excluded
if (!excluded)
{
if (new_n_neigh != cur_neigh_idx)
d_nlist[nli(idx, new_n_neigh)] = cur_neigh;
new_n_neigh++;
}
}
// update the number of neighbors
d_n_neigh[idx] = new_n_neigh;
}
cudaError_t gpu_nlist_filter(unsigned int *d_n_neigh,
unsigned int *d_nlist,
const Index2D& nli,
const unsigned int *d_n_ex,
const unsigned int *d_ex_list,
const Index2D& exli,
const unsigned int N,
const unsigned int block_size)
{
// determine parameters for kernel launch
int n_blocks = N/block_size + 1;
// split the processing of the full exclusion list up into a number of batches
unsigned int n_batches = (unsigned int)ceil(double(exli.getH())/double(FILTER_BATCH_SIZE));
unsigned int ex_start = 0;
for (unsigned int batch = 0; batch < n_batches; batch++)
{
gpu_nlist_filter_kernel<<<n_blocks, block_size>>>(d_n_neigh,
d_nlist,
nli,
d_n_ex,
d_ex_list,
exli,
N,
ex_start);
ex_start += FILTER_BATCH_SIZE;
}
return cudaSuccess;
}
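// Worked example of the batching arithmetic above (a sketch, with values assumed for
// illustration): if exli.getH() == 10 and FILTER_BATCH_SIZE == 4, then n_batches == 3 and the
// kernel is launched with ex_start = 0, 4 and 8. In the last batch each thread has at most
// n_ex - 8 exclusions left to compare against, and threads whose particles have n_ex <= ex_start
// simply return without touching their neighbor list.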
//! Compile time determined block size for the NSQ neighbor list calculation
const int NLIST_BLOCK_SIZE = 128;
//! Generate the neighbor list on the GPU in O(N^2) time
/*! \param d_nlist Neighbor list to write out
\param d_n_neigh Number of neighbors to write
\param d_last_updated_pos Particle positions will be written here
\param d_conditions Overflow condition flag
\param nli Indexer for indexing into d_nlist
\param d_pos Current particle positions
\param N number of particles
\param box Box dimensions for handling periodic boundary conditions
\param r_maxsq Precalculated value for r_max*r_max
each thread is to compute the neighborlist for a single particle i
each block will load a bunch of particles into shared mem and then each thread will compare its particle
to each particle in shmem to see if they are a neighbor. Since all threads in the block access the same
shmem element at the same time, the value is broadcast and there are no bank conflicts
*/
__global__
void gpu_compute_nlist_nsq_kernel(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const Index2D nli,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int n_ghost,
const BoxDim box,
const Scalar r_maxsq)
{
// shared data to store all of the particles we compare against
__shared__ Scalar sdata[NLIST_BLOCK_SIZE*4];
// load in the particle
int pidx = blockIdx.x * NLIST_BLOCK_SIZE + threadIdx.x;
// store the max number of neighbors needed for this thread
unsigned int n_neigh_needed = 0;
Scalar4 pos = make_scalar4(0, 0, 0, 0);
if (pidx < N)
pos = d_pos[pidx];
Scalar px = pos.x;
Scalar py = pos.y;
Scalar pz = pos.z;
// track the number of neighbors added so far
int n_neigh = 0;
// each block is going to loop over all N particles (this assumes memory is padded to a multiple of blockDim.x)
// in blocks of blockDim.x
// include ghosts as neighbors
for (int start = 0; start < N + n_ghost; start += NLIST_BLOCK_SIZE)
{
// load data
Scalar4 neigh_pos = make_scalar4(0, 0, 0, 0);
if (start + threadIdx.x < N + n_ghost)
neigh_pos = d_pos[start + threadIdx.x];
// make sure everybody is caught up before we stomp on the memory
__syncthreads();
sdata[threadIdx.x] = neigh_pos.x;
sdata[threadIdx.x + NLIST_BLOCK_SIZE] = neigh_pos.y;
sdata[threadIdx.x + 2*NLIST_BLOCK_SIZE] = neigh_pos.z;
sdata[threadIdx.x + 3*NLIST_BLOCK_SIZE] = neigh_pos.w; //< unused, but try to get compiler to fully coalesce reads
// ensure all data is loaded
__syncthreads();
// now each thread loops over every particle in shmem, but doesn't loop past the end of the particle list (since
// the block might extend that far)
int end_offset= NLIST_BLOCK_SIZE;
end_offset = min(end_offset, N + n_ghost - start);
if (pidx < N)
{
for (int cur_offset = 0; cur_offset < end_offset; cur_offset++)
{
// calculate dr
Scalar3 dx = make_scalar3(px - sdata[cur_offset],
py - sdata[cur_offset + NLIST_BLOCK_SIZE],
pz - sdata[cur_offset + 2*NLIST_BLOCK_SIZE]);
dx = box.minImage(dx);
// we don't add if we are comparing to ourselves, and we don't add if we are above the cut
if ((dot(dx,dx) <= r_maxsq) && ((start + cur_offset) != pidx))
{
unsigned int j = start + cur_offset;
if (n_neigh < nli.getH())
d_nlist[nli(pidx, n_neigh)] = j;
else
n_neigh_needed = n_neigh+1;
n_neigh++;
}
}
}
}
// now that we are done: update the first row that lists the number of neighbors
if (pidx < N)
{
d_n_neigh[pidx] = n_neigh;
d_last_updated_pos[pidx] = d_pos[pidx];
if (n_neigh_needed > 0)
atomicMax(&d_conditions[0], n_neigh_needed);
}
}
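// Layout note for the kernel above: sdata is a structure-of-arrays staging buffer holding one
// 128-particle tile per iteration -- x coordinates in sdata[0..127], y in sdata[128..255],
// z in sdata[256..383] and the (unused) w component in sdata[384..511]. Every thread in the
// block reads sdata[cur_offset + c*NLIST_BLOCK_SIZE] for the same cur_offset at the same time,
// so the shared-memory value is broadcast with no bank conflicts. Total work is O(N^2): each of
// the ~N/128 blocks sweeps over all N + n_ghost candidate positions.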
//! GPU kernel to update the exclusions list
__global__ void gpu_update_exclusion_list_kernel(const unsigned int *tags,
const unsigned int *rtags,
const unsigned int *n_ex_tag,
const unsigned int *ex_list_tag,
const Index2D ex_list_tag_indexer,
unsigned int *n_ex_idx,
unsigned int *ex_list_idx,
const Index2D ex_list_indexer,
const unsigned int N)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
unsigned int tag = tags[idx];
unsigned int n = n_ex_tag[tag];
// copy over number of exclusions
n_ex_idx[idx] = n;
for (unsigned int offset = 0; offset < n; offset++)
{
unsigned int ex_tag = ex_list_tag[ex_list_tag_indexer(tag, offset)];
unsigned int ex_idx = rtags[ex_tag];
ex_list_idx[ex_list_indexer(idx, offset)] = ex_idx;
}
}
//! GPU function to update the exclusion list on the device
/*! \param d_tag Array of particle tags
\param d_rtag Array of reverse-lookup tag->idx
\param d_n_ex_tag List of number of exclusions per tag
\param d_ex_list_tag 2D Exclusion list per tag
\param ex_list_tag_indexer Indexer for per-tag exclusion list
\param d_n_ex_idx List of number of exclusions per idx
\param d_ex_list_idx Exclusion list per idx
\param ex_list_indexer Indexer for per-idx exclusion list
\param N number of particles
*/
cudaError_t gpu_update_exclusion_list(const unsigned int *d_tag,
const unsigned int *d_rtag,
const unsigned int *d_n_ex_tag,
const unsigned int *d_ex_list_tag,
const Index2D& ex_list_tag_indexer,
unsigned int *d_n_ex_idx,
unsigned int *d_ex_list_idx,
const Index2D& ex_list_indexer,
const unsigned int N)
{
unsigned int block_size = 512;
gpu_update_exclusion_list_kernel<<<N/block_size + 1, block_size>>>(d_tag,
d_rtag,
d_n_ex_tag,
d_ex_list_tag,
ex_list_tag_indexer,
d_n_ex_idx,
d_ex_list_idx,
ex_list_indexer,
N);
return cudaSuccess;
}
//! Generate the neighbor list on the GPU in O(N^2) time
cudaError_t gpu_compute_nlist_nsq(unsigned int *d_nlist,
unsigned int *d_n_neigh,
Scalar4 *d_last_updated_pos,
unsigned int *d_conditions,
const Index2D& nli,
const Scalar4 *d_pos,
const unsigned int N,
const unsigned int n_ghost,
const BoxDim& box,
const Scalar r_maxsq)
{
// setup the grid to run the kernel
int block_size = NLIST_BLOCK_SIZE;
dim3 grid( (N/block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
gpu_compute_nlist_nsq_kernel<<< grid, threads >>>(d_nlist,
d_n_neigh,
d_last_updated_pos,
d_conditions,
nli,
d_pos,
N,
n_ghost,
box,
r_maxsq);
return cudaSuccess;
}
|
019695bcf0ed19aff25b76d03519e82dcc603ba0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaclaw5_update_q_cuda(int mbc, double dtdx, double dtdy, double* qold, double* fm, double* fp, double* gm, double* gp)
{
int mq = threadIdx.z;
int x = threadIdx.x;
int x_stride = blockDim.z;
int y = threadIdx.y;
int y_stride = (blockDim.x + 2*mbc)*x_stride;
int i = mq + (x+mbc)*x_stride + (y+mbc)*y_stride;
qold[i] = qold[i] - dtdx * (fm[i+x_stride] - fp[i])
- dtdy * (gm[i+y_stride] - gp[i]);
} | 019695bcf0ed19aff25b76d03519e82dcc603ba0.cu | #include "includes.h"
__global__ void cudaclaw5_update_q_cuda(int mbc, double dtdx, double dtdy, double* qold, double* fm, double* fp, double* gm, double* gp)
{
int mq = threadIdx.z;
int x = threadIdx.x;
int x_stride = blockDim.z;
int y = threadIdx.y;
int y_stride = (blockDim.x + 2*mbc)*x_stride;
int i = mq + (x+mbc)*x_stride + (y+mbc)*y_stride;
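// Layout and update notes (an interpretation based on the indexing above, not taken from the
// original sources): the fields are interleaved with the equation index mq fastest, then x
// (padded by mbc ghost cells on each side), then y, so x_stride steps one cell in x and
// y_stride steps one row in y. The statement below applies the flux-difference update
//   q_{i,j} <- q_{i,j} - dt/dx * (fm_{i+1,j} - fp_{i,j}) - dt/dy * (gm_{i,j+1} - gp_{i,j})
// where fm/fp and gm/gp are presumably Clawpack-style left/right-going fluctuations at the
// cell interfaces.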
qold[i] = qold[i] - dtdx * (fm[i+x_stride] - fp[i])
- dtdy * (gm[i+y_stride] - gp[i]);
} |
3853c56b7a84643acb6ab4207ae0b593a8d892c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define _VOLATILE_
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define load(x) __ldcg(x)
#define store(x, value) __stcs(x, value)
typedef long long ll_t;
typedef unsigned long long ull_t;
typedef unsigned char uint8_t;
typedef struct __builtin_align__(32) {
float s0, s1, s2, s3, s4, s5, s6, s7;
} _float8;
typedef union {
_float8 f8;
float val[8];
} float8;
__device__ void init_cCache(
float8 cCache[8]
) {
#pragma unroll
for (int i=0; i<8; i++){
#pragma unroll
for (int j=0; j<8; j++){
cCache[i].val[j] = 0.f;
}
}
}
__device__ void thread_matmul_v4(
_VOLATILE_ float aSM[8][128+4],
_VOLATILE_ float bSM[8][128+4],
float8 cCache[8],
int vx, int vy
) {
float aCache1[8];
float aCache2[8];
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache1[mi] = aSM[0][8*vy + mi];
}
#pragma unroll
for (int ki=0; ki<8; ki++){
int is_odd = ki & 1;
if (is_odd == 0){
if (likely(ki < 7)){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache2[mi] = aSM[ki+1][8*vy + mi];
}
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache1[mi];
cCache[mi].val[ni] = fmaf(a, b, cCache[mi].val[ni]);
}
}
} else {
if (likely(ki < 7)){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache1[mi] = aSM[ki+1][8*vy + mi];
}
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache2[mi];
cCache[mi].val[ni] = fmaf(a, b, cCache[mi].val[ni]);
}
}
}
}
}
__device__ void thread_matmul_v3(
_VOLATILE_ float aSM[8][128+4],
_VOLATILE_ float bSM[8][128+4],
float8 cCache[8],
int vx, int vy
) {
float aCache[8];
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache[mi] = aSM[ki][8*vy + mi];
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache[mi];
cCache[mi].val[ni] = fmaf(a, b, cCache[mi].val[ni]);
}
}
}
}
__device__ void mask_cCache(
float8 cCache[8],
const uint8_t* ElementMask,
int gStartx,
int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iM = gStarty + vy*8 + i;
if (likely(iM < M)){
#pragma unroll
for (int j=0; j<8; j++){
int iN = gStartx + vx*8 + j;
if (likely(iN < N)){
uint8_t element_mask = ElementMask[(__MASK_BID__)*M*N + (iM)*N + (iN)];
cCache[i].val[j] *= element_mask;
}
}
}
}
}
// Unsafe: performs unguarded 8-wide float8 stores, so the 8 output columns starting at gStartx + vx*8 must all lie within N (and C must be suitably aligned)
__device__ void write_c(
float8 cCache[8],
float* C,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iM = gStarty + vy*8 + i;
if (likely(iM < M)){
int iN_start = gStartx + vx*8;
reinterpret_cast<float8*>(C + (bid)*M*N + (iM)*N + (iN_start))[0] = cCache[i];
}
}
}
__device__ void write_c_v3(
float8 cCache[8],
float* C,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
__shared__ volatile float cSM[16][128];
#pragma unroll
for (int mi=0; mi<8; mi++){
int iM = gStarty + vy*8 + mi;
// Store 1 row from cCache to cSM
if (iM < M){
#pragma unroll
for (int ni=0; ni<8; ni++){
cSM[vy][vx*8 + ni] = cCache[mi].val[ni];
}
// Store to C
#pragma unroll
for (int ni=0; ni<8; ni++){
int iN = gStartx + 16*ni + vx;
if (iN < N){
float cVal = cSM[vy][16*ni + vx];
//store(C+(bid)*M*N + (iM)*N + (iN), cVal);
C[(bid)*M*N + (iM)*N + (iN)] = cVal;
}
}
}
}
}
extern "C"
__global__ void mbmm_tn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
const uint8_t* __restrict__ BlockMask,
const uint8_t* __restrict__ ThreadMask,
const uint8_t* __restrict__ ElementMask,
int M, int N, int K
){
}
extern "C"
__global__ void mbmm_nt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
const uint8_t* __restrict__ BlockMask,
const uint8_t* __restrict__ ThreadMask,
const uint8_t* __restrict__ ElementMask,
int M, int N, int K
){
}
extern "C"
__global__ void mbmm_nn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
const uint8_t* __restrict__ BlockMask,
const uint8_t* __restrict__ ThreadMask,
const uint8_t* __restrict__ ElementMask,
int M, int N, int K
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate.
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
int bM = (M + 128 - 1) / 128;
int bN = (N + 128 - 1) / 128;
int tM = (M + 8 - 1) / 8;
int tN = (N + 8 - 1) / 8;
uint8_t block_mask = BlockMask[__MASK_BID__*bM*bN + (bIdxY)*bN + (bIdxX)];
uint8_t thread_mask = ThreadMask[__MASK_BID__*tM*tN + (bIdxY*16 + vy)*tN + (bIdxX*16 + vx) ];
if (block_mask == 0){
return;
}
__shared__ _VOLATILE_ float aSM1[8][128+4];
__shared__ _VOLATILE_ float bSM1[8][128+4];
__shared__ _VOLATILE_ float aSM2[8][128+4];
__shared__ _VOLATILE_ float bSM2[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + wx + i*32;
if (likely(iM < M)){
if (likely(dx < K)){
aBuffer1[i] = load(A + (bid)*M*K + (iM)*K + (dx));
} else {
aBuffer1[i] = 0.f;
}
if (likely(dx+8 < K)){
aBuffer2[i] = load(A + (bid)*M*K + (iM)*K + (dx+8));
} else {
aBuffer2[i] = 0.f;
}
}
if (likely(iN < N)){
if (likely(wy < K)){
bBuffer1[i] = load(B + (bid)*N*K + (wy)*N + (iN));
} else {
bBuffer1[i] = 0.f;
}
if (likely(wy+8 < K)){
bBuffer2[i] = load(B + (bid)*N*K + (wy+8)*N + (iN));
} else {
bBuffer2[i] = 0.f;
}
}
}
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
// Index on K axis of A and B
int iKA = gStartk + 16 + dx;
int iKB = gStartk + 16 + wy;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM1[dx][dy+i*32] = aBuffer1[i];
bSM1[wy][wx+i*32+i] = bBuffer1[i];
aSM2[dx][dy+i*32] = aBuffer2[i];
bSM2[wy][wx+i*32+i] = bBuffer2[i];
// Start loading next 16*128 tile of A and B to buffer1 and buffer2.
// Don't load anything on the last iteration.
// Loading from global memory will not block thread_matmul
if (likely(itr < nIt - 1)){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + wx;
if (likely(iM < M)){
if (likely(iKA < K)){
aBuffer1[i] = load(A + (bid)*M*K + (iM)*K + (iKA));
} else {
aBuffer1[i] = 0.f;
}
if (likely(iKA+8 < K)){
aBuffer2[i] = load(A + (bid)*M*K + (iM)*K + (iKA+8));
} else {
aBuffer2[i] = 0.f;
}
}
if (likely(iN < N)){
if (likely(iKB < K)){
bBuffer1[i] = load(B + (bid)*N*K + (iKB)*N + (iN));
} else {
bBuffer1[i] = 0.f;
}
if (likely(iKB+8 < K)){
bBuffer2[i] = load(B + (bid)*N*K + (iKB+8)*N + (iN));
} else {
bBuffer2[i] = 0.f;
}
}
}
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
// Each thread computes 8 x 8 matrix multiplication
// Accumulating intermediate results in cCache
// aSM1, bSM1, aSM2, bSM2 are consumed
if (thread_mask != 0){
thread_matmul_v3(aSM1, bSM1, cCache, vx, vy);
thread_matmul_v3(aSM2, bSM2, cCache, vx, vy);
}
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// At the end of main loop, store cCache to C
// only partially-masked 8x8 tiles (0 < thread_mask < 64) need per-element masking
if (thread_mask > 0 && thread_mask < 64){
mask_cCache(cCache, ElementMask, gStartx, gStarty, vx, vy, bid, M, N);
}
write_c_v3(cCache, C, gStartx, gStarty, vx, vy, bid, M, N);
}
extern "C"
__global__ void mbmm_tt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
const uint8_t* __restrict__ BlockMask,
const uint8_t* __restrict__ ThreadMask,
const uint8_t* __restrict__ ElementMask,
int M, int N, int K
){
} | 3853c56b7a84643acb6ab4207ae0b593a8d892c9.cu | #define _VOLATILE_
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define load(x) __ldcg(x)
#define store(x, value) __stcs(x, value)
typedef long long ll_t;
typedef unsigned long long ull_t;
typedef unsigned char uint8_t;
typedef struct __builtin_align__(32) {
float s0, s1, s2, s3, s4, s5, s6, s7;
} _float8;
typedef union {
_float8 f8;
float val[8];
} float8;
__device__ void init_cCache(
float8 cCache[8]
) {
#pragma unroll
for (int i=0; i<8; i++){
#pragma unroll
for (int j=0; j<8; j++){
cCache[i].val[j] = 0.f;
}
}
}
__device__ void thread_matmul_v4(
_VOLATILE_ float aSM[8][128+4],
_VOLATILE_ float bSM[8][128+4],
float8 cCache[8],
int vx, int vy
) {
float aCache1[8];
float aCache2[8];
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache1[mi] = aSM[0][8*vy + mi];
}
#pragma unroll
for (int ki=0; ki<8; ki++){
int is_odd = ki & 1;
if (is_odd == 0){
if (likely(ki < 7)){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache2[mi] = aSM[ki+1][8*vy + mi];
}
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache1[mi];
cCache[mi].val[ni] = fmaf(a, b, cCache[mi].val[ni]);
}
}
} else {
if (likely(ki < 7)){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache1[mi] = aSM[ki+1][8*vy + mi];
}
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache2[mi];
cCache[mi].val[ni] = fmaf(a, b, cCache[mi].val[ni]);
}
}
}
}
}
__device__ void thread_matmul_v3(
_VOLATILE_ float aSM[8][128+4],
_VOLATILE_ float bSM[8][128+4],
float8 cCache[8],
int vx, int vy
) {
float aCache[8];
#pragma unroll
for (int ki=0; ki<8; ki++){
#pragma unroll
for (int mi=0; mi<8; mi++){
aCache[mi] = aSM[ki][8*vy + mi];
}
#pragma unroll
for (int ni=0; ni<8; ni++){
float b = bSM[ki][vx/4 + 8*vx + ni];
#pragma unroll
for (int mi=0; mi<8; mi++){
float a = aCache[mi];
cCache[mi].val[ni] = fmaf(a, b, cCache[mi].val[ni]);
}
}
}
}
__device__ void mask_cCache(
float8 cCache[8],
const uint8_t* ElementMask,
int gStartx,
int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iM = gStarty + vy*8 + i;
if (likely(iM < M)){
#pragma unroll
for (int j=0; j<8; j++){
int iN = gStartx + vx*8 + j;
if (likely(iN < N)){
uint8_t element_mask = ElementMask[(__MASK_BID__)*M*N + (iM)*N + (iN)];
cCache[i].val[j] *= element_mask;
}
}
}
}
}
// Unsafe: performs unguarded 8-wide float8 stores, so the 8 output columns starting at gStartx + vx*8 must all lie within N (and C must be suitably aligned)
__device__ void write_c(
float8 cCache[8],
float* C,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
#pragma unroll
for (int i=0; i<8; i++){
int iM = gStarty + vy*8 + i;
if (likely(iM < M)){
int iN_start = gStartx + vx*8;
reinterpret_cast<float8*>(C + (bid)*M*N + (iM)*N + (iN_start))[0] = cCache[i];
}
}
}
__device__ void write_c_v3(
float8 cCache[8],
float* C,
int gStartx, int gStarty,
int vx, int vy, int bid,
int M, int N
) {
__shared__ volatile float cSM[16][128];
#pragma unroll
for (int mi=0; mi<8; mi++){
int iM = gStarty + vy*8 + mi;
// Store 1 row from cCache to cSM
if (iM < M){
#pragma unroll
for (int ni=0; ni<8; ni++){
cSM[vy][vx*8 + ni] = cCache[mi].val[ni];
}
// Store to C
#pragma unroll
for (int ni=0; ni<8; ni++){
int iN = gStartx + 16*ni + vx;
if (iN < N){
float cVal = cSM[vy][16*ni + vx];
//store(C+(bid)*M*N + (iM)*N + (iN), cVal);
C[(bid)*M*N + (iM)*N + (iN)] = cVal;
}
}
}
}
}
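// Note on write_c_v3 above: unlike write_c, it stages each thread's 8 accumulated columns
// through the cSM buffer and then re-reads them with a different thread-to-column mapping
// (16*ni + vx), so that the 16 threads of a row issue consecutive, bounds-checked stores to C.
// This trades a shared-memory round trip for coalesced global writes that stay safe when M or N
// is not a multiple of 128.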
extern "C"
__global__ void mbmm_tn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
const uint8_t* __restrict__ BlockMask,
const uint8_t* __restrict__ ThreadMask,
const uint8_t* __restrict__ ElementMask,
int M, int N, int K
){
}
extern "C"
__global__ void mbmm_nt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
const uint8_t* __restrict__ BlockMask,
const uint8_t* __restrict__ ThreadMask,
const uint8_t* __restrict__ ElementMask,
int M, int N, int K
){
}
extern "C"
__global__ void mbmm_nn(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
const uint8_t* __restrict__ BlockMask,
const uint8_t* __restrict__ ThreadMask,
const uint8_t* __restrict__ ElementMask,
int M, int N, int K
){
int tid = threadIdx.x; // thread idx
int bid = blockIdx.z; // batch idx
// Neighboring blocks are grouped into PN x PM block groups in order to increase
// L1 cache hit rate.
// There are ceil(M/PM) x ceil(N/PN) block groups in total.
// Blocks within block groups are indexed with blockIdx.x % PN and blockIdx.x / PN
int px = blockIdx.x % _PN_;
int py = blockIdx.x / _PN_;
int bDimX = (N + (128*_PN_) - 1) / (128*_PN_);
int bDimY = (M + (128*_PM_) - 1) / (128*_PM_);
int bIdxX = (blockIdx.y % bDimX) * _PN_ + px;
int bIdxY = (blockIdx.y / bDimX) * _PM_ + py;
int gStartx = bIdxX * 128; // starting index of block on N axis
int gStarty = bIdxY * 128; // starting index of block on M axis
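// Worked example of the swizzle above (a sketch; the launch configuration is assumed, since it
// is not part of this file): with _PN_ = _PM_ = 2 and a grid where blockIdx.x enumerates the
// _PN_*_PM_ = 4 positions inside a block group and blockIdx.y enumerates the groups,
// blockIdx.x = 3 gives (px, py) = (1, 1); for N = M = 512 (so bDimX = bDimY = 2) blockIdx.y = 1
// then maps to (bIdxX, bIdxY) = (1*2 + 1, 0*2 + 1) = (3, 1), i.e. the 128x128 tile starting at
// (gStartx, gStarty) = (384, 128). Keeping a group's four tiles adjacent is what the L1-reuse
// comment above refers to.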
if (gStartx > N || gStarty > M){
return;
}
// These are used to re-arrange threads into different shapes
// for example: (256) -> (16, 16) -> (8, 32) -> (32, 8)
int vx = tid % 16;
int vy = tid / 16;
int wx = tid % 32; // thread idx in warp
int wy = tid / 32; // warp id
int dx = tid % 8;
int dy = tid / 8;
int bM = (M + 128 - 1) / 128;
int bN = (N + 128 - 1) / 128;
int tM = (M + 8 - 1) / 8;
int tN = (N + 8 - 1) / 8;
uint8_t block_mask = BlockMask[__MASK_BID__*bM*bN + (bIdxY)*bN + (bIdxX)];
uint8_t thread_mask = ThreadMask[__MASK_BID__*tM*tN + (bIdxY*16 + vy)*tN + (bIdxX*16 + vx) ];
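// Mask hierarchy, as inferred from the indexing above (not documented in this file): BlockMask
// appears to hold one byte per 128x128 output tile (bM x bN per batch), ThreadMask one byte per
// 8x8 sub-tile (16x16 of them per block), and ElementMask one byte per output element (M x N per
// batch). __MASK_BID__, like _PN_ and _PM_, looks like a placeholder substituted before
// compilation to select which batch's masks to read. A zero block mask skips the whole block
// below; a zero thread mask skips that thread's 8x8 accumulation.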
if (block_mask == 0){
return;
}
__shared__ _VOLATILE_ float aSM1[8][128+4];
__shared__ _VOLATILE_ float bSM1[8][128+4];
__shared__ _VOLATILE_ float aSM2[8][128+4];
__shared__ _VOLATILE_ float bSM2[8][128+4];
float aBuffer1[4];
float bBuffer1[4];
float aBuffer2[4];
float bBuffer2[4];
float8 cCache[8];
init_cCache(cCache);
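// Pipeline overview for the main loop below: each iteration consumes one K-slice of 16, split
// into two 8-deep shared-memory tiles (aSM1/bSM1 and aSM2/bSM2). The per-thread aBuffer*/bBuffer*
// registers act as a double buffer: while the current tiles are being multiplied out of shared
// memory, the next 16x128 slices of A and B have already been requested from global memory, so
// the global loads overlap with the FMA work.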
// Load initial 16 x 128 tile of A and B to buffer1 and buffer2
#pragma unroll
for (int i=0; i<4; i++){
int iM = gStarty + dy + i*32;
int iN = gStartx + wx + i*32;
if (likely(iM < M)){
if (likely(dx < K)){
aBuffer1[i] = load(A + (bid)*M*K + (iM)*K + (dx));
} else {
aBuffer1[i] = 0.f;
}
if (likely(dx+8 < K)){
aBuffer2[i] = load(A + (bid)*M*K + (iM)*K + (dx+8));
} else {
aBuffer2[i] = 0.f;
}
}
if (likely(iN < N)){
if (likely(wy < K)){
bBuffer1[i] = load(B + (bid)*N*K + (wy)*N + (iN));
} else {
bBuffer1[i] = 0.f;
}
if (likely(wy+8 < K)){
bBuffer2[i] = load(B + (bid)*N*K + (wy+8)*N + (iN));
} else {
bBuffer2[i] = 0.f;
}
}
}
// Number of main loop iterations is ceil(k/16)
int nIt = (K + 16 - 1) / 16;
#pragma unroll
for (int itr=0; itr<nIt; itr++){
int gStartk = itr * 16;
// Index on K axis of A and B
int iKA = gStartk + 16 + dx;
int iKB = gStartk + 16 + wy;
#pragma unroll
for (int i=0; i<4; i++){
// Store buffered tiles into shared memory
aSM1[dx][dy+i*32] = aBuffer1[i];
bSM1[wy][wx+i*32+i] = bBuffer1[i];
aSM2[dx][dy+i*32] = aBuffer2[i];
bSM2[wy][wx+i*32+i] = bBuffer2[i];
// Start loading next 16*128 tile of A and B to buffer1 and buffer2.
// Don't load anything on the last iteration.
// Loading from global memory will not block thread_matmul
if (likely(itr < nIt - 1)){
int iM = gStarty + i*32 + dy;
int iN = gStartx + i*32 + wx;
if (likely(iM < M)){
if (likely(iKA < K)){
aBuffer1[i] = load(A + (bid)*M*K + (iM)*K + (iKA));
} else {
aBuffer1[i] = 0.f;
}
if (likely(iKA+8 < K)){
aBuffer2[i] = load(A + (bid)*M*K + (iM)*K + (iKA+8));
} else {
aBuffer2[i] = 0.f;
}
}
if (likely(iN < N)){
if (likely(iKB < K)){
bBuffer1[i] = load(B + (bid)*N*K + (iKB)*N + (iN));
} else {
bBuffer1[i] = 0.f;
}
if (likely(iKB+8 < K)){
bBuffer2[i] = load(B + (bid)*N*K + (iKB+8)*N + (iN));
} else {
bBuffer2[i] = 0.f;
}
}
}
}
// synchronize threads in order to make sure tiles of A and B are fully
// loaded to shared memory.
__syncthreads();
// Each thread computes 8 x 8 matrix multiplication
// Accumulating intermediate results in cCache
// aSM1, bSM1, aSM2, bSM2 are consumed
if (thread_mask != 0){
thread_matmul_v3(aSM1, bSM1, cCache, vx, vy);
thread_matmul_v3(aSM2, bSM2, cCache, vx, vy);
}
// synchronize threads to signal that shared memory is consumed.
__syncthreads();
}
// At the end of main loop, store cCache to C
// only partially-masked 8x8 tiles (0 < thread_mask < 64) need per-element masking
if (thread_mask > 0 && thread_mask < 64){
mask_cCache(cCache, ElementMask, gStartx, gStarty, vx, vy, bid, M, N);
}
write_c_v3(cCache, C, gStartx, gStarty, vx, vy, bid, M, N);
}
extern "C"
__global__ void mbmm_tt(
const float* __restrict__ A,
const float* __restrict__ B,
float* __restrict__ C,
const uint8_t* __restrict__ BlockMask,
const uint8_t* __restrict__ ThreadMask,
const uint8_t* __restrict__ ElementMask,
int M, int N, int K
){
} |
1d3578a61a14d54b48edc93c2eb786ec8d33dbee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <algorithm>
#include <cmath>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/caption_input_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void GetGT(const int nthreads, const int N_, const int T_, const int H_,
int t, const Dtype* gt, const Dtype* Ws, Dtype* X_data)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
const int n = index / H_;
const int h = index % H_;
const Dtype *lb = gt + n*(T_-1) ;
const int word_id= int(lb[t-1]);
const Dtype *Ws_id = Ws + word_id*H_ ;
Dtype *X_t = X_data + t*N_*H_ + n*H_;
X_t[h] = Ws_id[h];
}
}
template <typename Dtype>
__global__ void UpdateWs(const int nthreads, const int N_, const int T_, const int H_,
int t, const Dtype* gt, const Dtype* dX, Dtype* dWs)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
const int n = index / H_;
const int h = index % H_;
const Dtype *lb = gt + n*(T_-1) ;
const int word_id= int(lb[t-1]);
Dtype *dWs_id = dWs + word_id*H_ ;
const Dtype *dX_t = dX + t*N_*H_ + n*H_;
dWs_id[h] += dX_t[h];
}
}
template <typename Dtype>
void CaptionInputLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
CHECK_EQ(top[0]->gpu_data(), X_.gpu_data());
const Dtype* bottom_img_data = bottom[0]->gpu_data();
const Dtype* bottom_sent_data = bottom[1]->gpu_data();
const Dtype* We = this->blobs_[0]->gpu_data();
const Dtype* be = this->blobs_[1]->gpu_data();
const Dtype* Ws = this->blobs_[2]->gpu_data();
Dtype* X_data = X_.mutable_gpu_data();
// Compute X[0], two steps: X[0] = We * img_vec, X[0] += outerprod(1, be)
caffe_gpu_gemm(CblasNoTrans, CblasTrans, 1*N_, H_, P_, Dtype(1.), bottom_img_data, We, Dtype(0.), X_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 1*N_, H_, 1, Dtype(1.), be_multiplier_.gpu_data(), be, Dtype(1.), X_data);
for (int t = 1; t < T_; t++)
{
hipLaunchKernelGGL(( GetGT<Dtype>), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N_*H_, N_, T_, H_, t, bottom[1]->gpu_data(), Ws, X_data);
CUDA_POST_KERNEL_CHECK;
}
}
template <typename Dtype>
void CaptionInputLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom)
{
//LOG(INFO) << "=========== Backward Caption Input GPU ============"<<std::endl;
const Dtype* bottom_img_data = bottom[0]->gpu_data();
const Dtype* bottom_sent_data = bottom[1]->gpu_data();
const Dtype* dX = X_.gpu_diff();
Dtype* dWe = this->blobs_[0]->mutable_gpu_diff();
Dtype* dbe = this->blobs_[1]->mutable_gpu_diff();
Dtype* dWs = this->blobs_[2]->mutable_gpu_diff();
if (this->param_propagate_down_[0])
{
caffe_gpu_gemm(CblasTrans, CblasNoTrans, H_, P_, N_, Dtype(1.),
dX, bottom_img_data, Dtype(1.), dWe);
}
if (this->param_propagate_down_[1])
{
caffe_gpu_gemv(CblasTrans, N_, H_, Dtype(1.),
dX, be_multiplier_.gpu_data(), Dtype(1.), dbe);
}
if (this->param_propagate_down_[2])
{
for(int t=1; t < T_; t++)
{
hipLaunchKernelGGL(( UpdateWs<Dtype>), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N_*H_, N_, T_, H_, t, bottom[1]->gpu_data(), dX, dWs);
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CaptionInputLayer);
} // namespace caffe
| 1d3578a61a14d54b48edc93c2eb786ec8d33dbee.cu | #include <vector>
#include <algorithm>
#include <cmath>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/caption_input_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void GetGT(const int nthreads, const int N_, const int T_, const int H_,
int t, const Dtype* gt, const Dtype* Ws, Dtype* X_data)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
const int n = index / H_;
const int h = index % H_;
const Dtype *lb = gt + n*(T_-1) ;
const int word_id= int(lb[t-1]);
const Dtype *Ws_id = Ws + word_id*H_ ;
Dtype *X_t = X_data + t*N_*H_ + n*H_;
X_t[h] = Ws_id[h];
}
}
template <typename Dtype>
__global__ void UpdateWs(const int nthreads, const int N_, const int T_, const int H_,
int t, const Dtype* gt, const Dtype* dX, Dtype* dWs)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
const int n = index / H_;
const int h = index % H_;
const Dtype *lb = gt + n*(T_-1) ;
const int word_id= int(lb[t-1]);
Dtype *dWs_id = dWs + word_id*H_ ;
const Dtype *dX_t = dX + t*N_*H_ + n*H_;
dWs_id[h] += dX_t[h];
}
}
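// Note on UpdateWs above: the accumulation dWs_id[h] += dX_t[h] is not atomic. If two samples in
// the batch share the same ground-truth word id at timestep t, their threads update the same dWs
// row concurrently and increments can be lost; an atomicAdd (or a per-word reduction) would be
// needed to make that case exact. With distinct word ids per timestep the code is race-free.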
template <typename Dtype>
void CaptionInputLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
CHECK_EQ(top[0]->gpu_data(), X_.gpu_data());
const Dtype* bottom_img_data = bottom[0]->gpu_data();
const Dtype* bottom_sent_data = bottom[1]->gpu_data();
const Dtype* We = this->blobs_[0]->gpu_data();
const Dtype* be = this->blobs_[1]->gpu_data();
const Dtype* Ws = this->blobs_[2]->gpu_data();
Dtype* X_data = X_.mutable_gpu_data();
// Compute X[0], two steps: X[0] = We * img_vec, X[0] += outerprod(1, be)
caffe_gpu_gemm(CblasNoTrans, CblasTrans, 1*N_, H_, P_, Dtype(1.), bottom_img_data, We, Dtype(0.), X_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, 1*N_, H_, 1, Dtype(1.), be_multiplier_.gpu_data(), be, Dtype(1.), X_data);
for (int t = 1; t < T_; t++)
{
GetGT<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
N_*H_, N_, T_, H_, t, bottom[1]->gpu_data(), Ws, X_data);
CUDA_POST_KERNEL_CHECK;
}
}
template <typename Dtype>
void CaptionInputLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom)
{
//LOG(INFO) << "=========== Backward Caption Input GPU ============"<<std::endl;
const Dtype* bottom_img_data = bottom[0]->gpu_data();
const Dtype* bottom_sent_data = bottom[1]->gpu_data();
const Dtype* dX = X_.gpu_diff();
Dtype* dWe = this->blobs_[0]->mutable_gpu_diff();
Dtype* dbe = this->blobs_[1]->mutable_gpu_diff();
Dtype* dWs = this->blobs_[2]->mutable_gpu_diff();
if (this->param_propagate_down_[0])
{
caffe_gpu_gemm(CblasTrans, CblasNoTrans, H_, P_, N_, Dtype(1.),
dX, bottom_img_data, Dtype(1.), dWe);
}
if (this->param_propagate_down_[1])
{
caffe_gpu_gemv(CblasTrans, N_, H_, Dtype(1.),
dX, be_multiplier_.gpu_data(), Dtype(1.), dbe);
}
if (this->param_propagate_down_[2])
{
for(int t=1; t < T_; t++)
{
UpdateWs<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
N_*H_, N_, T_, H_, t, bottom[1]->gpu_data(), dX, dWs);
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(CaptionInputLayer);
} // namespace caffe
|
90f75c13450658d225bfdaf2cdce3f98b47604cf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/Exceptions.h>
#include <THH/THHTensorMathReduce.cuh>
#include <math.h>
#include <ATen/native/Distance.h>
namespace at { namespace native {
namespace {
static const int forward_threads = 256;
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
template <typename scalar_t>
struct dists {
static __forceinline__ __device__ scalar_t sign(scalar_t val) {
return (0 < val) - (val < 0);
}
// Zero norm
struct zero {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
};
// One norm
struct one {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); }
};
// Special case backward when p is less than two
struct lt_two {
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * ::pow(std::abs(diff), p - 1) * grad / ::pow(dist, p - 1); }
};
// Two norm
struct two {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; }
};
// General p norm
struct p {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += ::pow(diff, p); }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return ::pow(agg, static_cast<scalar_t>(1) / p); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : diff * ::pow(std::abs(diff), p - 2) * grad / ::pow(dist, p - 1); }
};
// Inf norm
struct inf {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); }
};
};
template <typename scalar_t, typename F>
__device__ static inline scalar_t reduce_agg(scalar_t agg) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
__shared__ scalar_t shared[forward_threads];
int lane = threadIdx.x % warpSize;
int warp_id = threadIdx.x / warpSize;
if (lane == 0) {
shared[warp_id] = agg;
}
__syncthreads();
agg = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
if (warp_id == 0) {
for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
}
return agg;
}
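// Reduction structure of reduce_agg above: a two-stage block reduction. Stage 1 folds each
// warp's values with shuffle-down operations; stage 2 has lane 0 of every warp deposit its
// partial result in shared memory, after which the first warp reduces the per-warp partials the
// same way. F::agg supplies the combining operator, so the same code handles sums (p-norms) and
// the running maximum used by the infinity norm.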
template <typename scalar_t, typename F>
__global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.x;
const int stride = blockDim.x;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = self + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
agg = reduce_agg<scalar_t, F>(agg);
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
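// Index mapping used above (derivable from the condensed pairwise layout): result entry k
// corresponds to the pair (i, j) with i < j, stored row by row, so
//   k = i*n - i*(i+1)/2 + (j - i - 1).
// Inverting the quadratic in i gives i = floor(n2 - sqrt(n2^2 - 2k)) with n2 = n - 0.5; the
// kernel subtracts an extra 1 under the square root (n2_squared_minus_1) to guard against the
// floating-point truncation issue noted where these constants are computed, and then recovers
// j = k - n*i + i*(i+1)/2 + i + 1.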
template <typename scalar_t, typename F>
__global__ static void cdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * x1, const scalar_t * x2, const scalar_t * dist, int64_t gs,
const scalar_t p, const int64_t r1, const int64_t r2, const int64_t m, const int64_t count) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= count) {
return;
}
int64_t i = k / r2;
int64_t j = k % r2;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = x1 + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = x2 + j * m + init;
scalar_t * buff_i = buffer + (r1 * j + i) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
}
}
template <typename scalar_t, typename F>
__global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= combs) {
return;
}
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
int64_t ib = j - i - 1;
int64_t jb = n - 2 - i;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = self + j * m + init;
scalar_t * buff_i = buffer + (ib * n + i) * m + init;
scalar_t * buff_j = buffer + (jb * n + j) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
*buff_j = -res;
}
}
template <typename scalar_t, typename F>
__global__ static void cdist_kernel_cuda_impl(scalar_t * result, const scalar_t * x1, const scalar_t * x2, const scalar_t p, const int64_t r1, const int64_t r2, const int64_t m) {
const int k = blockIdx.x;
const int64_t i = k / r2;
const int64_t j = k % r2;
const int stride = blockDim.x;
const scalar_t * const start = x1 + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = x2 + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
agg = reduce_agg<scalar_t, F>(agg);
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
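// Launch shape note for cdist_kernel_cuda_impl above: the host driver below launches one block
// per output entry (r1*r2 blocks of forward_threads threads). Block k recovers its row pair as
// i = k / r2 into x1 and j = k % r2 into x2, strides its threads across the m features, and
// reduces the per-thread partials with reduce_agg before thread 0 writes result[k].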
void cdist_kernel_impl(Tensor& result, const Tensor& x1, const Tensor& x2, double p) {
int64_t r1 = x1.size(-2);
int64_t r2 = x2.size(-2);
int64_t m = x1.size(-1);
const dim3 grid(r1*r2);
const dim3 block(forward_threads);
AT_DISPATCH_FLOATING_TYPES(x1.scalar_type(), "cdist_cuda", [&] {
if (p == 0.0) {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (p == 1.0) {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (p == 2.0) {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else {
hipLaunchKernelGGL(( cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
}
});
AT_CUDA_CHECK(hipGetLastError());
}
void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) {
const dim3 grid(result.numel());
const dim3 block(forward_threads);
int64_t n = self.size(0);
int64_t m = self.size(1);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_cuda", [&] {
if (p == 0.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 1.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else {
hipLaunchKernelGGL(( pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(hipGetLastError());
}
void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) {
result.fill_(0);
return;
}
const int64_t n = result.size(0);
int64_t m = self.size(1);
const int block_x = 64;
// NB: be careful with changing block_y; as it's currently written, grid_y is limited to be 2^16.
// From binary search, block_y of 16 gives us max pdist dim0 of 1449,
// block_y of 4 gives us max pdist dim0 of 725.
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
Tensor buffer = at::empty({n - 1, result.size(0), result.size(1)}, result.options());
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_cuda_backward", [&] {
if (p == 1.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p < 2.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else {
hipLaunchKernelGGL(( pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(hipGetLastError());
at::sum_out(result, buffer, 0);
}
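// Why the (n-1, n, m) buffer above avoids atomics: each pair (i, j) with i < j contributes +res
// to the gradient row of i and -res to the row of j. The kernel writes those two contributions
// into buffer slices ib = j - i - 1 and jb = n - 2 - i, which are distinct for every pair
// touching a given row, so no two threads ever write the same buffer element. The final
// at::sum_out over dim 0 then folds the up-to-(n-1) contributions per row into the result.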
void cdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& x1, const Tensor& x2, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || x1.numel() == 0 || x2.numel() == 0) {
result.fill_(0);
return;
}
const int64_t r1 = x1.size(-2);
const int64_t r2 = x2.size(-2);
const int64_t m = x1.size(-1);
const int block_x = 64;
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
const int64_t count = dist.numel();
Tensor buffer = at::empty({r2, r1, m}, result.options());
AT_DISPATCH_FLOATING_TYPES(result.type(), "cdist_cuda_backward", [&] {
if (p == 1.0) {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (p < 2.0) {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (p == 2.0) {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (std::isinf(p)) {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else {
hipLaunchKernelGGL(( cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p>), dim3(grid), dim3(block), 0, 0, buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
}
});
AT_CUDA_CHECK(hipGetLastError());
at::sum_out(result, buffer, 0);
}
} // anonymous namespace
REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl);
REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl);
REGISTER_DISPATCH(cdist_stub, &cdist_kernel_impl);
REGISTER_DISPATCH(cdist_backward_stub, &cdist_backward_kernel_impl);
}} // at::native
| 90f75c13450658d225bfdaf2cdce3f98b47604cf.cu | #include <ATen/ATen.h>
#include <ATen/cuda/Exceptions.h>
#include <THC/THCTensorMathReduce.cuh>
#include <math.h>
#include <ATen/native/Distance.h>
namespace at { namespace native {
namespace {
static const int forward_threads = 256;
template <typename scalar_t>
static __forceinline__ __device__ scalar_t device_sqrt(scalar_t val);
template <>
__forceinline__ __device__ float device_sqrt(float val) {
return ::sqrtf(val);
}
template <>
__forceinline__ __device__ double device_sqrt(double val) {
return ::sqrt(val);
}
template <typename scalar_t>
struct dists {
static __forceinline__ __device__ scalar_t sign(scalar_t val) {
return (0 < val) - (val < 0);
}
// Zero norm
struct zero {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff != 0.0; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
};
// One norm
struct one {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff); }
};
// Special case backward when p is less than two
struct lt_two {
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : sign(diff) * std::pow(std::abs(diff), p - 1) * grad / std::pow(dist, p - 1); }
};
// Two norm
struct two {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += diff * diff; }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return device_sqrt<scalar_t>(agg); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : grad * diff / dist; }
};
// General p norm
struct p {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { agg += std::pow(diff, p); }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return std::pow(agg, static_cast<scalar_t>(1) / p); }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { update += other; }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return dist == 0.0 ? 0 : diff * std::pow(std::abs(diff), p - 2) * grad / std::pow(dist, p - 1); }
};
// Inf norm
struct inf {
static __forceinline__ __device__ void inc(scalar_t& agg, const scalar_t diff, const scalar_t p) { if (diff > agg) { agg = diff; } }
static __forceinline__ __device__ scalar_t finish(const scalar_t agg, const scalar_t p) { return agg; }
static __forceinline__ __device__ void agg(scalar_t& update, const scalar_t other) { if (other > update) { update = other; } }
static __forceinline__ __device__ scalar_t backward(const scalar_t diff, const scalar_t grad, const scalar_t dist, const scalar_t p) { return grad * sign(diff) * (std::abs(diff) == dist); }
};
};
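// Block-wide reduction helper: each warp folds its lanes together with shuffle-downs,
// one partial per warp is parked in shared memory, and the first warp then folds those
// partials; the final value is valid in thread 0 of the block.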
template <typename scalar_t, typename F>
__device__ static inline scalar_t reduce_agg(scalar_t agg) {
for (int offset = warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
__shared__ scalar_t shared[forward_threads];
int lane = threadIdx.x % warpSize;
int warp_id = threadIdx.x / warpSize;
if (lane == 0) {
shared[warp_id] = agg;
}
__syncthreads();
agg = (threadIdx.x < blockDim.x / warpSize) ? shared[lane] : 0.0;
if (warp_id == 0) {
for (int offset = blockDim.x / warpSize / 2; offset > 0; offset /= 2) {
F::agg(agg, WARP_SHFL_DOWN(agg, offset));
}
}
return agg;
}
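// pdist forward: block k handles one (i, j) pair from the flattened upper triangle of the
// distance matrix. The quadratic formula below (with n2 = n - 0.5) recovers row i from the
// linear index k, and j follows; threads then stride over the m features and combine their
// partial sums with reduce_agg.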
template <typename scalar_t, typename F>
__global__ static void pdist_kernel_cuda_impl(scalar_t * result, const scalar_t * self, const int64_t n, const int64_t m, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.x;
const int stride = blockDim.x;
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = self + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
agg = reduce_agg<scalar_t, F>(agg);
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
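// cdist backward: each (blockIdx.y, threadIdx.y) pair owns one distance entry k = i * r2 + j,
// while the x-dimension threads stride over the m features and write the per-feature gradient
// contribution into buffer[(r1 * j + i) * m + feature]; the host sums buffer over its first
// dimension afterwards.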
template <typename scalar_t, typename F>
__global__ static void cdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * x1, const scalar_t * x2, const scalar_t * dist, int64_t gs,
const scalar_t p, const int64_t r1, const int64_t r2, const int64_t m, const int64_t count) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= count) {
return;
}
int64_t i = k / r2;
int64_t j = k % r2;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = x1 + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = x2 + j * m + init;
scalar_t * buff_i = buffer + (r1 * j + i) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
}
}
template <typename scalar_t, typename F>
__global__ static void pdist_backward_kernel_cuda_impl(scalar_t * buffer, const scalar_t * grad, const scalar_t * self, const scalar_t * dist, int64_t gs, const int64_t n, const int64_t m, const int64_t combs, const scalar_t p,
const double n2, const double n2_squared_minus_1) {
const int k = blockIdx.y * blockDim.y + threadIdx.y;
const int init = blockIdx.x * blockDim.x + threadIdx.x;
const int stride = blockDim.x * gridDim.x;
if (k >= combs) {
return;
}
// The -1 accounts for floating point truncation issues
int64_t i = static_cast<int64_t>((n2 - device_sqrt<double>(n2_squared_minus_1 - 2 * k)));
int64_t j = k - n * i + i * (i + 1) / 2 + i + 1;
int64_t ib = j - i - 1;
int64_t jb = n - 2 - i;
const scalar_t grad_k = grad[k * gs];
const scalar_t dist_k = dist[k];
const scalar_t * const start = self + i * m;
const scalar_t * const end = start + m;
const scalar_t * self_i = start + init;
const scalar_t * self_j = self + j * m + init;
scalar_t * buff_i = buffer + (ib * n + i) * m + init;
scalar_t * buff_j = buffer + (jb * n + j) * m + init;
for (; self_i < end; self_i += stride, self_j += stride, buff_i += stride, buff_j += stride) {
const scalar_t res = F::backward(*self_i - *self_j, grad_k, dist_k, p);
*buff_i = res;
*buff_j = -res;
}
}
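// cdist forward: one block per (i, j) pair, where i indexes a row of x1 and j a row of x2;
// threads stride over the m features, accumulate with F::inc, and reduce_agg combines the
// per-thread partials before thread 0 writes the distance.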
template <typename scalar_t, typename F>
__global__ static void cdist_kernel_cuda_impl(scalar_t * result, const scalar_t * x1, const scalar_t * x2, const scalar_t p, const int64_t r1, const int64_t r2, const int64_t m) {
const int k = blockIdx.x;
const int64_t i = k / r2;
const int64_t j = k % r2;
const int stride = blockDim.x;
const scalar_t * const start = x1 + i * m;
const scalar_t * const end = start + m;
const scalar_t * a = start + threadIdx.x;
const scalar_t * b = x2 + j * m + threadIdx.x;
scalar_t agg = 0.0;
for (; a < end; a += stride, b += stride) {
F::inc(agg, std::abs(*a - *b), p);
}
agg = reduce_agg<scalar_t, F>(agg);
if (threadIdx.x == 0) {
result[k] = F::finish(agg, p);
}
}
void cdist_kernel_impl(Tensor& result, const Tensor& x1, const Tensor& x2, double p) {
int64_t r1 = x1.size(-2);
int64_t r2 = x2.size(-2);
int64_t m = x1.size(-1);
const dim3 grid(r1*r2);
const dim3 block(forward_threads);
AT_DISPATCH_FLOATING_TYPES(x1.scalar_type(), "cdist_cuda", [&] {
if (p == 0.0) {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (p == 1.0) {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (p == 2.0) {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else if (std::isinf(p)) {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
} else {
cdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(result.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), p, r1, r2, m);
}
});
AT_CUDA_CHECK(cudaGetLastError());
}
void pdist_forward_kernel_impl(Tensor& result, const Tensor& self, double p) {
const dim3 grid(result.numel());
const dim3 block(forward_threads);
int64_t n = self.size(0);
int64_t m = self.size(1);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_cuda", [&] {
if (p == 0.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::zero><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 1.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
} else {
pdist_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(result.data<scalar_t>(), self.data<scalar_t>(), n, m, p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(cudaGetLastError());
}
void pdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& self, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || self.numel() == 0) {
result.fill_(0);
return;
}
const int64_t n = result.size(0);
int64_t m = self.size(1);
const int block_x = 64;
// NB: be careful with changing block_y; as it's currently written, grid_y is limited to be 2^16.
// From binary search, block_y of 16 gives us max pdist dim0 of 1449,
// block_y of 4 gives us max pdist dim0 of 725.
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
// https://github.com/pytorch/pytorch/issues/15511 demonstrated we need to do
// some math in fp64 -- this is just minimizing the amount of fp64 math we do on the device.
const double n2 = n - .5;
const double n2_squared_minus_1 = n2 * n2 - 1;
Tensor buffer = at::empty({n - 1, result.size(0), result.size(1)}, result.options());
AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "pdist_cuda_backward", [&] {
if (p == 1.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p < 2.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (p == 2.0) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else if (std::isinf(p)) {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
} else {
pdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), self.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(0), n, m, dist.numel(), p, n2, n2_squared_minus_1);
}
});
AT_CUDA_CHECK(cudaGetLastError());
at::sum_out(result, buffer, 0);
}
void cdist_backward_kernel_impl(Tensor& result, const Tensor& grad, const Tensor& x1, const Tensor& x2, const double p, const Tensor& dist) {
if (p == 0.0 || grad.numel() == 0 || x1.numel() == 0 || x2.numel() == 0) {
result.fill_(0);
return;
}
const int64_t r1 = x1.size(-2);
const int64_t r2 = x2.size(-2);
const int64_t m = x1.size(-1);
const int block_x = 64;
const int block_y = 16;
const int grid_x = (m + block_x * 8 - 1) / (block_x * 8);
const int grid_y = (dist.numel() + block_y - 1) / block_y;
const dim3 grid(grid_x, grid_y);
const dim3 block(block_x, block_y);
const int64_t count = dist.numel();
Tensor buffer = at::empty({r2, r1, m}, result.options());
AT_DISPATCH_FLOATING_TYPES(result.type(), "cdist_cuda_backward", [&] {
if (p == 1.0) {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::one><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (p < 2.0) {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::lt_two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (p == 2.0) {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::two><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else if (std::isinf(p)) {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::inf><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
} else {
cdist_backward_kernel_cuda_impl<scalar_t, dists<scalar_t>::p><<<grid, block>>>(buffer.data<scalar_t>(), grad.data<scalar_t>(), x1.data<scalar_t>(), x2.data<scalar_t>(), dist.data<scalar_t>(), grad.stride(1), p, r1, r2, m, count);
}
});
AT_CUDA_CHECK(cudaGetLastError());
at::sum_out(result, buffer, 0);
}
} // anonymous namespace
REGISTER_DISPATCH(pdist_forward_stub, &pdist_forward_kernel_impl);
REGISTER_DISPATCH(pdist_backward_stub, &pdist_backward_kernel_impl);
REGISTER_DISPATCH(cdist_stub, &cdist_kernel_impl);
REGISTER_DISPATCH(cdist_backward_stub, &cdist_backward_kernel_impl);
}} // at::native
|
014ccb3d62fc0fec91c33b823f3ad330ac357c4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include "kernels.h"
#define N 32
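// msort comes from kernels.h (not shown here); it presumably merge-sorts the N ints in kerx
// on the device, using tmp as scratch space.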
int main()
{int tmpx[N];
//int x[N]={64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
int x[N]={32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
int *kerx,*tmp,i;//*counter,c=99;
hipMalloc(&kerx,sizeof(int)*N);
hipMalloc(&tmp,sizeof(int)*N);
//hipMalloc(&counter,sizeof(int));
hipMemcpy(kerx,x,sizeof(int)*N,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( msort), dim3(1),dim3(N), 0, 0, kerx,tmp,N);
hipDeviceSynchronize();
hipMemcpy(x,kerx,sizeof(int)*N,hipMemcpyDeviceToHost);
hipMemcpy(tmpx,tmp,sizeof(int)*N,hipMemcpyDeviceToHost);
//hipMemcpy(&c,counter,sizeof(int),hipMemcpyDeviceToHost);
//printf("\ncounter-%d\n",c);
for(i=0;i<N;i++)
printf("%d\n",x[i]);
}
| 014ccb3d62fc0fec91c33b823f3ad330ac357c4a.cu | #include<stdio.h>
#include "kernels.h"
#define N 32
int main()
{int tmpx[N];
//int x[N]={64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
int x[N]={32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
int *kerx,*tmp,i;//*counter,c=99;
cudaMalloc(&kerx,sizeof(int)*N);
cudaMalloc(&tmp,sizeof(int)*N);
//cudaMalloc(&counter,sizeof(int));
cudaMemcpy(kerx,x,sizeof(int)*N,cudaMemcpyHostToDevice);
msort<<<1,N>>>(kerx,tmp,N);
cudaThreadSynchronize();
cudaMemcpy(x,kerx,sizeof(int)*N,cudaMemcpyDeviceToHost);
cudaMemcpy(tmpx,tmp,sizeof(int)*N,cudaMemcpyDeviceToHost);
//cudaMemcpy(&c,counter,sizeof(int),cudaMemcpyDeviceToHost);
//printf("\ncounter-%d\n",c);
for(i=0;i<N;i++)
printf("%d\n",x[i]);
}
|
5ae3f5e1db62baa253170f8ef3e34c6ab998a5ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 256
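// Adds two N-element vectors on the GPU (one block of N threads) and on the CPU, times the
// host<->device copies and both implementations (hipEvents for the GPU path, clock() for the
// CPU path), checks that the results match, and reports the speedup.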
__global__ void vecAdd(int *a,int *b,int *c);
void vecAdd_cpu(int a[N],int b[N],int c[N]){
for(int i=0;i<N;i++){
c[i]=a[i]+b[i];
}
}
int main(){
int a[N],b[N],gpu_add[N],cpu_add[N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
for(int i=0;i<N;i++){
a[i]=i+i;
b[i]=i*i;
}
int size=N*sizeof(int);
hipMalloc((void**) &dev_a, size);
hipMalloc((void**) &dev_b,size);
hipMalloc((void**) &dev_c,size);
hipEvent_t startinit,endinit;
hipEventCreate(&startinit);
hipEventCreate(&endinit);
hipEventRecord(startinit, 0);
hipMemcpy(dev_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(dev_b,b,size,hipMemcpyHostToDevice);
hipEventRecord(endinit, 0);
hipEventSynchronize(endinit);
hipEventElapsedTime(&timeinit, startinit, endinit);
hipEvent_t gpu_start,gpu_end;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_end);
hipEventRecord(gpu_start, 0);
hipLaunchKernelGGL(( vecAdd), dim3(1),dim3(N), 0, 0, dev_a,dev_b,dev_c);
hipDeviceSynchronize();
hipEventRecord(gpu_end, 0);
hipEventSynchronize(gpu_end);
hipEventElapsedTime(&time_gpu, gpu_start, gpu_end);
hipEvent_t startindex,endindex;
hipEventCreate(&startindex);
hipEventCreate(&endindex);
hipEventRecord(startindex, 0);
hipMemcpy(gpu_add,dev_c,size,hipMemcpyDeviceToHost);
hipEventRecord(endindex, 0);
hipEventSynchronize(endindex);
hipEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
vecAdd_cpu(a,b,cpu_add);
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with 1 block and %d threads\n",N);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
if(gpu_add[i]!=cpu_add[i]){
flag=0;
break;
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
exit(0);
}
__global__ void vecAdd(int *a,int *b,int *c){
int i=threadIdx.x;
c[i]=a[i]+b[i];
}
| 5ae3f5e1db62baa253170f8ef3e34c6ab998a5ea.cu | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define N 256
__global__ void vecAdd(int *a,int *b,int *c);
void vecAdd_cpu(int a[N],int b[N],int c[N]){
for(int i=0;i<N;i++){
c[i]=a[i]+b[i];
}
}
int main(){
int a[N],b[N],gpu_add[N],cpu_add[N];
int *dev_a, *dev_b, *dev_c;
float time_gpu,time_cpu,timeindex,timeinit;
for(int i=0;i<N;i++){
a[i]=i+i;
b[i]=i*i;
}
int size=N*sizeof(int);
cudaMalloc((void**) &dev_a, size);
cudaMalloc((void**) &dev_b,size);
cudaMalloc((void**) &dev_c,size);
cudaEvent_t startinit,endinit;
cudaEventCreate(&startinit);
cudaEventCreate(&endinit);
cudaEventRecord(startinit, 0);
cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
cudaEventRecord(endinit, 0);
cudaEventSynchronize(endinit);
cudaEventElapsedTime(&timeinit, startinit, endinit);
cudaEvent_t gpu_start,gpu_end;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_end);
cudaEventRecord(gpu_start, 0);
vecAdd<<<1,N>>> (dev_a,dev_b,dev_c);
cudaDeviceSynchronize();
cudaEventRecord(gpu_end, 0);
cudaEventSynchronize(gpu_end);
cudaEventElapsedTime(&time_gpu, gpu_start, gpu_end);
cudaEvent_t startindex,endindex;
cudaEventCreate(&startindex);
cudaEventCreate(&endindex);
cudaEventRecord(startindex, 0);
cudaMemcpy(gpu_add,dev_c,size,cudaMemcpyDeviceToHost);
cudaEventRecord(endindex, 0);
cudaEventSynchronize(endindex);
cudaEventElapsedTime(&timeindex, startindex, endindex);
clock_t cpu_start,cpu_end;
cpu_start=clock();
vecAdd_cpu(a,b,cpu_add);
cpu_end=clock();
timeinit/=1000;
timeindex/=1000;
time_gpu/=1000;
time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
printf("Cuda program launched with 1 block and %d threads\n",N);
printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
printf("GPU Time:%f seconds\n",time_gpu);
printf("CPU Time:%f seconds\n",time_cpu);
int flag=1;
for(int i=0;i<N;i++){
if(gpu_add[i]!=cpu_add[i]){
flag=0;
break;
}
}
if(flag){
printf("TEST PASSED\n");
printf("SPEED UP:%f\n",time_cpu/time_gpu);
}
else{
printf("TEST FAILED\n");
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
exit(0);
}
__global__ void vecAdd(int *a,int *b,int *c){
int i=threadIdx.x;
c[i]=a[i]+b[i];
}
|
b68ba52850c6cf727c9e06f5ac9574b7bd2ba82b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "IndiceTools_GPU.h"
#include "RipplingMath.h"
using namespace gpu;
// Note: the choice of file name is important!
// VagueDevice.cu, not Vague.cu
// In the latter case there is a linkage problem, because the .cu file has the same name as a (host) .cpp file.
// We therefore append Device (or anything else) so that the names are different.
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
// __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
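// Grid-stride loop over the w * h pixels: each thread recovers (i, j) from its flat index s
// and lets RipplingMath colour that pixel for time t.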
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t)
{
RipplingMath ripplingMath = RipplingMath(w, h);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
int s = TID;
int i;
int j;
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j);
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t);
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| b68ba52850c6cf727c9e06f5ac9574b7bd2ba82b.cu | #include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "IndiceTools_GPU.h"
#include "RipplingMath.h"
using namespace gpu;
// Note: the choice of file name is important!
// VagueDevice.cu, not Vague.cu
// In the latter case there is a linkage problem, because the .cu file has the same name as a (host) .cpp file.
// We therefore append Device (or anything else) so that the names are different.
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
// __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t)
{
RipplingMath ripplingMath = RipplingMath(w, h);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
int s = TID;
int i;
int j;
while (s < WH)
{
IndiceTools::toIJ(s, w, &i, &j);
ripplingMath.colorIJ(&ptrDevPixels[s], i, j, t);
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
946e4ee4c1f992d2e3ec81071100adec244e54d2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "j2d9pt-512-9-512_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
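// Auto-generated (AN5D-style) temporally blocked kernel: one launch fuses __side0Len = 9 time
// steps of the 2-D 9-point stencil. Each thread owns one column __c2, streams rows through the
// per-step register queues __reg_k_0..__reg_k_4, and stages the current row in the
// double-buffered shared array __c_sb.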
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_8_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_8_0, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_8_0, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_8_0, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_8_0, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_8_0, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_8_0, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_8_0, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(5, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(9, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(10, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(12, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(13, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(14, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(15, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(17, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
__c_sb = __c_sb_double + __blockSize * 1;
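// Last block along dimension 1: stream rows five at a time (one full register
// rotation per loop iteration), emitting results 18 rows behind the load front,
// then drain the pipeline at the bottom boundary in the branch chain below.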
if (__c1Id == __side1Num - 1)
{
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
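// Bottom-boundary drain: depending on how many rows remain (__h + k reaching the
// block's extent for k = 0..4), load the leftover rows and finish the deeper time
// levels from the registers already in flight, with the last loaded rows standing
// in for neighbours that would lie outside the block.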
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h + 1, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
}
}
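// Interior blocks along dimension 1: stream the whole overlapped extent; the extra
// rows at either end exist only to supply halo data for the fused time steps, so no
// boundary drain is needed here.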
else
{
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
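// Epilogue for interior blocks: finish any rows left over from the loop unrolled by
// five, returning as soon as __h reaches the end of the overlapped extent.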
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
}
}
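/* kernel0_8: one sweep that fuses 8 time steps of the 9-point star stencil defined
 * by __CALCEXPR below (radius 2 in each dimension). Each thread owns one column of
 * a 512-column overlapped tile along dimension 2 (480 output columns plus 2*8 halo
 * columns per side), and the block streams along dimension 1, carrying a 5-row
 * register window per fused time level plus a double-buffered shared-memory row for
 * the column neighbours. */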
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
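// Rotating register window: __reg_<t>_<i> holds intermediate time level t for one of
// the five rows currently in flight (i is the row index modulo 5, i.e. the
// 2*__halo1 + 1 rows required by the radius-2 stencil).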
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
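// Validity predicates: __loadValid / __updateValid bound this thread's global column;
// each __writeValidK trims another __halo2 columns from both sides of the tile, since
// every fused step consumes one halo ring; only __writeValid8 (== __storeValid)
// columns are written back.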
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
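// __LOAD reads one row of the current time plane of A into a register; __CALCEXPR is
// the weighted 9-point star update (row neighbours from registers, column neighbours
// from the shared row); __CALCk applies it at intermediate level k where valid and
// otherwise passes the centre value through; __STORE applies the final (8th) step and
// writes into the opposite time plane of A.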
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
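// First block along dimension 1: the top two rows are boundary rows, loaded directly
// into the deepest register level (__reg_7_*) and reused as fixed boundary values for
// every intermediate time level while the eight-step pipeline is primed one row at a
// time.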
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
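    // Interior c1 tiles: reload the 33-row overlap with the preceding tile and warm all eight
    // stages, emitting only the last primed row (__STORE(16, ...)).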
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
__c_sb = __c_sb_double + __blockSize * 0;
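    // Steady state. The last c1 tile must also drain the pipeline at the bottom boundary, so it
    // gets a dedicated epilogue; all other tiles stream to the end of the overlapped tile.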
if (__c1Id == __side1Num - 1)
{
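      // Stream rows in groups of five (the register-rotation period). A row loaded at __h completes
      // an output 16 rows behind it (8 stages x halo 2), hence __STORE(__h - 16, ...). The loop bound
      // leaves the final 0-4 rows to the drain branches below.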
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
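      // Drain: handle the remaining 0-4 rows case by case and flush the partially filled stages.
      // Where a stage would need rows beyond the loaded window, the raw boundary registers
      // (__reg_0_x) are substituted, since rows in the halo are not advanced in time.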
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
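    // Tiles that do not touch the bottom boundary: keep streaming to the end of the overlapped
    // tile; the neighbouring tile recomputes the shared halo rows.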
else
{
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
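      // Tail: up to four leftover rows, processed one at a time with an early return once the
      // overlapped tile length (__side1LenOl) is reached.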
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
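// kernel0_7: appears to be an auto-generated (AN5D-style) kernel that fuses 7 time steps of a
// radius-2 star stencil per sweep (__side0Len = 7). Rows (c1) are streamed through a register
// pipeline; columns (c2) are held in double-buffered shared memory.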
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
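// Thread/block mapping: each thread owns one column __c2; blockIdx.x enumerates (row tile,
// column tile) pairs, with __c1Id selecting the row tile of height __side1Len.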
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
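// __reg_<k>_<s>: value of the row currently in rotation slot s after k stencil applications
// (k = 0 holds freshly loaded input rows); slots rotate modulo 5 over the 5-row window.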
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
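// Each deeper pipeline stage shrinks the writable column band by one halo (2 columns) per side;
// only threads still inside the band after 7 stages perform the final global store.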
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
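// __LOAD streams one row of the current time plane into a register; __CALCEXPR applies the
// 9-point star update (c1 neighbours from registers, c2 neighbours from shared memory; the
// coefficients are single-precision literals applied to double data). __CALCk advances a row to
// intermediate step k (pass-through outside its valid band); __STORE computes the 7th step and
// writes it to the other time plane.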
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
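// Topmost row tile: rows 0 and 1 are kept in __reg_6_0/__reg_6_1 and re-used as the fixed upper
// boundary inputs of every stage while the 7-stage pipeline is primed; the first result is
// written at row 2.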
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
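// Non-top tiles: no boundary special-casing; the pipeline is primed with the first 29
// overlapped rows, producing its first output at row 14.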
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
__c_sb = __c_sb_double + __blockSize * 1;
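// Pipeline primed: switch __c_sb to the second half of the double buffer, then either drain
// against the bottom boundary (last row tile) or stream the full overlapped tile height.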
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
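// Epilogue for the last row tile: depending on how many rows remain, flush the outstanding
// pipeline stages, substituting the last loaded rows for the missing bottom halo.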
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
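// Non-terminal tiles: 5-way unrolled steady-state loop followed by a remainder ladder that
// returns once __h reaches the overlapped tile height.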
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
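// kernel0_6: same radius-2 star stencil, specialized to fuse 6 time steps per sweep
// (__side0Len = 6), which leaves a wider usable column band per block (__side2Len = 488).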
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
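  /* Macro pipeline:
   *   __LOAD     reads row h of the (__c0 % 2) time plane into a register.
   *   __CALCEXPR applies the 9-point star stencil: five rows along c1 come from
   *              registers, the +/-1 and +/-2 neighbours along c2 from shared
   *              memory, with the weighted sum normalised by 118.
   *   __CALCn    runs stage n when its guard __writeValidn holds, otherwise it
   *              forwards the centre value unchanged.
   *   __STORE    writes the stage-6 result into the ((c0 + 1) % 2) plane.
   * The __c1Id == 0 branch below reuses rows 0 and 1 verbatim as the upper
   * boundary operands of every stage; the other tiles (else branch) warm all
   * six stages up from loaded rows before their first __STORE. */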
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
__c_sb = __c_sb_double + __blockSize * 0;
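  /* Steady state: the shared-memory window is pinned to the first buffer half,
   * then the code branches on whether this block owns the last tile along c1
   * (which must drain the pipeline against the lower boundary) or streams a
   * full overlapped tile. */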
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
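    /* Bottom tile: the remainder branches below flush the six in-flight
     * pipeline stages for the rows left after the unrolled loop, reusing the
     * last loaded rows as the lower boundary operands instead of loading past
     * the domain. */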
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
else
{
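    /* Tiles that are not last along c1: stream the full overlapped extent of
     * __side1LenOl rows, five rows per iteration, with no boundary handling. */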
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
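    /* Leftover rows after the unroll-by-5 loop; each step re-checks the bound
     * and returns once the overlapped extent is exhausted. */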
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
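/* kernel0_5: the same stencil with five fused time steps. The tile is
 * 512 x 492 with a 10-row/column overlap, so the pipeline needs only stages
 * 1..5 (__reg_0_* .. __reg_4_* and __writeValid1..5).
 *
 * Host-side note (an inference from the index arithmetic below, not from host
 * code present in this file): each of these kernels appears to expect a 1-D
 * grid of __side1Num * __side2Num blocks with __side2LenOl threads per block,
 * e.g. something like
 *   kernel0_5<<<side1Num * side2Num, 512>>>(A, dimsize, timestep, c0);
 * where 512 == __side2LenOl for the tile sizes chosen here. */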
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
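  /* Same load/compute/store macros as kernel0_6, one stage shorter:
   * __storeValid is __writeValid5 and the stage-5 result goes to __DEST. */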
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
__c_sb = __c_sb_double + __blockSize * 1;
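  /* Steady state for kernel0_5. The shared-memory window is pinned to the
   * second buffer half here (kernel0_6 used the first), presumably because the
   * shorter prologue toggles the double buffer an odd number of times. */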
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
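/*
 * kernel0_4: one pass of the same radius-2 (halo = 2) star stencil with
 * __side0Len = 4 time steps fused per launch.  The layout mirrors the
 * 5-stage kernel above: rows are streamed along c1 through four register
 * stages (__reg_0_* .. __reg_3_*, each a 5-register rotation), the c2
 * neighbourhood comes from the double-buffered shared row __c_sb, and only
 * the last stage (__writeValid4) commits results, 8 rows behind the load
 * front (__STORE(__h - 8, ...)).
 */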
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
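/* Pipeline macros for this kernel:
 *   __LOAD(reg, h)    - fetch row __c1Pad2 - __halo1 + h of the current time
 *                       plane (selected by __c0 % 2) into a register, guarded
 *                       by __loadValid.
 *   __CALCn(out, ...) - stage n of the fused update: __CALCSETUP flips the
 *                       shared-memory double buffer, publishes the centre row
 *                       and syncs; lanes inside the stage-n window
 *                       (__writeValidn) evaluate the 9-point __CALCEXPR
 *                       (neighbour rows via registers, +/-1 and +/-2 columns
 *                       via __SBREF, divided by 118), all other lanes pass
 *                       the centre value through unchanged.
 *   __STORE(h, ...)   - final stage: apply __CALCEXPR once more and write to
 *                       the opposite time plane (__DEST) when __storeValid
 *                       (= __writeValid4) holds.
 */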
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
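/* Steady state: the last c1 tile streams rows until its tail and then drains
 * the pipeline through one of the __h + 0 .. __h + 4 epilogues; interior
 * tiles run the same unrolled loop against __side1LenOl and peel the final
 * rows with early returns. */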
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
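/*
 * kernel0_3: identical stencil with __side0Len = 3 fused time steps, so the
 * pipeline has three register stages, stores lag the load front by 6 rows
 * (__STORE(__h - 6, ...)), and the useful c2 width shrinks to
 * __side2Len = 500 while the overlapped block width stays at 512 lanes.
 */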
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
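/*
 * kernel0_2: __side0Len = 2 fused time steps (two register stages, stores lag
 * the load front by 2 * __side0Len = 4 rows, __side2Len = 504).  Across
 * kernel0_4/_3/_2 the overlapped width __side2LenOl is constant at 512, so
 * each variant can use the same block size; only the productive interior
 * narrows as more steps are fused.
 *
 * Minimal host-side launch sketch -- an assumption for illustration, not part
 * of this file (the real driver, the device pointer name d_A and error
 * handling live elsewhere):
 *
 *   const unsigned len = dimsize - 2 * 2;                 // interior length
 *   const unsigned side1Num = (len + 512 - 1) / 512;      // c1 tiles
 *   const unsigned side2Num = (len + 504 - 1) / 504;      // c2 tiles
 *   hipLaunchKernelGGL(kernel0_2,
 *                      dim3(side1Num * side2Num), dim3(512),
 *                      0, 0,                              // static smem only
 *                      d_A, dimsize, timestep, c0);
 */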
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
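/* Per-kernel helper macros (each kernel redefines the same names for its own
   pipeline depth):
   - __LOAD / __DEST : read the (__c0 % 2) plane of A, write the other plane.
   - __CALCEXPR      : 9-point, radius-2 star stencil; the two rows above and the
     two rows below come from registers, the centre row's column neighbours
     (offsets -2..+2) come from the shared-memory row buffer; sum divided by 118.
   - __CALCSETUP / __DB_SWITCH : flip the double-buffered shared row and publish
     this thread's centre value before each stage computes.
   - __CALC1 : stage computation guarded by __writeValid1; halo threads just pass
     the centre value through.  __STORE writes only where __storeValid holds. */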
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
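    /* Pipeline priming.  The block owning the top c1 boundary (__c1Id == 0)
       feeds the untouched boundary rows 0 and 1 straight into the stage
       registers and can already store row 2; interior blocks load the same
       nine rows but only start storing at row 4, once both stages are valid. */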
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
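    /* Steady state: stream five rows per iteration (one full rotation of the
       register window).  The block owning the bottom c1 boundary stops early
       and flushes the pipeline with the else-if chain below; all other blocks
       run to the end of the overlapped tile (__side1LenOl rows). */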
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
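/* kernel0_1: a single time step (__side0Len = 1).  There are no intermediate
   pipeline stages; each freshly loaded 5-row window is combined by __STORE
   directly into the other parity plane.  With no stages to prime, the boundary
   and interior prologues below are identical. */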
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
 | 946e4ee4c1f992d2e3ec81071100adec244e54d2.cu |
#include "j2d9pt-512-9-512_kernel.hu"
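/* Machine-generated (AN5D-style) CUDA kernels for a 2-D 9-point, radius-2 star
   Jacobi-like stencil on 512-row tiles, with variants fusing 1 to 9 time steps.
   __sbref_wrap below is the helper behind the __SBREF macro: it reads a
   neighbouring column of the current centre row from shared memory. */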
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
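/* kernel0_9: nine time steps fused per sweep (__side0Len = 9).  Nine chained
   stages (__CALC1..__CALC8 and the final __STORE) each consume a 5-row window
   of the previous stage's output; __writeValid1..__writeValid9 shrink the valid
   interior of the 512-thread row by one halo (2 columns) per stage and side. */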
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
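    /* Pipeline priming for nine stages: the top-boundary block reuses rows 0
       and 1 as fixed boundary values for every stage and issues its first
       store (row 2) after loading row 20; interior blocks fill all stages from
       rows 0-36 before their first store at row 18. */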
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_8_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_8_0, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_8_0, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_8_0, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_8_0, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_8_0, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_8_0, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_8_0, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(5, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(9, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(10, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(12, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(13, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(14, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(15, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(17, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
__c_sb = __c_sb_double + __blockSize * 1;
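    /* Steady state for the bottom-boundary block: five rows per iteration;
       once fewer than five rows remain, the else-if chain flushes the
       pipeline, whose stores trail the loads by 18 rows. */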
if (__c1Id == __side1Num - 1)
{
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
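/* Drain whatever rows remain in this tile.  The "if (0) {}" below is a
 * generated placeholder so each possible remainder (0 to 4 rows left) can
 * be handled by a uniform chain of "else if" branches. */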
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h + 1, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
}
}
else
{
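/* Interior tiles: stream five rows per loop iteration up to the end of the
 * overlapped tile; the guarded epilogue after the loop emits up to four
 * more rows when the remaining row count is not a multiple of five, each
 * guard returning as soon as __h reaches __side1LenOl. */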
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
}
}
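/*
 * kernel0_8: temporally blocked variant that fuses __side0Len = 8 time
 * steps of the radius-2 star stencil per sweep over A.  Each thread block
 * streams a 512-row (c1) by 480-column (c2) tile, widened by
 * __halo1 * __side0Len = 16 overlap cells on every side so all 8 fused
 * steps can be computed redundantly near the tile edges; each thread owns
 * one c2 column, and rows are pipelined through the __reg_k_* registers
 * declared below.
 */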
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
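/* Guard predicates: __loadValid keeps global loads inside the padded c2
 * range; __writeValidN shrinks the usable columns by one halo (2) per
 * fused step, so step N only produces values that are still correct after
 * N stencil applications; the final step's predicate gates the global
 * store via __storeValid. */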
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
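/* Streaming macros: __LOAD reads row h of the current time plane (the
 * (__c0 % 2) half of A) into a register; __CALCk stashes the centre row in
 * shared memory for the c2-direction neighbours and applies one stencil
 * update for fused step k (or a plain copy outside that step's valid
 * columns); __STORE applies the last fused step and writes into the other
 * half of A; __DB_SWITCH ping-pongs the two shared-memory row buffers. */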
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
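/* Prologue for the first tile along c1: rows 0..18 are loaded and pushed
 * through the CALC stages before the first output row (row 2) can be
 * stored, since the 8 fused steps together need __halo1 * 8 = 16 rows of
 * lookahead beyond the row being written. */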
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
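  // Tiles other than the topmost one (__c1Id != 0): no upper-boundary clamping is
  // needed, so the prologue below simply loads rows 0..32, runs them through
  // __CALC1..__CALC7, and stores only row 16 -- the first row for which every
  // pipeline stage, including the final __STORE update, has valid inputs.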
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
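  // Point the shared-memory pointer at the first of the two buffers before the
  // steady state. The last tile in the c1 direction (next branch) must also drain
  // the pipeline against the lower boundary; all other tiles just stream rows.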
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
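    // Steady-state loop for the last tile: each iteration loads five new rows,
    // advances all pipeline stages once per row, and stores results 16 rows behind
    // the load front. The loop bound leaves the epilogue cases below at most four
    // further rows to load.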
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
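    // Drain the pipeline at the lower boundary. The `if (0) {} else if ...` chain
    // is a code-generation idiom: it picks one of five epilogues depending on how
    // many rows remain below __h, and the deeper-stage inputs that would come from
    // rows past the tile edge are replaced by the corresponding raw loaded values
    // (__reg_0_*), which appears to keep boundary rows fixed across the fused steps.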
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
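  // Tiles that do not touch the lower boundary: plain streaming over the
  // overlapped extent __side1LenOl, with no boundary epilogue.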
else
{
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
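    // At most four rows remain after the 5-way unrolled loop; each is handled by
    // one more pipeline advance, with an early return once __side1LenOl is reached.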
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
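// kernel0_7: the same star stencil, generated for a temporal-blocking degree of 7
// (__side0Len = 7, presumably seven fused time steps per sweep). It keeps seven
// register planes (__reg_0_* .. __reg_6_*) feeding __CALC1..__CALC6 plus a final
// __STORE update, over a 512-row tile in c1 and a 484-column useful width in c2
// with a halo of 2 in each direction.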
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
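  // Double-buffered shared-memory row: __CALCSETUP publishes each stage's centre
  // register so neighbouring threads can read the +/-1 and +/-2 columns through
  // __SBREF, and __DB_SWITCH flips between the two halves before every exchange.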
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
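  // Validity predicates: __loadValid / __updateValid bound this thread's column to
  // the tile plus its halo, __writeValidK further requires the column to be at
  // least K * __halo2 columns away from the overlapped tile edge (each fused time
  // step consumes one halo's worth of columns), and __storeValid is the strictest
  // of them.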
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
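  // Helper macros: __LOAD fetches row h of the current time plane into a register;
  // __CALCEXPR is the 9-point star update (rows -2..+2 from registers, columns
  // -2..+2 from shared memory, divided by 118); __CALCk applies it at pipeline
  // depth k when __writeValidk holds and otherwise passes the centre row through
  // unchanged; __STORE applies the final update and writes it to the other time
  // plane (__DEST).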
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
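  // Topmost tile: rows 0 and 1 (the upper halo) are loaded once into __reg_6_0 and
  // __reg_6_1 (reusing the deepest stage's registers) and fed unchanged into every
  // __CALC at the upper boundary, which appears to keep the boundary rows at their
  // original values across the fused time steps.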
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
__c_sb = __c_sb_double + __blockSize * 1;
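    // Steady state: stream one row per iteration through the full register
    // pipeline, storing results 14 rows behind the load front. The last
    // c1-tile takes the branch below (a shortened main loop followed by drain
    // epilogues); interior tiles use the simpler loop in the else branch and
    // stop once __side1LenOl rows have been consumed.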
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
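      // Epilogue for the last tile: the remaining 0..4 rows (selected by the
      // __h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2 tests below)
      // are loaded and the partially filled stages are flushed, with the raw
      // boundary loads (__reg_0_*) appearing to stand in where a deeper stage
      // has no further input.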
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
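// kernel0_6: the same order-2 star stencil (__CALCEXPR is identical), but with
// six time steps fused per launch (__side0Len = 6, __storeValid ==
// __writeValid6). One pipeline stage fewer means register queues
// __reg_0_*..__reg_5_* of depth 5 each and a wider usable tile
// (__side2Len = 488); the structure otherwise mirrors the variant above:
// double-buffered shared row for the c2 neighbourhood, a warm-up prologue and,
// by analogy, a streaming steady state with drain epilogues.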
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
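  // First c1-tile of the six-stage pipeline: the top halo rows 0 and 1 are
  // kept in __reg_5_0/__reg_5_1, and storing starts at row 2 as soon as all
  // five intermediate stages hold valid data (first __STORE after loading
  // row 14).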
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
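    /* Streaming phase of this kernel's 12-row-latency pipeline: one output row is
     * stored per loaded row, 12 rows behind the load cursor. */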
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
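      /* Bottom tail for the last c1 block: 0-4 input rows remain. Each case below
       * flushes the in-flight pipeline, substituting the last raw rows (__reg_0_*)
       * for neighbours that fall in the untouched bottom halo. */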
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
else
{
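      /* Interior c1 blocks: same streaming loop, then single-row steps that bail
       * out exactly at __side1LenOl. */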
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
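/* kernel0_5: temporally fuses __side0Len = 5 sweeps of the radius-2 star stencil
 * defined by __CALCEXPR. Each thread owns one c2 column and streams down c1,
 * keeping a 5-row register window per pipeline stage (__reg_k_0..__reg_k_4);
 * c2-direction neighbours are exchanged through the double-buffered shared-memory
 * row __c_sb_double. The masks __writeValid1..__writeValid5 shrink the valid
 * column range by one halo (2 columns) per sweep, and __STORE, guarded by
 * __storeValid (= __writeValid5), applies the fifth sweep directly into __DEST. */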
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
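    /* Prologue: warm the pipeline on rows 0..20. The first c1 block keeps the raw
     * halo rows 0-1 in __reg_4_0/__reg_4_1 and feeds them to every stage as the
     * out-of-range upper inputs, then stores rows 2..10; other c1 blocks
     * redundantly recompute their upper overlap and store only row 10. */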
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
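    /* Streaming phase: reset the shared-memory buffer parity, then retire one
     * output row per loaded row, 10 rows (5 sweeps x halo 2) behind the load
     * cursor. The last c1 block drains its 0-4 leftover bottom rows in the tail
     * branches below; the other blocks stop exactly at __side1LenOl. */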
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
else
{
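      // Interior blocks along c1 (not the last block of the column): stream through the
      // full overlapped tile height. Each loop iteration loads five new rows, rotates the
      // five-stage register pipeline (__reg_1_* .. __reg_4_*), and stores results ten rows
      // behind the load front; the trailing __DB_SWITCH keeps the shared-memory
      // double-buffer parity aligned across iterations.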
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
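// kernel0_4: AN5D-style temporally blocked stencil sweep that fuses four time steps
// (__side0Len = 4) of the radius-2 star stencil per launch. Data is read from plane
// (c0 % 2) of A and written to plane ((c0 + 1) % 2); each thread block streams along
// c1, keeping the c1 neighbourhood in register pipelines and exchanging the c2
// neighbourhood through a double-buffered shared-memory row (__c_sb_double).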
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
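  // Pipeline warm-up: the topmost block along c1 (__c1Id == 0) begins storing as soon as
  // the boundary rows are available, while interior blocks load their overlap region and
  // store only once all intermediate stages are primed.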
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
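// kernel0_3: same streaming structure as kernel0_4, but fuses three time steps
// (__side0Len = 3), so the same 512-wide thread block covers 500 useful columns
// (__side2Len = 500) instead of 496.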
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
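// kernel0_2: two fused time steps (__side0Len = 2); only one intermediate register
// pipeline (__reg_1_*) is needed before the final store, and the 512-wide block
// covers 504 useful columns (__side2Len = 504).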
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = ((((((((((7.1f * (__REGREF(__a, 0))) + (5.1f * (__REGREF(__b, 0)))) + (9.2f * (__SBREF(__c_sb, -2)))) + (12.1f * (__SBREF(__c_sb, -1)))) + (15.f * (__REGREF(__c, 0)))) + (12.2f * (__SBREF(__c_sb, 1)))) + (9.1f * (__SBREF(__c_sb, 2)))) + (5.2f * (__REGREF(__d, 0)))) + (7.2f * (__REGREF(__e, 0)))) / 118); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
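// The streaming loop below keeps five consecutive input rows in the rotating
// registers __reg_0_0..__reg_0_4; each __STORE first runs __CALCSETUP, which
// ping-pongs the shared buffer via __DB_SWITCH and publishes the centre row to
// __c_sb, so __CALCEXPR can combine vertical neighbours held in registers with
// horizontal neighbours read through __SBREF.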
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
|
f1c7639555c4fa0c6b8ef8c1cccf2c3fccdf3c64.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void adaptiveMeanGPU8 (float* D, int32_t D_width, int32_t D_height) {
// Global coordinates and Pixel id
uint32_t u0 = blockDim.x*blockIdx.x + threadIdx.x + 4;
uint32_t v0 = blockDim.y*blockIdx.y + threadIdx.y + 4;
uint32_t idx = v0*D_width + u0;
//Local thread coordinates
uint32_t ut = threadIdx.x + 4;
uint32_t vt = threadIdx.y + 4;
//If out of filter range return instantly
if(u0 > (D_width - 4) || v0 > (D_height - 4))
return;
  //Allocate Shared memory array with an appropriate margin for the bilateral filter
//Since we are using 8 pixels with the center pixel being 5,
//we need 4 extra on left and top and 3 extra on right and bottom
__shared__ float D_shared[32+7][32+7];
//Populate shared memory
if(threadIdx.x == blockDim.x-1){
D_shared[ut+1][vt] = D[idx+1];
D_shared[ut+2][vt] = D[idx+2];
D_shared[ut+3][vt] = D[idx+3];
//D_shared[ut+4][vt] = D[idx+4];
}
if(threadIdx.x == 0){
D_shared[ut-4][vt] = D[idx-4];
D_shared[ut-3][vt] = D[idx-3];
D_shared[ut-2][vt] = D[idx-2];
D_shared[ut-1][vt] = D[idx-1];
}
if(threadIdx.y == 0){
D_shared[ut][vt-4] = D[(v0-4)*D_width+u0];
D_shared[ut][vt-3] = D[(v0-3)*D_width+u0];
D_shared[ut][vt-2] = D[(v0-2)*D_width+u0];
D_shared[ut][vt-1] = D[(v0-1)*D_width+u0];
}
if(threadIdx.y == blockDim.y-1){
D_shared[ut][vt+1] = D[(v0+1)*D_width+u0];
D_shared[ut][vt+2] = D[(v0+2)*D_width+u0];
D_shared[ut][vt+3] = D[(v0+3)*D_width+u0];
//D_shared[ut][vt+4] = D[(v0+4)*D_width+u0];
}
if(D[idx] < 0){
    // zero input disparity maps to -10 (this forces the bilateral
    // weights of all valid disparities to 0 in this region)
D_shared[ut][vt] = -10;
}else{
D_shared[ut][vt] = D[idx];
}
__syncthreads();
// full resolution: 8 pixel bilateral filter width
// D(x) = sum(I(xi)*f(I(xi)-I(x))*g(xi-x))/W(x)
// W(x) = sum(f(I(xi)-I(x))*g(xi-x))
// g(xi-x) = 1
// f(I(xi)-I(x)) = 4-|I(xi)-I(x)| if greater than 0, 0 otherwise
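  // Worked example of the weight above (illustrative numbers only): for a neighbour
  // with I(xi) = 12.5 and centre I(x) = 10.0, f = max(0, 4 - |12.5 - 10.0|) = 1.5,
  // while any neighbour differing by 4 or more disparities gets weight 0.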
// horizontal filter
  // Current pixel being filtered is the middle of our set (4 back; in the original it's 3 for some reason)
  //Note this isn't truly the center since the original uses 8 vector registers
float val_curr = D_shared[ut][vt];
float weight_sum0 = 0;
float weight_sum = 0;
float factor_sum = 0;
for(int32_t i=0; i < 8; i++){
weight_sum0 = 4.0f - fabs(D_shared[ut+(i-4)][vt]-val_curr);
weight_sum0 = max(0.0f, weight_sum0);
weight_sum += weight_sum0;
factor_sum += D_shared[ut+(i-4)][vt]*weight_sum0;
}
if (weight_sum>0) {
float d = factor_sum/weight_sum;
if (d>=0) *(D+idx) = d;
}
__syncthreads();
//Update shared memory
if(threadIdx.x == blockDim.x-1){
D_shared[ut+1][vt] = D[idx+1];
D_shared[ut+2][vt] = D[idx+2];
D_shared[ut+3][vt] = D[idx+3];
//D_shared[ut+4][vt] = D[idx+4];
}
if(threadIdx.x == 0){
D_shared[ut-4][vt] = D[idx-4];
D_shared[ut-3][vt] = D[idx-3];
D_shared[ut-2][vt] = D[idx-2];
D_shared[ut-1][vt] = D[idx-1];
}
if(threadIdx.y == 0){
D_shared[ut][vt-4] = D[(v0-4)*D_width+u0];
D_shared[ut][vt-3] = D[(v0-3)*D_width+u0];
D_shared[ut][vt-2] = D[(v0-2)*D_width+u0];
D_shared[ut][vt-1] = D[(v0-1)*D_width+u0];
}
if(threadIdx.y == blockDim.y-1){
D_shared[ut][vt+1] = D[(v0+1)*D_width+u0];
D_shared[ut][vt+2] = D[(v0+2)*D_width+u0];
D_shared[ut][vt+3] = D[(v0+3)*D_width+u0];
//D_shared[ut][vt+4] = D[(v0+4)*D_width+u0];
}
if(D[idx] < 0){
D_shared[ut][vt] = -10;
}else{
D_shared[ut][vt] = D[idx];
}
__syncthreads();
// vertical filter
// set pixel of interest
val_curr = D_shared[ut][vt];
weight_sum0 = 0;
weight_sum = 0;
factor_sum = 0;
for(int32_t i=0; i < 8; i++){
weight_sum0 = 4.0f - fabs(D_shared[ut][vt+(i-4)]-val_curr);
weight_sum0 = max(0.0f, weight_sum0);
weight_sum += weight_sum0;
factor_sum += D_shared[ut][vt+(i-4)]*weight_sum0;
}
if (weight_sum>0) {
float d = factor_sum/weight_sum;
if (d>=0) *(D+idx) = d;
}
} |
f1c7639555c4fa0c6b8ef8c1cccf2c3fccdf3c64.cu | #include "includes.h"
__global__ void adaptiveMeanGPU8 (float* D, int32_t D_width, int32_t D_height) {
// Global coordinates and Pixel id
uint32_t u0 = blockDim.x*blockIdx.x + threadIdx.x + 4;
uint32_t v0 = blockDim.y*blockIdx.y + threadIdx.y + 4;
uint32_t idx = v0*D_width + u0;
//Local thread coordinates
uint32_t ut = threadIdx.x + 4;
uint32_t vt = threadIdx.y + 4;
//If out of filter range return instantly
if(u0 > (D_width - 4) || v0 > (D_height - 4))
return;
  //Allocate Shared memory array with an appropriate margin for the bilateral filter
//Since we are using 8 pixels with the center pixel being 5,
//we need 4 extra on left and top and 3 extra on right and bottom
__shared__ float D_shared[32+7][32+7];
//Populate shared memory
if(threadIdx.x == blockDim.x-1){
D_shared[ut+1][vt] = D[idx+1];
D_shared[ut+2][vt] = D[idx+2];
D_shared[ut+3][vt] = D[idx+3];
//D_shared[ut+4][vt] = D[idx+4];
}
if(threadIdx.x == 0){
D_shared[ut-4][vt] = D[idx-4];
D_shared[ut-3][vt] = D[idx-3];
D_shared[ut-2][vt] = D[idx-2];
D_shared[ut-1][vt] = D[idx-1];
}
if(threadIdx.y == 0){
D_shared[ut][vt-4] = D[(v0-4)*D_width+u0];
D_shared[ut][vt-3] = D[(v0-3)*D_width+u0];
D_shared[ut][vt-2] = D[(v0-2)*D_width+u0];
D_shared[ut][vt-1] = D[(v0-1)*D_width+u0];
}
if(threadIdx.y == blockDim.y-1){
D_shared[ut][vt+1] = D[(v0+1)*D_width+u0];
D_shared[ut][vt+2] = D[(v0+2)*D_width+u0];
D_shared[ut][vt+3] = D[(v0+3)*D_width+u0];
//D_shared[ut][vt+4] = D[(v0+4)*D_width+u0];
}
if(D[idx] < 0){
    // zero input disparity maps to -10 (this forces the bilateral
    // weights of all valid disparities to 0 in this region)
D_shared[ut][vt] = -10;
}else{
D_shared[ut][vt] = D[idx];
}
__syncthreads();
// full resolution: 8 pixel bilateral filter width
// D(x) = sum(I(xi)*f(I(xi)-I(x))*g(xi-x))/W(x)
// W(x) = sum(f(I(xi)-I(x))*g(xi-x))
// g(xi-x) = 1
// f(I(xi)-I(x)) = 4-|I(xi)-I(x)| if greater than 0, 0 otherwise
// horizontal filter
  // Current pixel being filtered is the middle of our set (4 back; in the original it's 3 for some reason)
  //Note this isn't truly the center since the original uses 8 vector registers
float val_curr = D_shared[ut][vt];
float weight_sum0 = 0;
float weight_sum = 0;
float factor_sum = 0;
for(int32_t i=0; i < 8; i++){
weight_sum0 = 4.0f - fabs(D_shared[ut+(i-4)][vt]-val_curr);
weight_sum0 = max(0.0f, weight_sum0);
weight_sum += weight_sum0;
factor_sum += D_shared[ut+(i-4)][vt]*weight_sum0;
}
if (weight_sum>0) {
float d = factor_sum/weight_sum;
if (d>=0) *(D+idx) = d;
}
__syncthreads();
//Update shared memory
if(threadIdx.x == blockDim.x-1){
D_shared[ut+1][vt] = D[idx+1];
D_shared[ut+2][vt] = D[idx+2];
D_shared[ut+3][vt] = D[idx+3];
//D_shared[ut+4][vt] = D[idx+4];
}
if(threadIdx.x == 0){
D_shared[ut-4][vt] = D[idx-4];
D_shared[ut-3][vt] = D[idx-3];
D_shared[ut-2][vt] = D[idx-2];
D_shared[ut-1][vt] = D[idx-1];
}
if(threadIdx.y == 0){
D_shared[ut][vt-4] = D[(v0-4)*D_width+u0];
D_shared[ut][vt-3] = D[(v0-3)*D_width+u0];
D_shared[ut][vt-2] = D[(v0-2)*D_width+u0];
D_shared[ut][vt-1] = D[(v0-1)*D_width+u0];
}
if(threadIdx.y == blockDim.y-1){
D_shared[ut][vt+1] = D[(v0+1)*D_width+u0];
D_shared[ut][vt+2] = D[(v0+2)*D_width+u0];
D_shared[ut][vt+3] = D[(v0+3)*D_width+u0];
//D_shared[ut][vt+4] = D[(v0+4)*D_width+u0];
}
if(D[idx] < 0){
D_shared[ut][vt] = -10;
}else{
D_shared[ut][vt] = D[idx];
}
__syncthreads();
// vertical filter
// set pixel of interest
val_curr = D_shared[ut][vt];
weight_sum0 = 0;
weight_sum = 0;
factor_sum = 0;
for(int32_t i=0; i < 8; i++){
weight_sum0 = 4.0f - fabs(D_shared[ut][vt+(i-4)]-val_curr);
weight_sum0 = max(0.0f, weight_sum0);
weight_sum += weight_sum0;
factor_sum += D_shared[ut][vt+(i-4)]*weight_sum0;
}
if (weight_sum>0) {
float d = factor_sum/weight_sum;
if (d>=0) *(D+idx) = d;
}
} |
46de12109c4bb859215469a253447fa4c55fb6ce.hip | // !!! This is a file automatically generated by hipify!!!
extern "C" {
#include "../shape/head.h"
}
__host__ void dbg_print_array(float *data, int x, int y) {
/* Debug function that prints all elements in data to a csv in x col * y rows */
int n, i, j;
float *host;
FILE *fp_fit;
const char *filename_fit;
//double *fit;
dim3 BLK,THD;
filename_fit = "dbg_array_cuda.csv";
	printf("\n %s file created", filename_fit);
printf("\n\nFilename: %s",filename_fit);
n = x*y;
host = (float *) malloc(n*sizeof(float));
gpuErrchk(hipMemcpy(host, data, n*sizeof(float), hipMemcpyDeviceToHost));
fp_fit = fopen(filename_fit, "w+");
/* Print top corner idop/idel label */
fprintf(fp_fit, "i/j , ");
/* Print top row idel values */
for (i=0; i<x; i++)
fprintf(fp_fit, "%i , ", i);
/* Print first entry in every row (except 1st): idop */
for (j=1; j<y; j++) {
fprintf(fp_fit, "\n%i , ", j);
/* Write the rest of the row values: fit[idel][idop] */
for (i=0; i<x; i++)
fprintf(fp_fit, " %g , ", host[j*x + i]);
}
fclose(fp_fit);
free(host);
}
__host__ void dbg_print_array1D(float *data, int size) {
/* Debug function that prints all elements in data to a csv */
int i;
float *host;
FILE *fp_fit;
const char *filename_fit;
//double *fit;
dim3 BLK,THD;
filename_fit = "dbg_array1D_cuda.csv";
	printf("\n %s file created", filename_fit);
printf("\n\nFilename: %s",filename_fit);
host = (float *) malloc(size*sizeof(float));
gpuErrchk(hipMemcpy(host, data, size*sizeof(float), hipMemcpyDeviceToHost));
fp_fit = fopen(filename_fit, "w+");
/* Print top corner idop/idel label */
fprintf(fp_fit, "i , \n");
/* Print top row idel values */
for (i=0; i<size; i++)
fprintf(fp_fit, "%i , ", i);
/* Go to second row */
fprintf(fp_fit, "\n , ");
/* Write the rest of the row values: fit[idel][idop] */
for (i=0; i<size; i++)
fprintf(fp_fit, " %g , ", host[i]);
fclose(fp_fit);
free(host);
}
__host__ void dbg_print_array1D_dbl(double *data, int size, int offset,
const char *filename) {
/* Debug function that prints all elements in data to a csv */
int i;
FILE *fp_fit;
printf("\n\nFilename: %s",filename);
fp_fit = fopen(filename, "w+");
/* Print top corner idop/idel label */
fprintf(fp_fit, "i , \n");
/* Print row */
for (i=offset; i<=size; i++)
fprintf(fp_fit, "%i , %g, \n", i, data[i]);
fclose(fp_fit);
}
__host__ void dbg_print_array1(float *in, int size) {
/* This debug function prints each array value */
int i;
for (i=0; i<size; i++) {
printf("\narray[%i]=%g", i, in[i]);
}
}
| 46de12109c4bb859215469a253447fa4c55fb6ce.cu |
extern "C" {
#include "../shape/head.h"
}
__host__ void dbg_print_array(float *data, int x, int y) {
/* Debug function that prints all elements in data to a csv in x col * y rows */
int n, i, j;
float *host;
FILE *fp_fit;
const char *filename_fit;
//double *fit;
dim3 BLK,THD;
filename_fit = "dbg_array_cuda.csv";
	printf("\n %s file created", filename_fit);
printf("\n\nFilename: %s",filename_fit);
n = x*y;
host = (float *) malloc(n*sizeof(float));
gpuErrchk(cudaMemcpy(host, data, n*sizeof(float), cudaMemcpyDeviceToHost));
fp_fit = fopen(filename_fit, "w+");
/* Print top corner idop/idel label */
fprintf(fp_fit, "i/j , ");
/* Print top row idel values */
for (i=0; i<x; i++)
fprintf(fp_fit, "%i , ", i);
/* Print first entry in every row (except 1st): idop */
for (j=1; j<y; j++) {
fprintf(fp_fit, "\n%i , ", j);
/* Write the rest of the row values: fit[idel][idop] */
for (i=0; i<x; i++)
fprintf(fp_fit, " %g , ", host[j*x + i]);
}
fclose(fp_fit);
free(host);
}
__host__ void dbg_print_array1D(float *data, int size) {
/* Debug function that prints all elements in data to a csv */
int i;
float *host;
FILE *fp_fit;
const char *filename_fit;
//double *fit;
dim3 BLK,THD;
filename_fit = "dbg_array1D_cuda.csv";
	printf("\n %s file created", filename_fit);
printf("\n\nFilename: %s",filename_fit);
host = (float *) malloc(size*sizeof(float));
gpuErrchk(cudaMemcpy(host, data, size*sizeof(float), cudaMemcpyDeviceToHost));
fp_fit = fopen(filename_fit, "w+");
/* Print top corner idop/idel label */
fprintf(fp_fit, "i , \n");
/* Print top row idel values */
for (i=0; i<size; i++)
fprintf(fp_fit, "%i , ", i);
/* Go to second row */
fprintf(fp_fit, "\n , ");
/* Write the rest of the row values: fit[idel][idop] */
for (i=0; i<size; i++)
fprintf(fp_fit, " %g , ", host[i]);
fclose(fp_fit);
free(host);
}
__host__ void dbg_print_array1D_dbl(double *data, int size, int offset,
const char *filename) {
/* Debug function that prints all elements in data to a csv */
int i;
FILE *fp_fit;
printf("\n\nFilename: %s",filename);
fp_fit = fopen(filename, "w+");
/* Print top corner idop/idel label */
fprintf(fp_fit, "i , \n");
/* Print row */
for (i=offset; i<=size; i++)
fprintf(fp_fit, "%i , %g, \n", i, data[i]);
fclose(fp_fit);
}
__host__ void dbg_print_array1(float *in, int size) {
/* This debug function prints each array value */
int i;
for (i=0; i<size; i++) {
printf("\narray[%i]=%g", i, in[i]);
}
}
|
8927eaf22681662f8ad1603006b27ca74d85ff13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
// TODO: Move this to fixed_divisor.h
#ifdef __HIP_PLATFORM_HCC__
#define FIXED_DIVISOR int32_t
#define FIXED_DIVISOR_DIV(d, n) (n / d)
#define FIXED_DIVISOR_MOD(d, n) (n % d)
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \
do { \
const auto n_copy = n; \
*q = n_copy / d; \
*r = n_copy % d; \
} while (0)
#else // __HIP_PLATFORM_HCC__
#define FIXED_DIVISOR FixedDivisor<int32_t>
#define FIXED_DIVISOR_DIV(d, n) (d.Div(n))
#define FIXED_DIVISOR_MOD(d, n) (d.Mod(n))
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r))
#endif // __HIP_PLATFORM_HCC__
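// Illustrative use of the macros above: given FIXED_DIVISOR cols_div(cols), a flat
// index i splits into row = FIXED_DIVISOR_DIV(cols_div, i) and
// col = FIXED_DIVISOR_MOD(cols_div, i); the CUDA branch goes through
// FixedDivisor<int32_t>, which precomputes the division, while the HIP branch
// falls back to plain / and %.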
#ifdef __HIP_PLATFORM_HCC__
using CUBLAS_HALF_TYPE = rocblas_half;
#else // __HIP_PLATFORM_HCC
using CUBLAS_HALF_TYPE = __half;
#endif // __HIP_PLATFORM_HCC
#include "caffe2/utils/math/utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
template <typename T> \
struct Func##Functor { \
inline __host__ __device__ T \
operator()(const T& lhs, const T& rhs) const { \
return lhs expr rhs; \
} \
}; \
template <> \
struct Func##Functor<at::Half> { \
inline __host__ __device__ at::Half operator()( \
const at::Half& lhs, \
const at::Half& rhs) const { \
return convert::To<float, at::Half>(convert::To<at::Half, float>( \
lhs) expr convert::To<at::Half, float>(rhs)); \
} \
};
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
const int N,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
C[i] = op(A[i], B[i]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
const int size,
const FIXED_DIVISOR cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int j = FIXED_DIVISOR_MOD(cols, C_index);
const int A_index = broadcast_1st ? j : C_index;
const int B_index = broadcast_1st ? C_index : j;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
const int size,
const FIXED_DIVISOR cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int i = FIXED_DIVISOR_DIV(cols, C_index);
const int A_index = broadcast_1st ? i : C_index;
const int B_index = broadcast_1st ? C_index : i;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
const int size,
const SimpleArray<int, D> A_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FIXED_DIVISOR, D> C_dims,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
int A_index = 0;
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d);
A_index += d * A_strides.data[i];
B_index += d * B_strides.data[i];
}
C[C_index] = op(A[A_index], B[B_index]);
}
}
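// BinaryOpWith2DBroadcasting below handles the case where one operand is a single
// row or a single column of a rows x cols operand: it picks the rowwise or colwise
// kernel and, via broadcast_1st, whether the first or second argument is the one
// being broadcast.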
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
const int rows,
const int cols,
const bool rowwise_broadcast,
const bool broadcast_1st,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
if (rows == 0 || cols == 0) {
return;
}
const int size = rows * cols;
const FIXED_DIVISOR cols_div(cols);
if (rowwise_broadcast) {
if (broadcast_1st) {
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
} else {
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
}
} else {
if (broadcast_1st) {
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
} else {
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, cols_div, op, A, B, C);
}
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
const int* A_dims,
const int* B_dims,
const int* C_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
SimpleArray<int, D> A_strides_array;
SimpleArray<int, D> B_strides_array;
SimpleArray<FIXED_DIVISOR, D> C_dims_array;
int A_stride = 1;
int B_stride = 1;
for (int i = D - 1; i >= 0; --i) {
if (C_dims[i] == 0) {
return;
}
A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
A_stride *= A_dims[i];
B_stride *= B_dims[i];
C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]);
}
const int size =
std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
const int A_ndim,
const int* A_dims,
const int B_ndim,
const int* B_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int ndim = ::max(A_ndim, B_ndim);
std::vector<int> A_dims_array(ndim);
std::vector<int> B_dims_array(ndim);
std::vector<int> C_dims_array(ndim);
utils::ComputeBroadcastBinaryOpDims(
A_ndim,
A_dims,
B_ndim,
B_dims,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data());
if (A_dims_array == B_dims_array) {
const int size = std::accumulate(
C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
hipLaunchKernelGGL(( SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>)
, dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, op, A, B, C);
return;
}
int rows;
int cols;
bool broadcast_1st;
if (utils::IsRowwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, true, broadcast_1st, op, A, B, C, context);
return;
}
if (utils::IsColwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, false, broadcast_1st, op, A, B, C, context);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
BroadcastBinaryOpImpl,
TIn,
TOut,
BinaryOperator,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data(),
op,
A,
B,
C,
context);
}
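// Dispatch order in BroadcastBinaryOp above: identical broadcast shapes take the
// plain element-wise kernel, rowwise/colwise 2-D broadcasts use the specialised
// kernels, and only fully general shapes fall back to the strided N-D kernel
// dispatched on rank.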
} // namespace
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
hipLaunchKernelGGL(( RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
hipLaunchKernelGGL(( ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false>) \
, dim3(CAFFE_GET_BLOCKS(size)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), size, cols_div, Op<TIn>(), A, B, C); \
}
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \
A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
}
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
hipcub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); \
hipcub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
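// On the argument order above: cuBLAS is column-major, so the row-major product
// C = A * B is obtained as C^T = B^T * A^T by passing (B, A), the dimensions
// (N, M, K) and ldc = N; the same pattern recurs in the other Gemm variants below.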
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
#ifdef __HIP_PLATFORM_HCC__
// rocblas doesn't support cublasSgemmEx type API yet.
    // It has the more general rocblas_gemm_ex API, which is closer to
    // hipblasGemmEx. rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C,
    // whereas cublasGemmEx does C = alpha*op( A )*op( B ) + beta*C
ROCBLAS_ENFORCE(rocblas_gemm_ex(
context->rocblashandle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
rocblas_datatype_f16_r,
ldb,
A,
rocblas_datatype_f16_r,
lda,
&beta,
C,
rocblas_datatype_f16_r,
N,
C, // D
rocblas_datatype_f16_r, // D type
N, // ldd
rocblas_datatype_f32_r, // compute type
rocblas_gemm_algo_standard, // rocblas_gemm_algo
0, // solution index, reserved for future use
0, // flags, reserved for future use
NULL, // size of workspace
NULL)); // workspace
#else
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N));
#endif // __HIP_PLATFORM_HCC__
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
// call hipblasHgemm
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
reinterpret_cast<const CUBLAS_HALF_TYPE*>(B),
ldb,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
lda,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
reinterpret_cast<CUBLAS_HALF_TYPE*>(C),
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
const float* bias,
const float* bias_multiplier,
const int bias_channels,
const int image_size,
float* image,
CUDAContext* context) {
Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
bias_channels,
image_size,
1,
1,
bias,
bias_multiplier,
1,
image,
context);
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float** A,
const float** B,
const float beta,
float** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 || defined(__HIP_PLATFORM_HCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
thrust::device_vector<const float*> A_device(A, A + batch_size);
thrust::device_vector<const float*> B_device(B, B + batch_size);
thrust::device_vector<float*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta,
C_device.data().get(),
ldc,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
B_stride,
A,
lda,
A_stride,
&beta,
C,
ldc,
C_stride,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half** A,
const at::Half** B,
const float beta,
at::Half** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 9
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if TORCH_HIP_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
thrust::device_vector<const void*> A_device(A, A + batch_size);
thrust::device_vector<const void*> B_device(B, B + batch_size);
thrust::device_vector<void*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasGemmBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
HIP_R_16F,
ldb,
A_device.data().get(),
HIP_R_16F,
lda,
&beta,
C_device.data().get(),
HIP_R_16F,
ldc,
batch_size,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
std::vector<const __half*> A_array(batch_size);
std::vector<const __half*> B_array(batch_size);
std::vector<__half*> C_array(batch_size);
for (int i = 0; i < batch_size; ++i) {
A_array[i] = reinterpret_cast<const __half*>(A[i]);
B_array[i] = reinterpret_cast<const __half*>(B[i]);
C_array[i] = reinterpret_cast<__half*>(C[i]);
}
thrust::device_vector<const __half*> A_device(
A_array.cbegin(), A_array.cend());
thrust::device_vector<const __half*> B_device(
B_array.cbegin(), B_array.cend());
thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta_fp16,
C_device.data().get(),
ldc,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if TORCH_HIP_VERSION < 9010 && !defined(__HIP_PLATFORM_HCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
#ifdef __HIP_PLATFORM_HCC__
// D[i*stride_d] = alpha*op(A[i*stride_a])*op(B[i*stride_b]) +
// beta*C[i*stride_c], for i in [0,batch_count-1]
ROCBLAS_ENFORCE(rocblas_gemm_strided_batched_ex(
context->rocblashandle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
rocblas_datatype_f16_r,
ldb,
B_stride,
A,
rocblas_datatype_f16_r,
lda,
A_stride,
&beta,
C,
rocblas_datatype_f16_r,
ldc,
C_stride,
C, // D
rocblas_datatype_f16_r, // D type
ldc, // ldd
C_stride, // D stride
batch_size,
rocblas_datatype_f32_r, // compute type
rocblas_gemm_algo_standard, // rocblas_gemm_algo
0, // solution index, reserved for future use
0, // flags, reserved for future use
NULL, // size of workspace
NULL)); // workspace
#else
CUBLAS_ENFORCE(hipblasGemmStridedBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
B_stride,
A,
HIP_R_16F,
lda,
A_stride,
&beta,
C,
HIP_R_16F,
ldc,
C_stride,
batch_size,
HIP_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif // __HIP_PLATFORM_HCC__
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
reinterpret_cast<const CUBLAS_HALF_TYPE*>(B),
ldb,
B_stride,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
lda,
A_stride,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
reinterpret_cast<CUBLAS_HALF_TYPE*>(C),
ldc,
C_stride,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
}
#if TORCH_HIP_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasGemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
HIP_R_16F,
ldb,
A,
HIP_R_16F,
lda,
&beta,
C,
HIP_R_16F,
N,
HIP_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<float, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
#endif // TORCH_HIP_VERSION >= 9000
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
const hipblasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSgemv(
context->cublas_handle(),
cu_trans_A,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
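// AddStripedBatchKernel accumulates `batch` consecutive stripes of length `stripe`
// from `first` into Y, converting through float so the same kernel also serves the
// at::Half specialisation generated below.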
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AddStripedBatchKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const at::Half* A,
const at::Half* x,
const float beta,
at::Half* y,
CUDAContext* context,
TensorProto::DataType math_type) {
const hipblasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
// sort out what we need to call cublasSgemmEx / hipblasHgemm
const int m = (cu_trans_A == HIPBLAS_OP_N) ? N : M;
const int k = (cu_trans_A == HIPBLAS_OP_N) ? M : N;
const int lda = (cu_trans_A == HIPBLAS_OP_N) ? m : k;
const int ldc = m;
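  // With the second gemm dimension fixed to 1, the vector x is treated as a k x 1
  // matrix and the gemv is expressed as a gemm producing a single output column in y.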
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
#ifdef __HIP_PLATFORM_HCC__
// rocblas doesn't support cublasSgemmEx type API yet.
    // It has the more general rocblas_gemm_ex API, which is closer to
    // hipblasGemmEx. rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C,
    // whereas cublasGemmEx does C = alpha*op( A )*op( B ) + beta*C
ROCBLAS_ENFORCE(rocblas_gemm_ex(
context->rocblashandle(),
cu_trans_A,
rocblas_operation_none,
m,
1,
k,
&alpha,
A,
rocblas_datatype_f16_r,
lda,
x,
rocblas_datatype_f16_r,
k,
&beta,
y,
rocblas_datatype_f16_r,
ldc,
y, // D
rocblas_datatype_f16_r, // D type
ldc, // ldd
rocblas_datatype_f32_r, // compute type
rocblas_gemm_algo_standard, // rocblas_gemm_algo
0, // solution index, reserved for future use
0, // flags, reserved for future use
NULL, // size of workspace
NULL)); // workspace
#else
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_A,
HIPBLAS_OP_N,
m,
1,
k,
&alpha,
A,
HIP_R_16F,
lda,
x,
HIP_R_16F,
k,
&beta,
y,
HIP_R_16F,
ldc));
#endif // __HIP_PLATFORM_HCC__
} else if (math_type == TensorProto_DataType_FLOAT16) {
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasHgemm(
context->cublas_handle(),
cu_trans_A,
HIPBLAS_OP_N,
m,
1,
k,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
lda,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(x),
k,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
reinterpret_cast<CUBLAS_HALF_TYPE*>(y),
ldc));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
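// A sketch of the dimension bookkeeping above: the FP16 GEMV is expressed as
// a GEMM with a single output column, y = alpha * op(A) * x + beta * y.
// Because cuBLAS/rocBLAS are column-major, the row-major M x N matrix A is
// seen as an N x M column-major matrix, so op(A) is flipped relative to
// trans_A, and
//   m   = rows of op(A)           = (cu_trans_A == HIPBLAS_OP_N) ? N : M
//   k   = cols of op(A)           = (cu_trans_A == HIPBLAS_OP_N) ? M : N
//   lda = leading dim of stored A = (cu_trans_A == HIPBLAS_OP_N) ? m : k  (= N either way)
// With n = 1, x acts as a k x 1 matrix (ldb = k) and y as the m x 1 result
// (ldc = m). The rocblas_gemm_ex branch computes D = alpha*op(A)*op(B)+beta*C,
// so y is passed both as C and as D to keep the cuBLAS semantics.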
namespace {
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerateUniform(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<float>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
hiprandGenerateUniformDouble(context->curand_generator(), r, n));
hipLaunchKernelGGL(( UniformShift<double>)
, dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(hiprandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
hipLaunchKernelGGL(( UniformIntFit),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using hiprandGenerateNormal.
// hiprandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
hiprandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
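// Example of the odd-length handling above (a sketch): for n = 5,
// HandleOddLengthRandGaussian fills r[4] with a host-generated sample and
// returns even_n = 4, so hiprandGenerateNormal only ever sees an even count,
// as it requires. For n = 6 it is a no-op and returns even_n = 6.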
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(hiprandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>(
const int n,
const at::Half* a,
const at::Half* b,
at::Half* y,
CUDAContext* context) {
#if defined(__HIP_PLATFORM_HCC__)
CAFFE_THROW("HIP currently does not support FP16 completely yet.");
#else
// execute with 32-bit math
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasDotEx_v2(
context->cublas_handle(),
n,
a,
HIP_R_16F,
1,
b,
HIP_R_16F,
1,
y,
HIP_R_16F,
HIP_R_32F));
#endif
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
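// Sketch of the reduction tree in SumKernel: each of the 128 threads first
// accumulates a strided slice of X into reduction_buffer[idx] (N -> 128).
// Threads 0-31 then fold buffer entries idx+32, idx+64 and idx+96 into their
// own slot (128 -> 32), and finally thread 0 sums the remaining 32 partials
// into *Y (32 -> 1). For example, with N = 1000 each thread accumulates about
// 8 elements of X before the two in-shared-memory folding steps.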
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
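// Usage sketch for the Sum specializations below (assuming a CUDAContext
// `context` and a CUDA Tensor `scratch` owned by the caller):
//   math::Sum<float, CUDAContext>(N, x, y, &context, &scratch);
// When a scratch tensor is supplied and N > DEVICE_REDUCE_SIZE_THRESHOLD,
// the reduction runs through hipcub::DeviceReduce via SumGenericIter;
// otherwise it falls back to the single-block SumKernel above.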
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor* scratch_ptr) {
size_t memRequired = 0;
hipcub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<int64_t>{buffer_size});
}
hipcub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
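// The two hipcub::DeviceReduce::Sum calls above follow the usual two-pass CUB
// idiom: the first call, made with a null workspace pointer, only reports the
// required temporary-storage size in memRequired; the scratch tensor is then
// resized to that many bytes rounded up to whole elements of T (plus one
// extra T to hold the output when the caller passed dest == nullptr), and the
// second call performs the actual reduction using that buffer.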
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(at::Half)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
hipcub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(),
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
hipcub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
hipcub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
hipLaunchKernelGGL(( SumConvertKernel), dim3(1), dim3(1), 0, context->cuda_stream(), sum, y); \
} else { \
hipLaunchKernelGGL(( SumKernel), dim3(1), dim3(SUM_KERNEL_NTHREADS), 0, context->cuda_stream(), \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(at::Half)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<float>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
template <>
CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>(
const int N,
const int D,
const at::Half* x,
const int* idx,
at::Half* y,
CUDAContext* context) {
hipLaunchKernelGGL(( SelectKernel<at::Half>)
, dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, D, x, idx, y);
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(
hipblasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
#if defined(__HIP_PLATFORM_HCC__)
CAFFE_THROW("HIP currently does not support FP16 completely yet.");
#else
CUBLAS_ENFORCE(
hipblasSetPointerMode(context->cublas_handle(), HIPBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
&alpha,
HIP_R_32F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
#if defined(__HIP_PLATFORM_HCC__)
CAFFE_THROW("HIP currently does not support FP16 completely yet.");
#else
CUBLAS_ENFORCE(hipblasSetPointerMode(
context->cublas_handle(), HIPBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(hipblasAxpyEx_v2(
context->cublas_handle(),
N,
alpha,
HIP_R_32F,
X,
HIP_R_16F,
1,
Y,
HIP_R_16F,
1,
HIP_R_32F));
#endif
}
namespace {
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff a,
const TData* x,
const TCoeff b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * a + y[i] * b;
#else
y[i] = x[i] * a + y[i] * b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float a,
const at::Half* x,
const float b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * a +
convert::To<at::Half, float>(y[i]) * b);
}
}
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff* a,
const TData* x,
const TCoeff* b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b);
#else
y[i] = x[i] * *a + y[i] * *b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float* a,
const at::Half* x,
const float* b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * __ldg(a) +
convert::To<at::Half, float>(y[i]) * __ldg(b));
#else
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * *a +
convert::To<at::Half, float>(y[i]) * *b);
#endif
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff a, \
const TData* x, \
const TCoeff b, \
TData* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>) \
, dim3(CAFFE_GET_BLOCKS(n)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), n, a, x, b, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff* a, \
const TData* x, \
const TCoeff* b, \
TData* y, \
CUDAContext* context) { \
hipLaunchKernelGGL(( AxpbyCUDAKernel<TCoeff, TData>) \
, dim3(CAFFE_GET_BLOCKS(n)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), n, a, x, b, y); \
}
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
namespace {
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
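// Derivation of the column-range bounds used above (a sketch): an input pixel
// at padded coordinate w is touched by output column w_col exactly when
//   w_col * stride_w <= w < w_col * stride_w + dpatch_w,
// i.e. (w - dpatch_w) / stride_w < w_col <= w / stride_w in real arithmetic.
// Taking integer division gives
//   w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1
//   w_col_end   = min(w / stride_w + 1, output_w)
// and the height bounds follow the same argument with dpatch_h and stride_h.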
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, false>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
hipLaunchKernelGGL(( Im2ColNdNCHWCUDAKernel<T, N, true>)
, dim3(::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * output_h * output_w;
hipLaunchKernelGGL(( Im2ColNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
hipLaunchKernelGGL(( Im2ColNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int /* groups */) {
// In NCHW, the number of groups doesn't affect Col2Im.
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
hipLaunchKernelGGL(( Col2ImNCHWCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
hipLaunchKernelGGL(( Col2ImNHWCCUDAKernel<float>)
, dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
// In NCHW, the number of groups doesn't affect Im2Col.
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_NOT_IMPLEMENTED;
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int /* groups */) {
// In NCHW, the number of groups doesn't affect Col2Im.
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int groups) {
CAFFE_NOT_IMPLEMENTED;
}
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::Copy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
hipMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \
template <> \
void CopyMatrix<T, CUDAContext>( \
const int M, \
const int N, \
const T* A, \
const int lda, \
T* B, \
const int ldb, \
CUDAContext* context) { \
if (M == 0 || N == 0) { \
return; \
} \
hipMemcpy2DAsync( \
B, \
sizeof(T) * ldb, \
A, \
sizeof(T) * lda, \
sizeof(T) * N, \
M, \
hipMemcpyDeviceToDevice, \
context->cuda_stream()); \
}
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
hipMemcpyAsync(
dst,
src,
sizeof(float) * N,
hipMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = hipcub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( RowwiseReduceKernel), \
::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
hipLaunchKernelGGL(( ColwiseReduceKernel), \
::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), \
N, D, hipcub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
hipLaunchKernelGGL(( maximum_kernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), N, alpha, x, y);
}
namespace {
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FIXED_DIVISOR, D> Y_dims,
const T alpha,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[Y_index] = __ldg(X + X_index) * alpha;
#else
Y[Y_index] = X[X_index] * alpha;
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<FIXED_DIVISOR, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
for (int i = 0; i < D; ++i) {
if (Y_dims[i] == 0) {
return;
}
Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]);
}
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
hipLaunchKernelGGL(( BroadcastCUDAKernel<T, D>)
, dim3(CAFFE_GET_BLOCKS(Y_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(),
Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, \
BroadcastCUDAImpl, \
T, \
X_ndim, \
X_dims, \
Y_dims, \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \
template <> \
__global__ void InvStdCUDAKernel<T>( \
const int N, const T epsilon, const T* var, T* inv_std) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
inv_std[i] = Func(var[i] + epsilon); \
} \
}
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \
template <> \
CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \
const int N, \
const T epsilon, \
const T* var, \
T* inv_std, \
CUDAContext* context) { \
hipLaunchKernelGGL(( InvStdCUDAKernel<T>) \
, dim3(CAFFE_GET_BLOCKS(N)), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream(), N, epsilon, var, inv_std); \
}
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
} // namespace math
} // namespace caffe2
8927eaf22681662f8ad1603006b27ca74d85ff13.cu
// Implements the math functions for GPU.
#include "caffe2/utils/math.h"
#include <cstring>
#include <limits>
#include <numeric>
#include <vector>
#include <cub/block/block_reduce.cuh>
#include <cub/cub.cuh>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/fixed_divisor.h"
// TODO: Move this to fixed_divisor.h
#ifdef __HIP_PLATFORM_HCC__
#define FIXED_DIVISOR int32_t
#define FIXED_DIVISOR_DIV(d, n) (n / d)
#define FIXED_DIVISOR_MOD(d, n) (n % d)
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) \
do { \
const auto n_copy = n; \
*q = n_copy / d; \
*r = n_copy % d; \
} while (0)
#else // __HIP_PLATFORM_HCC__
#define FIXED_DIVISOR FixedDivisor<int32_t>
#define FIXED_DIVISOR_DIV(d, n) (d.Div(n))
#define FIXED_DIVISOR_MOD(d, n) (d.Mod(n))
#define FIXED_DIVISOR_DIV_MOD(d, n, q, r) (d.DivMod(n, q, r))
#endif // __HIP_PLATFORM_HCC__
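// Minimal usage sketch of the FIXED_DIVISOR wrappers above: for a divisor
// that is fixed for the lifetime of a kernel launch (e.g. the number of
// columns), the CUDA build precomputes a FixedDivisor<int32_t> on the host so
// device code can divide without a hardware integer divide, while the HIP
// build falls back to plain / and %. For example:
//   const FIXED_DIVISOR cols_div(cols);
//   int row, col;
//   FIXED_DIVISOR_DIV_MOD(cols_div, index, &row, &col);  // row = index / cols, col = index % cols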
#ifdef __HIP_PLATFORM_HCC__
using CUBLAS_HALF_TYPE = rocblas_half;
#else // __HIP_PLATFORM_HCC__
using CUBLAS_HALF_TYPE = __half;
#endif // __HIP_PLATFORM_HCC__
#include "caffe2/utils/math/utils.h"
#if THRUST_VERSION >= 100800
#define THRUST_SUPPORTS_PER_THREAD
#endif // THRUST_VERSION >= 100800
namespace caffe2 {
namespace math {
namespace {
#define DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Func, expr) \
template <typename T> \
struct Func##Functor { \
inline __host__ __device__ T \
operator()(const T& lhs, const T& rhs) const { \
return lhs expr rhs; \
} \
}; \
template <> \
struct Func##Functor<at::Half> { \
inline __host__ __device__ at::Half operator()( \
const at::Half& lhs, \
const at::Half& rhs) const { \
return convert::To<float, at::Half>(convert::To<at::Half, float>( \
lhs) expr convert::To<at::Half, float>(rhs)); \
} \
};
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Add, +)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Sub, -)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Mul, *)
DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR(Div, /)
#undef DELEGATE_SIMPLE_HOST_DEVICE_BINARY_FUNCTOR
template <typename TIn, typename TOut, class BinaryOperator>
__global__ void SimpleBinaryOpCUDAKernel(
const int N,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(i, N) {
C[i] = op(A[i], B[i]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void RowwiseBinaryOpCUDAKenel(
const int size,
const FIXED_DIVISOR cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int j = FIXED_DIVISOR_MOD(cols, C_index);
const int A_index = broadcast_1st ? j : C_index;
const int B_index = broadcast_1st ? C_index : j;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, bool broadcast_1st>
__global__ void ColwiseBinaryOpCUDAKenel(
const int size,
const FIXED_DIVISOR cols,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
const int i = FIXED_DIVISOR_DIV(cols, C_index);
const int A_index = broadcast_1st ? i : C_index;
const int B_index = broadcast_1st ? C_index : i;
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
__global__ void BroadcastBinaryOpCUDAKernel(
const int size,
const SimpleArray<int, D> A_strides,
const SimpleArray<int, D> B_strides,
const SimpleArray<FIXED_DIVISOR, D> C_dims,
const BinaryOperator op,
const TIn* A,
const TIn* B,
TOut* C) {
CUDA_1D_KERNEL_LOOP(C_index, size) {
int A_index = 0;
int B_index = 0;
int C_index_val = C_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(C_dims.data[i], C_index_val, &C_index_val, &d);
A_index += d * A_strides.data[i];
B_index += d * B_strides.data[i];
}
C[C_index] = op(A[A_index], B[B_index]);
}
}
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BinaryOpWith2DBroadcasting(
const int rows,
const int cols,
const bool rowwise_broadcast,
const bool broadcast_1st,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
if (rows == 0 || cols == 0) {
return;
}
const int size = rows * cols;
const FIXED_DIVISOR cols_div(cols);
if (rowwise_broadcast) {
if (broadcast_1st) {
RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
} else {
RowwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
}
} else {
if (broadcast_1st) {
ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, true>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
} else {
ColwiseBinaryOpCUDAKenel<TIn, TOut, BinaryOperator, false>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, cols_div, op, A, B, C);
}
}
}
template <typename TIn, typename TOut, class BinaryOperator, int D>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOpImpl(
const int* A_dims,
const int* B_dims,
const int* C_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
SimpleArray<int, D> A_strides_array;
SimpleArray<int, D> B_strides_array;
SimpleArray<FIXED_DIVISOR, D> C_dims_array;
int A_stride = 1;
int B_stride = 1;
for (int i = D - 1; i >= 0; --i) {
if (C_dims[i] == 0) {
return;
}
A_strides_array.data[i] = A_dims[i] == 1 ? 0 : A_stride;
B_strides_array.data[i] = B_dims[i] == 1 ? 0 : B_stride;
A_stride *= A_dims[i];
B_stride *= B_dims[i];
C_dims_array.data[i] = FIXED_DIVISOR(C_dims[i]);
}
const int size =
std::accumulate(C_dims, C_dims + D, 1, std::multiplies<int>());
BroadcastBinaryOpCUDAKernel<TIn, TOut, BinaryOperator, D>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
size, A_strides_array, B_strides_array, C_dims_array, op, A, B, C);
}
template <typename TIn, typename TOut, class BinaryOperator>
CAFFE2_CUDA_EXPORT void BroadcastBinaryOp(
const int A_ndim,
const int* A_dims,
const int B_ndim,
const int* B_dims,
const BinaryOperator& op,
const TIn* A,
const TIn* B,
TOut* C,
CUDAContext* context) {
const int ndim = std::max(A_ndim, B_ndim);
std::vector<int> A_dims_array(ndim);
std::vector<int> B_dims_array(ndim);
std::vector<int> C_dims_array(ndim);
utils::ComputeBroadcastBinaryOpDims(
A_ndim,
A_dims,
B_ndim,
B_dims,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data());
if (A_dims_array == B_dims_array) {
const int size = std::accumulate(
C_dims_array.cbegin(), C_dims_array.cend(), 1, std::multiplies<int>());
SimpleBinaryOpCUDAKernel<TIn, TOut, BinaryOperator>
<<<CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, op, A, B, C);
return;
}
int rows;
int cols;
bool broadcast_1st;
if (utils::IsRowwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, true, broadcast_1st, op, A, B, C, context);
return;
}
if (utils::IsColwiseBroadcastBinaryOp(
ndim,
A_dims_array.data(),
B_dims_array.data(),
&rows,
&cols,
&broadcast_1st)) {
BinaryOpWith2DBroadcasting<TIn, TOut, BinaryOperator>(
rows, cols, false, broadcast_1st, op, A, B, C, context);
return;
}
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_3(
ndim,
BroadcastBinaryOpImpl,
TIn,
TOut,
BinaryOperator,
A_dims_array.data(),
B_dims_array.data(),
C_dims_array.data(),
op,
A,
B,
C,
context);
}
} // namespace
#define DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Rowwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
RowwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, true>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, true> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Colwise##Func<TIn, CUDAContext, false>( \
const int rows, \
const int cols, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
if (rows == 0 || cols == 0) { \
return; \
} \
const int size = rows * cols; \
const FIXED_DIVISOR cols_div(cols); \
ColwiseBinaryOpCUDAKenel<TIn, TOut, Op<TIn>, false> \
<<<CAFFE_GET_BLOCKS(size), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(size, cols_div, Op<TIn>(), A, B, C); \
}
#define DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_2D_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_2D_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_2D_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_2D_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(TIn, TOut, Func, Op) \
template <> \
CAFFE2_CUDA_EXPORT void Func<TIn, CUDAContext>( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const TIn* A, \
const TIn* B, \
TOut* C, \
CUDAContext* context) { \
BroadcastBinaryOp<TIn, TOut, Op<TIn>>( \
A_ndim, A_dims, B_ndim, B_dims, Op<TIn>(), A, B, C, context); \
}
#define DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int32_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(EQ, thrust::equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(NE, thrust::not_equal_to)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LT, thrust::less)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(LE, thrust::less_equal)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GT, thrust::greater)
DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION(GE, thrust::greater_equal)
#undef DEFINE_BROADCAST_CUDA_COMPARE_FUNCTION
#define DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int64_t, std::int64_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(float, float, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(double, double, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(at::Half, at::Half, Func, Op)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Add, AddFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Sub, SubFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Mul, MulFunctor)
DEFINE_BROADCAST_CUDA_BINARY_FUNCTION(Div, DivFunctor)
#undef DEFINE_BROADCAST_CUDA_BINARY_FUNCTION
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, And, thrust::logical_and)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Or, thrust::logical_or)
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Xor, thrust::bit_xor)
#define DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(bool, bool, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION( \
std::int32_t, std::int32_t, Func, Op) \
DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION(std::int64_t, std::int64_t, Func, Op)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseAnd, thrust::bit_and)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseOr, thrust::bit_or)
DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION(BitwiseXor, thrust::bit_xor)
#undef DEFINE_BROADCAST_CUDA_BITWISE_BINARY_FUNCTION
#undef DELEGATE_BROADCAST_CUDA_BINARY_FUNCTION
#define DELEGATE_REDUCTION_FUNCTION(T, Funcname, func) \
template <> \
CAFFE2_CUDA_EXPORT void Funcname<T, CUDAContext>( \
const int N, \
const T* src, \
T* dst, \
Tensor* scratch_ptr, \
CUDAContext* context) { \
size_t memRequired = 0; \
cub::DeviceReduce::func( \
nullptr, memRequired, src, dst, N, context->cuda_stream()); \
auto buffer_size = \
static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T)); \
scratch_ptr->Resize(std::vector<int64_t>{buffer_size}); \
cub::DeviceReduce::func( \
static_cast<void*>(scratch_ptr->mutable_data<T>()), \
memRequired, \
src, \
dst, \
N, \
context->cuda_stream()); \
}
DELEGATE_REDUCTION_FUNCTION(float, ReduceMin, Min)
DELEGATE_REDUCTION_FUNCTION(float, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int32_t, ReduceMax, Max)
DELEGATE_REDUCTION_FUNCTION(int64_t, ReduceMax, Max)
#undef DELEGATE_REDUCTION_FUNCTION
// Caffe2 gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
N));
}
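// Usage sketch (assuming row-major, contiguous device buffers and a
// CUDAContext named `context`): computing C = A * B for an M x K matrix A
// and a K x N matrix B reduces to
//   math::Gemm<float, CUDAContext>(
//       CblasNoTrans, CblasNoTrans, M, N, K, 1.0f, A, B, 0.0f, C, &context);
// The leading dimensions are derived internally from M, N, K, which is why
// this interface only supports contiguous storage.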
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
#ifdef __HIP_PLATFORM_HCC__
    // rocblas doesn't support a cublasSgemmEx-style API yet.
    // It has the more general rocblas_gemm_ex API, which is closer to
    // cublasGemmEx: rocblas_gemm_ex does D = alpha*op( A )*op( B ) + beta*C,
    // whereas cublasSgemmEx does C = alpha*op( A )*op( B ) + beta*C.
ROCBLAS_ENFORCE(rocblas_gemm_ex(
context->rocblashandle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
rocblas_datatype_f16_r,
ldb,
A,
rocblas_datatype_f16_r,
lda,
&beta,
C,
rocblas_datatype_f16_r,
N,
C, // D
rocblas_datatype_f16_r, // D type
N, // ldd
rocblas_datatype_f32_r, // compute type
rocblas_gemm_algo_standard, // rocblas_gemm_algo
0, // solution index, reserved for future use
0, // flags, reserved for future use
NULL, // size of workspace
NULL)); // workspace
#else
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N));
#endif // __HIP_PLATFORM_HCC__
} else if (math_type == TensorProto_DataType_FLOAT16) {
// convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
// call cublasHgemm
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
reinterpret_cast<const CUBLAS_HALF_TYPE*>(B),
ldb,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
lda,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
reinterpret_cast<CUBLAS_HALF_TYPE*>(C),
N));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
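// BiasCHW adds a per-channel bias to a single CHW image plane via a rank-1
// GEMM update with beta = 1: image(c, p) += bias[c] * bias_multiplier[p] for
// every pixel p, where bias_multiplier is typically a vector of ones of
// length image_size.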
template <>
CAFFE2_CUDA_EXPORT void BiasCHW<float, CUDAContext>(
const float* bias,
const float* bias_multiplier,
const int bias_channels,
const int image_size,
float* image,
CUDAContext* context) {
Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
bias_channels,
image_size,
1,
1,
bias,
bias_multiplier,
1,
image,
context);
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float** A,
const float** B,
const float beta,
float** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 || defined(__HIP_PLATFORM_HCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
thrust::device_vector<const float*> A_device(A, A + batch_size);
thrust::device_vector<const float*> B_device(B, B + batch_size);
thrust::device_vector<float*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta,
C_device.data().get(),
ldc,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
B_stride,
A,
lda,
A_stride,
&beta,
C,
ldc,
C_stride,
batch_size));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half** A,
const at::Half** B,
const float beta,
at::Half** C,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 9
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if CUDA_VERSION < 9010
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A,
trans_B,
M,
N,
K,
alpha,
A[i],
B[i],
beta,
C[i],
context,
math_type);
}
#else
thrust::device_vector<const void*> A_device(A, A + batch_size);
thrust::device_vector<const void*> B_device(B, B + batch_size);
thrust::device_vector<void*> C_device(C, C + batch_size);
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasGemmBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B_device.data().get(),
CUDA_R_16F,
ldb,
A_device.data().get(),
CUDA_R_16F,
lda,
&beta,
C_device.data().get(),
CUDA_R_16F,
ldc,
batch_size,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
std::vector<const __half*> A_array(batch_size);
std::vector<const __half*> B_array(batch_size);
std::vector<__half*> C_array(batch_size);
for (int i = 0; i < batch_size; ++i) {
A_array[i] = reinterpret_cast<const __half*>(A[i]);
B_array[i] = reinterpret_cast<const __half*>(B[i]);
C_array[i] = reinterpret_cast<__half*>(C[i]);
}
thrust::device_vector<const __half*> A_device(
A_array.cbegin(), A_array.cend());
thrust::device_vector<const __half*> B_device(
B_array.cbegin(), B_array.cend());
thrust::device_vector<__half*> C_device(C_array.cbegin(), C_array.cend());
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemmBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha_fp16,
B_device.data().get(),
ldb,
A_device.data().get(),
lda,
&beta_fp16,
C_device.data().get(),
ldc,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
}
template <>
CAFFE2_CUDA_EXPORT void GemmStridedBatched<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
#if __CUDACC_VER_MAJOR__ < 8 && !defined(__HIP_PLATFORM_HCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const int ldc = N;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
if (math_type == TensorProto_DataType_FLOAT) {
#if CUDA_VERSION < 9010 && !defined(__HIP_PLATFORM_HCC__)
// loop over matrices in the batch
for (int i = 0; i < batch_size; ++i) {
Gemm<at::Half, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
A += A_stride;
B += B_stride;
C += C_stride;
}
#else
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
#ifdef __HIP_PLATFORM_HCC__
// D[i*stride_d] = alpha*op(A[i*stride_a])*op(B[i*stride_b]) +
// beta*C[i*stride_c], for i in [0,batch_count-1]
ROCBLAS_ENFORCE(rocblas_gemm_strided_batched_ex(
context->rocblashandle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
rocblas_datatype_f16_r,
ldb,
B_stride,
A,
rocblas_datatype_f16_r,
lda,
A_stride,
&beta,
C,
rocblas_datatype_f16_r,
ldc,
C_stride,
C, // D
rocblas_datatype_f16_r, // D type
ldc, // ldd
C_stride, // D stride
batch_size,
rocblas_datatype_f32_r, // compute type
rocblas_gemm_algo_standard, // rocblas_gemm_algo
0, // solution index, reserved for future use
0, // flags, reserved for future use
NULL, // size of workspace
NULL)); // workspace
#else
CUBLAS_ENFORCE(cublasGemmStridedBatchedEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
B_stride,
A,
CUDA_R_16F,
lda,
A_stride,
&beta,
C,
CUDA_R_16F,
ldc,
C_stride,
batch_size,
CUDA_R_32F,
CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#endif // __HIP_PLATFORM_HCC__
#endif
} else if (math_type == TensorProto_DataType_FLOAT16) {
// Convert alpha, beta from float -> __half
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemmStridedBatched(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
reinterpret_cast<const CUBLAS_HALF_TYPE*>(B),
ldb,
B_stride,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
lda,
A_stride,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
reinterpret_cast<CUBLAS_HALF_TYPE*>(C),
ldc,
C_stride,
batch_size));
} else {
CAFFE_THROW("Unsupported math type");
}
#endif
}
#if CUDA_VERSION >= 9000
// No change, but required. Defer to default CUDA engine
template <>
CAFFE2_CUDA_EXPORT void Gemm<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const float* B,
const float beta,
float* C,
CUDAContext* context,
TensorProto::DataType math_type) {
return Gemm<float, CUDAContext>(
trans_A, trans_B, M, N, K, alpha, A, B, beta, C, context, math_type);
}
template <>
CAFFE2_CUDA_EXPORT void Gemm<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const at::Half* B,
const float beta,
at::Half* C,
CUDAContext* context,
TensorProto::DataType math_type) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const int lda = (trans_A == CblasNoTrans) ? K : M;
const int ldb = (trans_B == CblasNoTrans) ? N : K;
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
// enable TensorCore for this call on this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_TENSOR_OP_MATH));
}
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasGemmEx(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
CUDA_R_16F,
ldb,
A,
CUDA_R_16F,
lda,
&beta,
C,
CUDA_R_16F,
N,
CUDA_R_32F,
CUBLAS_GEMM_DFALT_TENSOR_OP));
// Now disable TensorCore math for subsequent calls to this handle
if (TensorCoreAvailable()) {
CUBLAS_ENFORCE(
cublasSetMathMode(context->cublas_handle(), CUBLAS_DEFAULT_MATH));
}
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<float, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int A_stride,
const float* B,
const int B_stride,
const float beta,
float* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<float, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
template <>
CAFFE2_CUDA_EXPORT void
GemmStridedBatched<at::Half, CUDAContext, TensorCoreEngine>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const at::Half* A,
const int A_stride,
const at::Half* B,
const int B_stride,
const float beta,
at::Half* C,
const int C_stride,
CUDAContext* context,
TensorProto::DataType math_type) {
return GemmStridedBatched<at::Half, CUDAContext, DefaultEngine>(
trans_A,
trans_B,
batch_size,
M,
N,
K,
alpha,
A,
A_stride,
B,
B_stride,
beta,
C,
C_stride,
context,
math_type);
}
#endif // CUDA_VERSION >= 9000
template <>
CAFFE2_CUDA_EXPORT void GemmEx<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const float* A,
const int lda,
const float* B,
const int ldb,
const float beta,
float* C,
const int ldc,
CUDAContext* context) {
// Note that cublas follows fortran order, so the order is different from
// the cblas convention.
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
const cublasOperation_t cu_trans_B =
(trans_B == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemm(
context->cublas_handle(),
cu_trans_B,
cu_trans_A,
N,
M,
K,
&alpha,
B,
ldb,
A,
lda,
&beta,
C,
ldc));
}
template <>
CAFFE2_CUDA_EXPORT void Gemv<float, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const float* A,
const float* x,
const float beta,
float* y,
CUDAContext* context,
TensorProto::DataType math_type) {
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSgemv(
context->cublas_handle(),
cu_trans_A,
N,
M,
&alpha,
A,
N,
x,
1,
&beta,
y,
1));
}
// Batched Add variants
namespace {
template <typename T>
__global__ void AddStripedBatchKernel(
const int N,
const T* first,
T* Y,
const int stripe,
const int batch) {
for (int j = 0; j < batch; j++) {
const T* x = first + j * stripe;
CUDA_1D_KERNEL_LOOP(i, N) {
float tmpY = convert::To<T, float>(Y[i]);
tmpY += convert::To<T, float>(x[i]);
Y[i] = convert::To<float, T>(tmpY);
}
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(T) \
template <> \
CAFFE2_CUDA_EXPORT void AddStripedBatch<T, CUDAContext>( \
const int N, \
const T* first, \
T* Y, \
const int stripe, \
const int batch, \
CUDAContext* context) { \
AddStripedBatchKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, first, Y, stripe, batch); \
}
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(float);
CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH(at::Half);
#undef CAFFE2_SPECIALIZED_CUDA_ADD_STRIPED_BATCH
template <>
CAFFE2_CUDA_EXPORT void Gemv<at::Half, CUDAContext>(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const at::Half* A,
const at::Half* x,
const float beta,
at::Half* y,
CUDAContext* context,
TensorProto::DataType math_type) {
const cublasOperation_t cu_trans_A =
(trans_A == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
  // Set up the GEMM dimensions needed to call cublasSgemmEx / cublasHgemm:
  // the GEMV is expressed as a GEMM with a single column on the right-hand
  // side.
const int m = (cu_trans_A == CUBLAS_OP_N) ? N : M;
const int k = (cu_trans_A == CUBLAS_OP_N) ? M : N;
const int lda = (cu_trans_A == CUBLAS_OP_N) ? m : k;
const int ldc = m;
if (math_type == TensorProto_DataType_FLOAT) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
#ifdef __HIP_PLATFORM_HCC__
    // rocblas doesn't support a cublasSgemmEx-style API yet. It has the more
    // general rocblas_gemm_ex API, which is closer to cublasGemmEx:
    // rocblas_gemm_ex computes D = alpha*op( A )*op( B ) + beta*C,
    // whereas cublasGemmEx computes C = alpha*op( A )*op( B ) + beta*C.
ROCBLAS_ENFORCE(rocblas_gemm_ex(
context->rocblashandle(),
cu_trans_A,
rocblas_operation_none,
m,
1,
k,
&alpha,
A,
rocblas_datatype_f16_r,
lda,
x,
rocblas_datatype_f16_r,
k,
&beta,
y,
rocblas_datatype_f16_r,
ldc,
y, // D
rocblas_datatype_f16_r, // D type
ldc, // ldd
rocblas_datatype_f32_r, // compute type
rocblas_gemm_algo_standard, // rocblas_gemm_algo
0, // solution index, reserved for future use
0, // flags, reserved for future use
NULL, // size of workspace
NULL)); // workspace
#else
CUBLAS_ENFORCE(cublasSgemmEx(
context->cublas_handle(),
cu_trans_A,
CUBLAS_OP_N,
m,
1,
k,
&alpha,
A,
CUDA_R_16F,
lda,
x,
CUDA_R_16F,
k,
&beta,
y,
CUDA_R_16F,
ldc));
#endif // __HIP_PLATFORM_HCC__
} else if (math_type == TensorProto_DataType_FLOAT16) {
const __half alpha_fp16 = at::Half(alpha);
const __half beta_fp16 = at::Half(beta);
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasHgemm(
context->cublas_handle(),
cu_trans_A,
CUBLAS_OP_N,
m,
1,
k,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&alpha_fp16),
reinterpret_cast<const CUBLAS_HALF_TYPE*>(A),
lda,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(x),
k,
reinterpret_cast<const CUBLAS_HALF_TYPE*>(&beta_fp16),
reinterpret_cast<CUBLAS_HALF_TYPE*>(y),
ldc));
} else {
// fail
CAFFE_THROW("Unsupported math type");
}
}
namespace {
template <typename T>
__global__ void
UniformShift(const size_t N, const float min, const float max, T* x) {
float scale = max - min;
CUDA_1D_KERNEL_LOOP(i, N) {
x[i] = convert::To<float, T>(convert::To<T, float>(x[i]) * scale + min);
}
}
__global__ void
UniformIntFit(const size_t N, const int min, const int max, unsigned int* x) {
int* x_int = reinterpret_cast<int*>(x);
int range = (max - min + 1);
CUDA_1D_KERNEL_LOOP(i, N) {
x_int[i] = min + static_cast<int>(x[i] % range);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void RandUniform<float, CUDAContext>(
const size_t n,
const float min,
const float max,
float* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerateUniform(context->curand_generator(), r, n));
UniformShift<float>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<double, CUDAContext>(
const size_t n,
const double min,
const double max,
double* r,
CUDAContext* context) {
CURAND_ENFORCE(
curandGenerateUniformDouble(context->curand_generator(), r, n));
UniformShift<double>
<<<CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(n, min, max, r);
}
template <>
CAFFE2_CUDA_EXPORT void RandUniform<int, CUDAContext>(
const size_t n,
const int min,
const int max,
int* r,
CUDAContext* context) {
CURAND_ENFORCE(curandGenerate(
context->curand_generator(), reinterpret_cast<unsigned int*>(r), n));
UniformIntFit<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
n, min, max, reinterpret_cast<unsigned int*>(r));
}
template <typename T>
size_t HandleOddLengthRandGaussian(
const size_t n,
const T mean,
const T std,
T* r,
CUDAContext* context) {
if (n % 2 == 1) {
std::default_random_engine generator;
std::normal_distribution<T> distribution(mean, std);
const T random_value = distribution(generator);
Set<T, CUDAContext>(1, random_value, r + (n - 1), context);
return n - 1;
}
return n;
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<float, CUDAContext>(
const size_t n,
const float mean,
const float std,
float* r,
CUDAContext* context) {
// If n is odd, we add a random Gaussian value at the end manually
// and generate n-1 random values using curandGenerateNormal.
// curandGenerateNormal requires n to be even.
const size_t even_n =
HandleOddLengthRandGaussian<float>(n, mean, std, r, context);
CURAND_ENFORCE(
curandGenerateNormal(context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void RandGaussian<double, CUDAContext>(
const size_t n,
const double mean,
const double std,
double* r,
CUDAContext* context) {
const size_t even_n =
HandleOddLengthRandGaussian<double>(n, mean, std, r, context);
CURAND_ENFORCE(curandGenerateNormalDouble(
context->curand_generator(), r, even_n, mean, std));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<float, CUDAContext>(
const int n,
const float* a,
const float* b,
float* y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasSdot(context->cublas_handle(), n, a, 1, b, 1, y));
}
template <>
CAFFE2_CUDA_EXPORT void Dot<at::Half, CUDAContext>(
const int n,
const at::Half* a,
const at::Half* b,
at::Half* y,
CUDAContext* context) {
#if defined(__HIP_PLATFORM_HCC__)
CAFFE_THROW("HIP currently does not support FP16 completely yet.");
#else
// execute with 32-bit math
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasDotEx(
context->cublas_handle(),
n,
a,
CUDA_R_16F,
1,
b,
CUDA_R_16F,
1,
y,
CUDA_R_16F,
CUDA_R_32F));
#endif
}
// A previous version of caffe2 used Thrust but it turns out that thrust
// reduction has an implicit scratch space allocation and deallocation, which
// may interfere with NCCL and create a deadlock. Hence we are using a custom
// reduction here.
#define SUM_KERNEL_NTHREADS 128
template <typename T>
__global__ void SumKernel(const int N, const T* X, T* Y, bool square) {
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SUM_KERNEL_NTHREADS];
reduction_buffer[idx] = 0;
// A multilevel reduction.
// N -> 128
if (!square) {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
reduction_buffer[idx] += convert::To<T, float>(X[i]);
}
} else {
for (int i = idx; i < N; i += SUM_KERNEL_NTHREADS) {
float Xi = convert::To<T, float>(X[i]);
reduction_buffer[idx] += Xi * Xi;
}
}
__syncthreads();
// 128 -> 32
if (idx < 32) {
reduction_buffer[idx] += reduction_buffer[idx + 32] +
reduction_buffer[idx + 64] + reduction_buffer[idx + 96];
}
__syncthreads();
// 32 -> 1
if (idx == 0) {
float tmp = 0;
for (int i = 0; i < 32; ++i) {
tmp += reduction_buffer[i];
}
*Y = convert::To<float, T>(tmp);
}
}
// According to the benchmarks script
// caffe2/caffe2/experiments/python/device_reduce_sum_bench.py,
// device reduce is slower for N <= 10000.
#define DEVICE_REDUCE_SIZE_THRESHOLD 10000
namespace {
template <typename T>
__global__ void SumConvertKernel(float* sum, T* dest) {
*dest = convert::To<float, T>(*sum);
}
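// SumGenericIter reduces N values from the iterator `it` into *dest with
// cub::DeviceReduce::Sum. The first call (null temp pointer) only queries the
// required temporary storage size; the scratch tensor is then resized to hold
// it (rounded up to whole elements of T), and the second call performs the
// actual reduction. If dest is null, one extra T is appended to the scratch
// buffer and used as the output slot.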
template <typename T, typename IterT>
CAFFE2_CUDA_EXPORT void SumGenericIter(
const int N,
IterT it,
T*& dest,
CUDAContext* context,
Tensor* scratch_ptr) {
size_t memRequired = 0;
cub::DeviceReduce::Sum(
nullptr, memRequired, it, dest, N, context->cuda_stream());
auto buffer_size =
static_cast<int64_t>((memRequired + sizeof(T) - 1) / sizeof(T));
if (!dest) {
// allocate one more T at the end of scratch for dest
scratch_ptr->Resize(std::vector<int64_t>{buffer_size + 1});
dest = scratch_ptr->template mutable_data<T>() + buffer_size;
} else {
scratch_ptr->Resize(std::vector<int64_t>{buffer_size});
}
cub::DeviceReduce::Sum(
static_cast<void*>(scratch_ptr->template mutable_data<T>()),
memRequired,
it,
dest,
N,
context->cuda_stream());
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Sum<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<float>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
template <>
CAFFE2_CUDA_EXPORT void Sum<int32_t, CUDAContext>(
const int N,
const int32_t* x,
int32_t* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SumGenericIter<int32_t>(N, x, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, false);
}
}
namespace {
template <typename T>
struct FloatTransform {
inline __host__ __device__ float operator()(const T v) const {
return convert::To<T, float>(v);
}
};
} // namespace
#define CAFFE2_MATH_SUM_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void Sum<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> it( \
x, transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, false); \
} \
}
CAFFE2_MATH_SUM_FUNC(at::Half)
#undef CAFFE2_MATH_SUM_FUNC
namespace {
template <typename T>
struct SqrTransform {
inline __host__ __device__ T operator()(const T v) const {
return v * v;
}
};
} // namespace
template <>
CAFFE2_CUDA_EXPORT void SumSqr<float, CUDAContext>(
const int N,
const float* x,
float* y,
CUDAContext* context,
Tensor* scratch_ptr) {
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) {
SqrTransform<float> transform;
cub::TransformInputIterator<float, SqrTransform<float>, const float*> it(
x, transform);
SumGenericIter<float>(N, it, y, context, scratch_ptr);
} else {
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>(
N, x, y, true);
}
}
#define CAFFE2_MATH_SUMSQR_FUNC(T) \
template <> \
CAFFE2_CUDA_EXPORT void SumSqr<T, CUDAContext>( \
const int N, \
const T* x, \
T* y, \
CUDAContext* context, \
Tensor* scratch_ptr) { \
if (scratch_ptr && N > DEVICE_REDUCE_SIZE_THRESHOLD) { \
FloatTransform<T> float_transform; \
cub::TransformInputIterator<float, FloatTransform<T>, const T*> \
float_it(x, float_transform); \
SqrTransform<float> sqr_transform; \
cub::TransformInputIterator< \
float, \
SqrTransform<float>, \
decltype(float_it)> \
it(float_it, sqr_transform); \
float* sum = nullptr; \
SumGenericIter<float>(N, it, sum, context, scratch_ptr); \
SumConvertKernel<<<1, 1, 0, context->cuda_stream()>>>(sum, y); \
} else { \
SumKernel<<<1, SUM_KERNEL_NTHREADS, 0, context->cuda_stream()>>>( \
N, x, y, true); \
} \
}
CAFFE2_MATH_SUMSQR_FUNC(at::Half)
#undef CAFFE2_MATH_SUMSQR_FUNC
#undef DEVICE_REDUCE_SIZE_THRESHOLD
namespace {
template <typename T>
__global__ void
SelectKernel(const int N, const int D, const T* x, const int* idx, T* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = x[i * D + idx[i]];
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Select<float, CUDAContext>(
const int N,
const int D,
const float* x,
const int* idx,
float* y,
CUDAContext* context) {
SelectKernel<float>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
template <>
CAFFE2_CUDA_EXPORT void Select<at::Half, CUDAContext>(
const int N,
const int D,
const at::Half* x,
const int* idx,
at::Half* y,
CUDAContext* context) {
SelectKernel<at::Half>
<<<CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, D, x, idx, y);
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<double, CUDAContext>(
const int N,
const float alpha,
const double* X,
double* Y,
CUDAContext* context) {
double alpha_d{alpha};
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(
cublasDaxpy(context->cublas_handle(), N, &alpha_d, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
#if defined(__HIP_PLATFORM_HCC__)
CAFFE_THROW("HIP currently does not support FP16 completely yet.");
#else
CUBLAS_ENFORCE(
cublasSetPointerMode(context->cublas_handle(), CUBLAS_POINTER_MODE_HOST));
CUBLAS_ENFORCE(cublasAxpyEx(
context->cublas_handle(),
N,
&alpha,
CUDA_R_32F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
#endif
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<float, CUDAContext>(
const int N,
const float* alpha,
const float* X,
float* Y,
CUDAContext* context) {
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasSaxpy(context->cublas_handle(), N, alpha, X, 1, Y, 1));
}
template <>
CAFFE2_CUDA_EXPORT void Axpy<at::Half, CUDAContext>(
const int N,
const float* alpha,
const at::Half* X,
at::Half* Y,
CUDAContext* context) {
#if defined(__HIP_PLATFORM_HCC__)
CAFFE_THROW("HIP currently does not support FP16 completely yet.");
#else
CUBLAS_ENFORCE(cublasSetPointerMode(
context->cublas_handle(), CUBLAS_POINTER_MODE_DEVICE));
CUBLAS_ENFORCE(cublasAxpyEx(
context->cublas_handle(),
N,
alpha,
CUDA_R_32F,
X,
CUDA_R_16F,
1,
Y,
CUDA_R_16F,
1,
CUDA_R_32F));
#endif
}
namespace {
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff a,
const TData* x,
const TCoeff b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * a + y[i] * b;
#else
y[i] = x[i] * a + y[i] * b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float a,
const at::Half* x,
const float b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * a +
convert::To<at::Half, float>(y[i]) * b);
}
}
template <typename TCoeff, typename TData>
__global__ void AxpbyCUDAKernel(
const int N,
const TCoeff* a,
const TData* x,
const TCoeff* b,
TData* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = __ldg(x + i) * __ldg(a) + y[i] * __ldg(b);
#else
y[i] = x[i] * *a + y[i] * *b;
#endif
}
}
template <>
__global__ void AxpbyCUDAKernel<float, at::Half>(
const int N,
const float* a,
const at::Half* x,
const float* b,
at::Half* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
#if __CUDA_ARCH__ >= 350
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * __ldg(a) +
convert::To<at::Half, float>(y[i]) * __ldg(b));
#else
y[i] = convert::To<float, at::Half>(
convert::To<at::Half, float>(x[i]) * *a +
convert::To<at::Half, float>(y[i]) * *b);
#endif
}
}
} // namespace
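// Axpby computes y = a * x + b * y elementwise. The two specializations below
// take the coefficients either by value (host scalars) or by device pointer
// (read inside the kernel, via __ldg where available).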
#define CAFFE2_SPECIALIZED_CUDA_AXPBY(TCoeff, TData) \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff a, \
const TData* x, \
const TCoeff b, \
TData* y, \
CUDAContext* context) { \
AxpbyCUDAKernel<TCoeff, TData> \
<<<CAFFE_GET_BLOCKS(n), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(n, a, x, b, y); \
} \
template <> \
CAFFE2_CUDA_EXPORT void Axpby<TCoeff, TData, CUDAContext>( \
const int n, \
const TCoeff* a, \
const TData* x, \
const TCoeff* b, \
TData* y, \
CUDAContext* context) { \
AxpbyCUDAKernel<TCoeff, TData> \
<<<CAFFE_GET_BLOCKS(n), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(n, a, x, b, y); \
}
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, float)
CAFFE2_SPECIALIZED_CUDA_AXPBY(float, at::Half)
#undef CAFFE2_SPECIALIZED_CUDA_AXPBY
namespace {
template <typename T>
__global__ void Im2ColNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int w_out = index % output_w;
const int h_index = index / output_w;
const int h_out = h_index % output_h;
const int channel_in = h_index / output_h;
const int channel_out = channel_in * kernel_h * kernel_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
const int output_size = output_h * output_w;
T* col_data_ptr =
col_data + (channel_out * output_h + h_out) * output_w + w_out;
const T* img_data_ptr =
img_data + (channel_in * input_h + h_in) * input_w + w_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data_ptr + dh * input_w + dw)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data_ptr[dh * input_w + dw]
: 0;
#endif
col_data_ptr += output_size;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Im2ColNHWCCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_w,
const int channels,
const T* img_data,
T* col_data) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int channel_in = index % channels;
const int w_out = index / channels % output_w;
const int h_out = index / channels / output_w;
const int h_in = h_out * stride_h - pad_t;
const int w_in = w_out * stride_w - pad_l;
T* col_data_ptr = col_data +
(h_out * output_w + w_out) * channels * kernel_h * kernel_w +
channel_in;
int dh = 0;
for (int i = 0; i < kernel_h; ++i) {
int dw = 0;
for (int j = 0; j < kernel_w; ++j) {
const int h = h_in + dh;
const int w = w_in + dw;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? __ldg(img_data + (h * input_w + w) * channels + channel_in)
: 0;
#else
*col_data_ptr = utils::IsAGeZeroAndALtB(h, input_h) &&
utils::IsAGeZeroAndALtB(w, input_w)
? img_data[(h * input_w + w) * channels + channel_in]
: 0;
#endif
col_data_ptr += channels;
dw += dilation_w;
}
dh += dilation_h;
}
}
}
template <typename T>
__global__ void Col2ImNCHWCUDAKernel(
const int n,
const int input_h,
const int input_w,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int w = index % input_w + pad_l;
const int h = index / input_w % input_h + pad_t;
const int c = index / (input_h * input_w);
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h - h_col * stride_h);
int w_k = (w - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int col_data_index =
(((c * patch_h + h_k) * patch_w + w_k) * output_h + h_col) *
output_w +
w_col;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val += __ldg(col_data + col_data_index);
#else
val += col_data[col_data_index];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T>
__global__ void Col2ImNHWCCUDAKernel(
const int n,
const int input_w,
const int channels,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int stride_h,
const int stride_w,
const int output_h,
const int output_w,
const T* col_data,
T* img_data) {
const int dpatch_h = dilation_h * (patch_h - 1) + 1;
const int dpatch_w = dilation_w * (patch_w - 1) + 1;
CUDA_1D_KERNEL_LOOP(index, n) {
T val = 0;
const int c = index % channels;
const int w = index / channels % input_w + pad_l;
const int h = index / channels / input_w + pad_t;
// compute the start and end of the output
const int w_col_start = (w < dpatch_w) ? 0 : (w - dpatch_w) / stride_w + 1;
const int w_col_end = min(w / stride_w + 1, output_w);
const int h_col_start = (h < dpatch_h) ? 0 : (h - dpatch_h) / stride_h + 1;
const int h_col_end = min(h / stride_h + 1, output_h);
const int channels_col = patch_h * patch_w * channels;
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = h - h_col * stride_h;
int w_k = w - w_col * stride_w;
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int c_col = (h_k * patch_w + w_k) * channels + c;
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
val += __ldg(
col_data + (h_col * output_w + w_col) * channels_col + c_col);
#else
val += col_data[(h_col * output_w + w_col) * channels_col + c_col];
#endif
}
}
}
img_data[index] = val;
}
}
template <typename T, int N, bool kCol2Im>
__global__ void Im2ColNdNCHWCUDAKernel(
const int outer_size,
const int inner_size,
const int kernel_size,
SimpleArray<int, N + 1> img_shape,
SimpleArray<int, N + 1> col_shape,
SimpleArray<int, N> kernel_shape,
SimpleArray<int, N> stride,
SimpleArray<int, N> dilation,
SimpleArray<int, N> pad,
const T* X_data,
T* Y_data) {
int d_offset[N];
int d_iter[N];
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
int offset_i = i;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset_i % kernel_shape.data[d_i];
offset_i /= kernel_shape.data[d_i];
}
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
int offset_j = j;
#pragma unroll
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_iter[d_i] = offset_j % col_shape.data[d_i + 1];
offset_j /= col_shape.data[d_i + 1];
}
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
#pragma unroll
for (int d_i = 0; d_i < N; ++d_i) {
const int d_img = d_iter[d_i] * stride.data[d_i] - pad.data[d_i] +
d_offset[d_i] * dilation.data[d_i];
is_padding |= !utils::IsAGeZeroAndALtB(d_img, img_shape.data[d_i + 1]);
img_index = img_index * img_shape.data[d_i + 1] + d_img;
}
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : __ldg(X_data + img_index);
} else if (!is_padding) {
atomicAdd(Y_data + img_index, __ldg(X_data + col_index));
}
#else
if (!kCol2Im) {
Y_data[col_index] = is_padding ? 0 : X_data[img_index];
} else if (!is_padding) {
atomicAdd(Y_data + img_index, X_data[col_index]);
}
#endif
}
}
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Im2ColNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Im2ColNdNCHWCUDAKernel<T, N, false>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
img_data,
col_data);
}
template <typename T, int N>
CAFFE2_CUDA_EXPORT void Col2ImNdNCHWCUDAImpl(
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
SimpleArray<int, N + 1> img_shape_array;
SimpleArray<int, N + 1> col_shape_array;
SimpleArray<int, N> kernel_shape_array;
SimpleArray<int, N> stride_array;
SimpleArray<int, N> dilation_array;
SimpleArray<int, N> pad_array;
std::memcpy(img_shape_array.data, img_shape, (N + 1) * sizeof(int));
std::memcpy(col_shape_array.data, col_shape, (N + 1) * sizeof(int));
std::memcpy(kernel_shape_array.data, kernel_shape, N * sizeof(int));
std::memcpy(stride_array.data, stride, N * sizeof(int));
std::memcpy(dilation_array.data, dilation, N * sizeof(int));
std::memcpy(pad_array.data, pad, N * sizeof(int));
Set<T, CUDAContext>(img_size, 0, img_data, context);
Im2ColNdNCHWCUDAKernel<T, N, true>
<<<std::min(outer_size, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
outer_size,
inner_size,
kernel_size,
img_shape_array,
col_shape_array,
kernel_shape_array,
stride_array,
dilation_array,
pad_array,
col_data,
img_data);
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * output_h * output_w;
Im2ColNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2Col<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Im2Col");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = output_h * output_w * channels;
Im2ColNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_w,
channels,
img_data,
col_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NCHW>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int /* groups */) {
// In NCHW, the number of groups doesn't affect Col2Im.
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = channels * height * width;
Col2ImNCHWCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
height,
width,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Col2Im<float, CUDAContext, StorageOrder::NHWC>(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const float* col_data,
float* img_data,
CUDAContext* context,
const int groups) {
CAFFE_ENFORCE_EQ(groups, 1, "groups must be 1 for GPU NHWC Col2Im");
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
const int output_h = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
const int output_w = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
const int num_kernels = height * width * channels;
Col2ImNHWCCUDAKernel<float>
<<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
num_kernels,
width,
channels,
kernel_h,
kernel_w,
dilation_h,
dilation_w,
pad_t,
pad_l,
stride_h,
stride_w,
output_h,
output_w,
col_data,
img_data);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context,
const int /* groups */) {
// In NCHW, the number of groups doesn't affect Im2Col.
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Im2ColNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
img_data,
col_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Im2ColNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* img_data,
float* col_data,
CUDAContext* context,
const int groups) {
CAFFE_NOT_IMPLEMENTED;
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NCHW>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int /* groups */) {
// In NCHW, the number of groups doesn't affect Col2Im.
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1(
N,
Col2ImNdNCHWCUDAImpl,
float,
img_size,
col_size,
img_shape,
col_shape,
kernel_shape,
stride,
dilation,
pad,
col_data,
img_data,
context);
}
template <>
CAFFE2_CUDA_EXPORT void Col2ImNd<float, CUDAContext, StorageOrder::NHWC>(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const float* col_data,
float* img_data,
CUDAContext* context,
int groups) {
CAFFE_NOT_IMPLEMENTED;
}
template <>
CAFFE2_CUDA_EXPORT void CopyMatrix<CUDAContext>(
const size_t itemsize,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
CUDAContext* context,
TypeMeta::Copy copy) {
CAFFE_ENFORCE(!copy, "Copy constructor is not supported in CUDA context");
cudaMemcpy2DAsync(
B,
ldb * itemsize,
A,
lda * itemsize,
N * itemsize,
M,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
#define CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(T) \
template <> \
void CopyMatrix<T, CUDAContext>( \
const int M, \
const int N, \
const T* A, \
const int lda, \
T* B, \
const int ldb, \
CUDAContext* context) { \
if (M == 0 || N == 0) { \
return; \
} \
cudaMemcpy2DAsync( \
B, \
sizeof(T) * ldb, \
A, \
sizeof(T) * lda, \
sizeof(T) * N, \
M, \
cudaMemcpyDeviceToDevice, \
context->cuda_stream()); \
}
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(float)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(double)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int)
CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX(int64_t)
#undef CAFFE2_SPECIALIZED_CUDA_COPY_MATRIX
template <>
CAFFE2_CUDA_EXPORT void CopyVector<float, CUDAContext>(
const int N,
const float* src,
float* dst,
CUDAContext* context) {
if (src != dst && N > 0) {
cudaMemcpyAsync(
dst,
src,
sizeof(float) * N,
cudaMemcpyDeviceToDevice,
context->cuda_stream());
}
}
namespace {
template <typename T>
using BlockReduce = cub::BlockReduce<T, CAFFE_CUDA_NUM_THREADS>;
template <typename T, class Reducer>
__global__ void RowwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < rows; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < cols; j += blockDim.x) {
val = reducer(X[i * cols + j], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
template <typename T, class Reducer>
__global__ void ColwiseReduceKernel(
const int rows,
const int cols,
const Reducer reducer,
const T init,
const T alpha,
const T* X,
T* Y) {
__shared__ typename BlockReduce<T>::TempStorage temp_storage;
for (int i = blockIdx.x; i < cols; i += gridDim.x) {
T val = init;
for (int j = threadIdx.x; j < rows; j += blockDim.x) {
val = reducer(X[j * cols + i], val);
}
val = BlockReduce<T>(temp_storage).Reduce(val, reducer);
if (threadIdx.x == 0) {
Y[i] = val * alpha;
}
__syncthreads();
}
}
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void RowwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
RowwiseReduceKernel<<< \
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_ROWWISE_MAX
#define CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(T) \
template <> \
CAFFE2_CUDA_EXPORT void ColwiseMax<T, CUDAContext>( \
const int N, const int D, const T* x, T* y, CUDAContext* context) { \
ColwiseReduceKernel<<< \
std::min(D, CAFFE_MAXIMUM_NUM_BLOCKS), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>( \
N, D, cub::Max(), std::numeric_limits<T>::lowest(), T(1), x, y); \
}
CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX(float)
#undef CAFFE2_SPECIALIZED_CUDA_COLWISE_MAX
namespace {
__global__ void
maximum_kernel(const int N, const float alpha, const float* x, float* y) {
CUDA_1D_KERNEL_LOOP(i, N) {
y[i] = fmaxf(x[i], alpha);
}
}
} // namespace
template <>
CAFFE2_CUDA_EXPORT void Maximum(
const int N,
const float alpha,
const float* x,
float* y,
CUDAContext* context) {
maximum_kernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(N, alpha, x, y);
}
namespace {
template <typename T, int D>
__global__ void BroadcastCUDAKernel(
const int Y_size,
const SimpleArray<int, D> X_strides,
const SimpleArray<FIXED_DIVISOR, D> Y_dims,
const T alpha,
const T* X,
T* Y) {
CUDA_1D_KERNEL_LOOP(Y_index, Y_size) {
int X_index = 0;
int Y_index_val = Y_index;
#pragma unroll
for (int i = D - 1; i >= 0; --i) {
int d;
FIXED_DIVISOR_DIV_MOD(Y_dims.data[i], Y_index_val, &Y_index_val, &d);
X_index += d * X_strides.data[i];
}
#if __CUDA_ARCH__ >= 350 || defined(__HIP_PLATFORM_HCC__)
Y[Y_index] = __ldg(X + X_index) * alpha;
#else
Y[Y_index] = X[X_index] * alpha;
#endif
}
}
template <typename T, int D>
CAFFE2_CUDA_EXPORT void BroadcastCUDAImpl(
const int X_ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
CUDAContext* context) {
SimpleArray<int, D> X_strides_array;
SimpleArray<FIXED_DIVISOR, D> Y_dims_array;
const int d = D - X_ndim;
std::fill(X_strides_array.data, X_strides_array.data + d, 0);
int cur_stride = 1;
for (int i = D - 1; i >= d; --i) {
CAFFE_ENFORCE(X_dims[i - d] == 1 || X_dims[i - d] == Y_dims[i]);
X_strides_array.data[i] = X_dims[i - d] == 1 ? 0 : cur_stride;
cur_stride *= X_dims[i - d];
}
for (int i = 0; i < D; ++i) {
if (Y_dims[i] == 0) {
return;
}
Y_dims_array.data[i] = FIXED_DIVISOR(Y_dims[i]);
}
const int Y_size =
std::accumulate(Y_dims, Y_dims + D, 1, std::multiplies<int>());
BroadcastCUDAKernel<T, D>
<<<CAFFE_GET_BLOCKS(Y_size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(
Y_size, X_strides_array, Y_dims_array, alpha, X, Y);
}
} // namespace
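// Broadcast copies X into Y scaled by alpha, expanding any input dimension of
// size 1 to the corresponding output dimension; X's dimensions are
// right-aligned against Y's, and the expansion is implemented by giving
// size-1 (and missing leading) input dimensions a stride of 0.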
#define CAFFE2_SPECIALIZED_CUDA_BROADCAST(T) \
template <> \
CAFFE2_CUDA_EXPORT void Broadcast<T, CUDAContext>( \
const int X_ndim, \
const int* X_dims, \
const int Y_ndim, \
const int* Y_dims, \
const T alpha, \
const T* X, \
T* Y, \
CUDAContext* context) { \
CAFFE_ENFORCE_LE(X_ndim, Y_ndim); \
DISPATCH_FUNCTION_BY_VALUE_WITH_TYPE_1( \
Y_ndim, \
BroadcastCUDAImpl, \
T, \
X_ndim, \
X_dims, \
Y_dims, \
alpha, \
X, \
Y, \
context); \
}
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int32_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(std::int64_t)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(float)
CAFFE2_SPECIALIZED_CUDA_BROADCAST(double)
#undef CAFFE2_SPECIALIZED_CUDA_BROADCAST
namespace {
template <typename T>
__global__ void
InvStdCUDAKernel(const int N, const T epsilon, const T* var, T* inv_std);
#define DELEGATE_INV_STD_KERNEL_FUNCTION(T, Func) \
template <> \
__global__ void InvStdCUDAKernel<T>( \
const int N, const T epsilon, const T* var, T* inv_std) { \
CUDA_1D_KERNEL_LOOP(i, N) { \
inv_std[i] = Func(var[i] + epsilon); \
} \
}
DELEGATE_INV_STD_KERNEL_FUNCTION(float, rsqrtf)
#undef DELEGATE_INV_STD_KERNEL_FUNCTION
} // namespace
#define CAFFE2_SPECIALIZED_CUDA_INV_STD(T) \
template <> \
CAFFE2_CUDA_EXPORT void InvStd<T, CUDAContext>( \
const int N, \
const T epsilon, \
const T* var, \
T* inv_std, \
CUDAContext* context) { \
InvStdCUDAKernel<T> \
<<<CAFFE_GET_BLOCKS(N), \
CAFFE_CUDA_NUM_THREADS, \
0, \
context->cuda_stream()>>>(N, epsilon, var, inv_std); \
}
CAFFE2_SPECIALIZED_CUDA_INV_STD(float)
#undef CAFFE2_SPECIALIZED_CUDA_INV_STD
} // namespace math
} // namespace caffe2
35df03299a955c0ec96fca443801e5b5cec6ec3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include "fractal_params.h"
#include <algorithm>
#include <hip/hip_runtime.h>
using namespace std;
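// Operator overloads implementing float3 vector math: !v returns the
// reciprocal of the Euclidean length (rsqrtf of the squared length), v1 & v2
// is the dot product, v1 ^ v2 is the cross product, % applies componentwise
// fmodf, and the remaining arithmetic operators act componentwise (with the
// scalar overloads applied to every component).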
__device__ inline float operator !(const float3 v)
{
return rsqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
}
__device__ inline float operator &(const float3 v1, const float3 v2)
{
return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z;
}
__device__ inline float3 operator ^(const float3 v1, const float3 v2)
{
return make_float3(
v1.y * v2.z - v1.z * v2.y,
v1.z * v2.x - v1.x * v2.z,
v1.x * v2.y - v1.y * v2.x );
}
__device__ inline float3 operator /(float3 v1, const float3 v2)
{
v1.x /= v2.x;
v1.y /= v2.y;
v1.z /= v2.z;
return v1;
}
__device__ inline float3 operator /(float3 v, const float s)
{
v.x /= s;
v.y /= s;
v.z /= s;
return v;
}
__device__ inline float3 operator *(float3 v1, const float3 v2)
{
v1.x *= v2.x;
v1.y *= v2.y;
v1.z *= v2.z;
return v1;
}
__device__ inline float3 operator *(float3 v, const float s)
{
v.x *= s;
v.y *= s;
v.z *= s;
return v;
}
__device__ inline float3 operator +(float3 v1, const float3 v2)
{
v1.x += v2.x;
v1.y += v2.y;
v1.z += v2.z;
return v1;
}
__device__ inline float3 operator +(float3 v, const float s)
{
v.x += s;
v.y += s;
v.z += s;
return v;
}
__device__ inline float3 operator -(float3 v1, const float3 v2)
{
v1.x -= v2.x;
v1.y -= v2.y;
v1.z -= v2.z;
return v1;
}
__device__ inline float3 operator -(float3 v, const float s)
{
v.x -= s;
v.y -= s;
v.z -= s;
return v;
}
__device__ inline float3 operator %(float3 v1, const float3 v2)
{
v1.x = fmodf(v1.x, v2.x);
v1.y = fmodf(v1.y, v2.y);
v1.z = fmodf(v1.z, v2.z);
return v1;
}
__device__ inline int index(const int x, const int y, const int width)
{
return x + y * width;
}
__device__ inline float3 rotate_vec(const float3 vec, const float3 axis, const float cos, const float sin)
{
const float d = (1 - cos) * (axis & vec);
return make_float3(
d * axis.x + vec.x * cos + sin * (axis.y * vec.z - axis.z * vec.y),
d * axis.y + vec.y * cos + sin * (axis.z * vec.x - axis.x * vec.z),
d * axis.z + vec.z * cos + sin * (axis.x * vec.y - axis.y * vec.x));
}
__device__ inline float warp_scalar(float s, const int iterations, const float scale)
{
for (int i = 0; i < iterations; ++i)
s *= scale;
return s;
}
__device__ inline float cube_distance_equation(const float3 p, const float3 cube_center, const float cube_side_length)
{
return max(abs(p.x - cube_center.x), max(abs(p.y - cube_center.y), abs(p.z - cube_center.z))) - cube_side_length / 2;
}
__device__ inline float distance_equation(const float3 p, const float side)
{
return fmaxf(fabsf(p.x), fmaxf(fabsf(p.y), fabsf(p.z))) - side / 2;
}
__device__ inline float3 mod_space(const float3 p, const float3 mod)
{
return ((p + mod / 2) % mod + mod) % mod - mod / 2;
}
__device__ inline float3 fold_space(const float3 p, const float3 n)
{
if ((p & n) >= 0)
return p;
return p - n * 2 * (p & n) / (n & n);
}
__device__ inline float3 fold_menger(float3 vec)
{
float a = fminf(vec.x - vec.y, 0.f);
vec.x -= a;
vec.y += a;
a = fminf(vec.x - vec.z, 0.f);
vec.x -= a;
vec.z += a;
a = fminf(vec.y - vec.z, 0.f);
vec.y -= a;
vec.z += a;
return vec;
}
__device__ inline float3 max_space(float3 a, const float3 b)
{
a.x = fmaxf(a.x, b.x);
a.y = fmaxf(a.y, b.y);
a.z = fmaxf(a.z, b.z);
return a;
}
__device__ inline float3 max_space(float3 a, const float b)
{
a.x = fmaxf(a.x, b);
a.y = fmaxf(a.y, b);
a.z = fmaxf(a.z, b);
return a;
}
__device__ inline float3 min_space(float3 a, const float3 b)
{
a.x = fminf(a.x, b.x);
a.y = fminf(a.y, b.y);
a.z = fminf(a.z, b.z);
return a;
}
__device__ inline float3 min_space(float3 a, const float b)
{
a.x = fminf(a.x, b);
a.y = fminf(a.y, b);
a.z = fminf(a.z, b);
return a;
}
__device__ inline float3 fold_box(const float3 z, const float r)
{
return max_space(min_space(z, r), -r) * 2 - z;
}
__device__ inline float3 abs_space(float3 p)
{
p.x = fabsf(p.x);
p.y = fabsf(p.y);
p.z = fabsf(p.z);
return p;
}
__device__ inline float3 abs_space_x(float3 p)
{
p.x = fabsf(p.x);
return p;
}
__device__ inline float3 abs_space_y(float3 p)
{
p.y = fabsf(p.y);
return p;
}
__device__ inline float3 abs_space_z(float3 p)
{
p.z = fabsf(p.z);
return p;
}
__device__ inline float3 rotate_x(const float3 z, const float s, const float c)
{
return make_float3(z.x, c * z.y + s * z.z, c * z.z - s * z.y);
}
__device__ inline float3 rotate_y(const float3 z, const float s, const float c)
{
return make_float3(c * z.x - s * z.z, z.y, c * z.z + s * z.x);
}
__device__ inline float3 rotate_z(const float3 z, const float s, const float c)
{
return make_float3(c * z.x + s * z.y, c * z.y - s * z.x, z.z);
}
__device__ inline void transform(float3& p, const optimized_fractal_info params)
{
p = p / params.scale;
p = abs_space(p);
p = rotate_z(p, params.sin_theta, params.cos_theta);
p = fold_menger(p);
p = rotate_x(p, params.sin_phi, params.cos_phi);
p = p - params.offset;
}
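// The fractal is an iterated function system: warp_space applies the
// scale/fold/rotate transform above `iterations` times, and scaled_de maps the
// distance to the base cube back to world scale to obtain a distance estimate.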
__device__ inline void warp_space(float3& p, const int iterations, const optimized_fractal_info params)
{
for (int i = 0; i < iterations; ++i)
transform(p, params);
}
__device__ inline float scaled_de(float3 p, const int iterations, const float side, const optimized_fractal_info params)
{
warp_space(p, iterations, params);
return warp_scalar(distance_equation(p, side), iterations, params.scale);
}
__device__ inline float3 normal(const float3 p, const int iterations, const float side, const optimized_fractal_info params, const float epsilon)
{
float scaledde = scaled_de(make_float3(p.x, p.y, p.z), iterations, side, params);
const float3 scaled = {
scaled_de(make_float3(p.x + epsilon, p.y, p.z), iterations, side, params) -
scaledde,
scaled_de(make_float3(p.x, p.y + epsilon, p.z), iterations, side, params) -
scaledde,
scaled_de(make_float3(p.x, p.y, p.z + epsilon), iterations, side, params) -
scaledde
};
return scaled * !scaled;
}
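// Soft shadow: march from p towards the light along d, tracking the smallest
// cone angle (scaled by shadow_strength) subtended by nearby geometry;
// returns 0 for fully shadowed and up to 1 for fully lit.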
__device__ inline float soft_shadow(const float3 p, const float3 d, const float shadow_strength, const int iterations, const float side, const optimized_fractal_info params, const float minDist, const float maxDist, const float minAngle)
{
float darkness = 1;
float prev_dist = 2147483647;
float total_dist = minDist;
while (total_dist < maxDist)
{
const float dist = scaled_de(p + d * total_dist, iterations, side, params);
const float old_new_int_dist = dist * dist / (2 * prev_dist);
const float leg_length = sqrtf(dist * dist - old_new_int_dist * old_new_int_dist);
float angle = shadow_strength * leg_length / fmaxf(0.f, total_dist - old_new_int_dist);
darkness = fminf(darkness, angle);
prev_dist = dist;
total_dist += dist;
if (dist <= 0 || darkness < minAngle)
return 0;
}
return darkness;
}
__device__ inline float orbit(float3 p, const int iterations, const float side, const optimized_fractal_info params)
{
const float direction = scaled_de(p, 1, side, params);
for (int i = 0; i < iterations; ++i)
{
transform(p, params);
const float f = warp_scalar(distance_equation(p, side), i, params.scale);
if (f * direction >= 0)
return f / params.scale * 6;
}
return warp_scalar(distance_equation(p, side), iterations - 1, params.scale) * 6;
}
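// red/green/blue are phase-shifted copies of a period-6 trapezoid wave, turning
// the orbit value into a simple hue wheel.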
__device__ inline float trapezoid_wave(const float loc)
{
return fminf(fmaxf(fabsf(loc - 3), 0) - 1, 1);
}
__device__ inline float red(const float loc)
{
return trapezoid_wave(fmodf(loc, 6));
}
__device__ inline float green(const float loc)
{
return trapezoid_wave(fmodf(loc + 4, 6));
}
__device__ inline float blue(const float loc)
{
return trapezoid_wave(fmodf(loc + 2, 6));
}
__global__ void get_direction_length_inv(float* ray_length_invs, const float focal_length, const int width, const int height)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height)
return;
ray_length_invs[index(i, height - 1 - j, width)] = rsqrtf(
focal_length * focal_length + ((j - height / 2.f) * (j - height / 2.f) + (i - width / 2.f) * (i - width / 2.f)) / height / height);
}
__global__ void get_direction(float3* directions, const float* ray_length_invs, const float3 x, const float3 y, const float3 z,
const float focal_length, const int width, const int height)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height)
return;
const int h = index(i, height - 1 - j, width);
directions[h] = (z * focal_length + y * ((j - height / 2.f) / height) + x * ((i - width / 2.f) / height)) * ray_length_invs[h];
}
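// Sphere tracing: advance each ray by the distance estimate until it is within
// min_dist of the surface, escapes past maximum_distance, or runs out of steps.
// Hits are shaded with diffuse lighting attenuated by the soft shadow plus a
// step-count ambient-occlusion term, and coloured by the orbit value; misses
// are written as black.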
__global__ void march_ray(const float3* directions, unsigned char* pixel_values, float3 camera,
const float3 light, const float3 cols, const int width, const int height, const int iterations, const optimized_fractal_info params)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height)
return;
const int h = index(i, j, width);
int step_num = 0;
float min_dist = minimum_distance / (iterations * iterations + 1);
float dist = min_dist + 1;
float total_dist = 0;
float3 direction = directions[h];
while (total_dist < maximum_distance && dist > min_dist && (step_num < max_steps || dist < min_dist * 3))
{
dist = scaled_de(camera, iterations, side, params);
camera = camera + direction * dist;
total_dist += dist;
++step_num;
}
if (fabsf(dist) <= min_dist)
{
float brightness = 0;
float3 off = light - camera;
const float light_vector_length = !off;
off = off * light_vector_length;
float diffuse_calculated = 0;
const float normal_angle = off & normal(camera, iterations, side, params, min_dist);
if (normal_angle > 0)
diffuse_calculated = fmaxf(cols.y * soft_shadow(camera, off, shadow_strength, iterations, side, params, min_dist,
1/light_vector_length, 0.01f) * normal_angle, 0.f);
brightness += diffuse_calculated + cols.x / (1 + step_num * ambient_occ_strength);
brightness = fminf(fmaxf(brightness, 0.f), 1.f);
const float col = orbit(camera, iterations, side, params);
pixel_values[h * 3] = static_cast<unsigned char>(blue(col) * brightness * 255);
pixel_values[h * 3 + 1] = static_cast<unsigned char>(green(col) * brightness * 255);
pixel_values[h * 3 + 2] = static_cast<unsigned char>(red(col) * brightness * 255);
}
else
{
pixel_values[h * 3] = 0;
pixel_values[h * 3 + 1] = 0;
pixel_values[h * 3 + 2] = 0;
}
} | 35df03299a955c0ec96fca443801e5b5cec6ec3c.cu | #include "device_launch_parameters.h"
#include "fractal_params.h"
#include <algorithm>
#include <cuda_runtime.h>
using namespace std;
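// Compact float3 helpers used throughout this file:
// !v is the reciprocal length of v, v1 & v2 the dot product, v1 ^ v2 the cross
// product, % component-wise fmodf; +, -, *, / act component-wise or with a scalar.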
__device__ inline float operator !(const float3 v)
{
return rsqrtf(v.x * v.x + v.y * v.y + v.z * v.z);
}
__device__ inline float operator &(const float3 v1, const float3 v2)
{
return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z;
}
__device__ inline float3 operator ^(const float3 v1, const float3 v2)
{
return make_float3(
v1.y * v2.z - v1.z * v2.y,
v1.z * v2.x - v1.x * v2.z,
v1.x * v2.y - v1.y * v2.x );
}
__device__ inline float3 operator /(float3 v1, const float3 v2)
{
v1.x /= v2.x;
v1.y /= v2.y;
v1.z /= v2.z;
return v1;
}
__device__ inline float3 operator /(float3 v, const float s)
{
v.x /= s;
v.y /= s;
v.z /= s;
return v;
}
__device__ inline float3 operator *(float3 v1, const float3 v2)
{
v1.x *= v2.x;
v1.y *= v2.y;
v1.z *= v2.z;
return v1;
}
__device__ inline float3 operator *(float3 v, const float s)
{
v.x *= s;
v.y *= s;
v.z *= s;
return v;
}
__device__ inline float3 operator +(float3 v1, const float3 v2)
{
v1.x += v2.x;
v1.y += v2.y;
v1.z += v2.z;
return v1;
}
__device__ inline float3 operator +(float3 v, const float s)
{
v.x += s;
v.y += s;
v.z += s;
return v;
}
__device__ inline float3 operator -(float3 v1, const float3 v2)
{
v1.x -= v2.x;
v1.y -= v2.y;
v1.z -= v2.z;
return v1;
}
__device__ inline float3 operator -(float3 v, const float s)
{
v.x -= s;
v.y -= s;
v.z -= s;
return v;
}
__device__ inline float3 operator %(float3 v1, const float3 v2)
{
v1.x = fmodf(v1.x, v2.x);
v1.y = fmodf(v1.y, v2.y);
v1.z = fmodf(v1.z, v2.z);
return v1;
}
__device__ inline int index(const int x, const int y, const int width)
{
return x + y * width;
}
__device__ inline float3 rotate_vec(const float3 vec, const float3 axis, const float cos, const float sin)
{
const float d = (1 - cos) * (axis & vec);
return make_float3(
d * axis.x + vec.x * cos + sin * (axis.y * vec.z - axis.z * vec.y),
d * axis.y + vec.y * cos + sin * (axis.z * vec.x - axis.x * vec.z),
d * axis.z + vec.z * cos + sin * (axis.x * vec.y - axis.y * vec.x));
}
__device__ inline float warp_scalar(float s, const int iterations, const float scale)
{
for (int i = 0; i < iterations; ++i)
s *= scale;
return s;
}
__device__ inline float cube_distance_equation(const float3 p, const float3 cube_center, const float cube_side_length)
{
return max(abs(p.x - cube_center.x), max(abs(p.y - cube_center.y), abs(p.z - cube_center.z))) - cube_side_length / 2;
}
__device__ inline float distance_equation(const float3 p, const float side)
{
return fmaxf(fabsf(p.x), fmaxf(fabsf(p.y), fabsf(p.z))) - side / 2;
}
__device__ inline float3 mod_space(const float3 p, const float3 mod)
{
return ((p + mod / 2) % mod + mod) % mod - mod / 2;
}
__device__ inline float3 fold_space(const float3 p, const float3 n)
{
if ((p & n) >= 0)
return p;
return p - n * 2 * (p & n) / (n & n);
}
__device__ inline float3 fold_menger(float3 vec)
{
float a = fminf(vec.x - vec.y, 0.f);
vec.x -= a;
vec.y += a;
a = fminf(vec.x - vec.z, 0.f);
vec.x -= a;
vec.z += a;
a = fminf(vec.y - vec.z, 0.f);
vec.y -= a;
vec.z += a;
return vec;
}
__device__ inline float3 max_space(float3 a, const float3 b)
{
a.x = fmaxf(a.x, b.x);
a.y = fmaxf(a.y, b.y);
a.z = fmaxf(a.z, b.z);
return a;
}
__device__ inline float3 max_space(float3 a, const float b)
{
a.x = fmaxf(a.x, b);
a.y = fmaxf(a.y, b);
a.z = fmaxf(a.z, b);
return a;
}
__device__ inline float3 min_space(float3 a, const float3 b)
{
a.x = fminf(a.x, b.x);
a.y = fminf(a.y, b.y);
a.z = fminf(a.z, b.z);
return a;
}
__device__ inline float3 min_space(float3 a, const float b)
{
a.x = fminf(a.x, b);
a.y = fminf(a.y, b);
a.z = fminf(a.z, b);
return a;
}
__device__ inline float3 fold_box(const float3 z, const float r)
{
return max_space(min_space(z, r), -r) * 2 - z;
}
__device__ inline float3 abs_space(float3 p)
{
p.x = fabsf(p.x);
p.y = fabsf(p.y);
p.z = fabsf(p.z);
return p;
}
__device__ inline float3 abs_space_x(float3 p)
{
p.x = fabsf(p.x);
return p;
}
__device__ inline float3 abs_space_y(float3 p)
{
p.y = fabsf(p.y);
return p;
}
__device__ inline float3 abs_space_z(float3 p)
{
p.z = fabsf(p.z);
return p;
}
__device__ inline float3 rotate_x(const float3 z, const float s, const float c)
{
return make_float3(z.x, c * z.y + s * z.z, c * z.z - s * z.y);
}
__device__ inline float3 rotate_y(const float3 z, const float s, const float c)
{
return make_float3(c * z.x - s * z.z, z.y, c * z.z + s * z.x);
}
__device__ inline float3 rotate_z(const float3 z, const float s, const float c)
{
return make_float3(c * z.x + s * z.y, c * z.y - s * z.x, z.z);
}
__device__ inline void transform(float3& p, const optimized_fractal_info params)
{
p = p / params.scale;
p = abs_space(p);
p = rotate_z(p, params.sin_theta, params.cos_theta);
p = fold_menger(p);
p = rotate_x(p, params.sin_phi, params.cos_phi);
p = p - params.offset;
}
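// The fractal is an iterated function system: warp_space applies the
// scale/fold/rotate transform above `iterations` times, and scaled_de maps the
// distance to the base cube back to world scale to obtain a distance estimate.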
__device__ inline void warp_space(float3& p, const int iterations, const optimized_fractal_info params)
{
for (int i = 0; i < iterations; ++i)
transform(p, params);
}
__device__ inline float scaled_de(float3 p, const int iterations, const float side, const optimized_fractal_info params)
{
warp_space(p, iterations, params);
return warp_scalar(distance_equation(p, side), iterations, params.scale);
}
__device__ inline float3 normal(const float3 p, const int iterations, const float side, const optimized_fractal_info params, const float epsilon)
{
float scaledde = scaled_de(make_float3(p.x, p.y, p.z), iterations, side, params);
const float3 scaled = {
scaled_de(make_float3(p.x + epsilon, p.y, p.z), iterations, side, params) -
scaledde,
scaled_de(make_float3(p.x, p.y + epsilon, p.z), iterations, side, params) -
scaledde,
scaled_de(make_float3(p.x, p.y, p.z + epsilon), iterations, side, params) -
scaledde
};
return scaled * !scaled;
}
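// Soft shadow: march from p towards the light along d, tracking the smallest
// cone angle (scaled by shadow_strength) subtended by nearby geometry;
// returns 0 for fully shadowed and up to 1 for fully lit.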
__device__ inline float soft_shadow(const float3 p, const float3 d, const float shadow_strength, const int iterations, const float side, const optimized_fractal_info params, const float minDist, const float maxDist, const float minAngle)
{
float darkness = 1;
float prev_dist = 2147483647;
float total_dist = minDist;
while (total_dist < maxDist)
{
const float dist = scaled_de(p + d * total_dist, iterations, side, params);
const float old_new_int_dist = dist * dist / (2 * prev_dist);
const float leg_length = sqrtf(dist * dist - old_new_int_dist * old_new_int_dist);
float angle = shadow_strength * leg_length / fmaxf(0.f, total_dist - old_new_int_dist);
darkness = fminf(darkness, angle);
prev_dist = dist;
total_dist += dist;
if (dist <= 0 || darkness < minAngle)
return 0;
}
return darkness;
}
__device__ inline float orbit(float3 p, const int iterations, const float side, const optimized_fractal_info params)
{
const float direction = scaled_de(p, 1, side, params);
for (int i = 0; i < iterations; ++i)
{
transform(p, params);
const float f = warp_scalar(distance_equation(p, side), i, params.scale);
if (f * direction >= 0)
return f / params.scale * 6;
}
return warp_scalar(distance_equation(p, side), iterations - 1, params.scale) * 6;
}
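// red/green/blue are phase-shifted copies of a period-6 trapezoid wave, turning
// the orbit value into a simple hue wheel.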
__device__ inline float trapezoid_wave(const float loc)
{
return fminf(fmaxf(fabsf(loc - 3), 0) - 1, 1);
}
__device__ inline float red(const float loc)
{
return trapezoid_wave(fmodf(loc, 6));
}
__device__ inline float green(const float loc)
{
return trapezoid_wave(fmodf(loc + 4, 6));
}
__device__ inline float blue(const float loc)
{
return trapezoid_wave(fmodf(loc + 2, 6));
}
__global__ void get_direction_length_inv(float* ray_length_invs, const float focal_length, const int width, const int height)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height)
return;
ray_length_invs[index(i, height - 1 - j, width)] = rsqrtf(
focal_length * focal_length + ((j - height / 2.f) * (j - height / 2.f) + (i - width / 2.f) * (i - width / 2.f)) / height / height);
}
__global__ void get_direction(float3* directions, const float* ray_length_invs, const float3 x, const float3 y, const float3 z,
const float focal_length, const int width, const int height)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height)
return;
const int h = index(i, height - 1 - j, width);
directions[h] = (z * focal_length + y * ((j - height / 2.f) / height) + x * ((i - width / 2.f) / height)) * ray_length_invs[h];
}
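// Sphere tracing: advance each ray by the distance estimate until it is within
// min_dist of the surface, escapes past maximum_distance, or runs out of steps.
// Hits are shaded with diffuse lighting attenuated by the soft shadow plus a
// step-count ambient-occlusion term, and coloured by the orbit value; misses
// are written as black.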
__global__ void march_ray(const float3* directions, unsigned char* pixel_values, float3 camera,
const float3 light, const float3 cols, const int width, const int height, const int iterations, const optimized_fractal_info params)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= width || j >= height)
return;
const int h = index(i, j, width);
int step_num = 0;
float min_dist = minimum_distance / (iterations * iterations + 1);
float dist = min_dist + 1;
float total_dist = 0;
float3 direction = directions[h];
while (total_dist < maximum_distance && dist > min_dist && (step_num < max_steps || dist < min_dist * 3))
{
dist = scaled_de(camera, iterations, side, params);
camera = camera + direction * dist;
total_dist += dist;
++step_num;
}
if (fabsf(dist) <= min_dist)
{
float brightness = 0;
float3 off = light - camera;
const float light_vector_length = !off;
off = off * light_vector_length;
float diffuse_calculated = 0;
const float normal_angle = off & normal(camera, iterations, side, params, min_dist);
if (normal_angle > 0)
diffuse_calculated = fmaxf(cols.y * soft_shadow(camera, off, shadow_strength, iterations, side, params, min_dist,
1/light_vector_length, 0.01f) * normal_angle, 0.f);
brightness += diffuse_calculated + cols.x / (1 + step_num * ambient_occ_strength);
brightness = fminf(fmaxf(brightness, 0.f), 1.f);
const float col = orbit(camera, iterations, side, params);
pixel_values[h * 3] = static_cast<unsigned char>(blue(col) * brightness * 255);
pixel_values[h * 3 + 1] = static_cast<unsigned char>(green(col) * brightness * 255);
pixel_values[h * 3 + 2] = static_cast<unsigned char>(red(col) * brightness * 255);
}
else
{
pixel_values[h * 3] = 0;
pixel_values[h * 3 + 1] = 0;
pixel_values[h * 3 + 2] = 0;
}
} |
449d8f514cbc59ae8bdf5ae9b6029694cc366e57.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "fTanh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
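// Auto-generated benchmark harness: for every matrix size / block shape pair
// above, round the grid up to cover the matrix, warm up with 10 launches of
// fTanh, then time 1000 more launches with std::chrono and print
// [time_us, (block dims), (matrix dims)].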
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *arguments = NULL;
hipMalloc(&arguments, XSIZE*YSIZE);
float *results = NULL;
hipMalloc(&results, XSIZE*YSIZE);
const long size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(fTanh, dim3(gridBlock), dim3(threadBlock), 0, 0, arguments, results, size);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(fTanh, dim3(gridBlock), dim3(threadBlock), 0, 0, arguments, results, size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(fTanh, dim3(gridBlock), dim3(threadBlock), 0, 0, arguments, results, size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 449d8f514cbc59ae8bdf5ae9b6029694cc366e57.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "fTanh.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
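// Auto-generated benchmark harness: for every matrix size / block shape pair
// above, round the grid up to cover the matrix, warm up with 10 launches of
// fTanh, then time 1000 more launches with std::chrono and print
// [time_us, (block dims), (matrix dims)].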
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *arguments = NULL;
cudaMalloc(&arguments, XSIZE*YSIZE);
float *results = NULL;
cudaMalloc(&results, XSIZE*YSIZE);
const long size = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
fTanh<<<gridBlock,threadBlock>>>(arguments,results,size);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
fTanh<<<gridBlock,threadBlock>>>(arguments,results,size);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
fTanh<<<gridBlock,threadBlock>>>(arguments,results,size);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0457462716c492c72535b1dff4f96887069d5315.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]>
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-FileType: SOURCE
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the license found in the LICENSE.txt file in the root
* directory of this source tree.
*/
// =======
// Headers
// =======
#include "./cu_golub_kahn_bidiagonalization.h"
#include <hipblas.h> // hipblasHandle_t
#include <cmath> // sqrt
#include "../_cu_basic_algebra/cu_vector_operations.h" // cuVectorOperations
#include "../_cu_trace_estimator/cu_orthogonalization.h" // cuOrthogonaliza...
#include "../_cuda_utilities/cuda_interface.h" // alloc, copy_to_device, del
// ============================
// golub-kahn bidiagonalization
// ============================
/// \brief Bi-diagonalizes the positive-definite matrix \c A using
/// Golub-Kahn-Lanczos method.
///
/// \details This method bi-diagonalizes matrix \c A to \c B using the start
/// vector \c w. \c m is the Lanczos degree, which will be the size
/// of square matrix \c B.
///
/// The output of this function are \c alpha (of length \c m) and
/// \c beta (of length \c m+1) which are diagonal (\c alpha[:]) and
/// off-diagonal (\c beta[1:]) elements of the bi-diagonal \c (m,m)
/// symmetric and positive-definite matrix \c B.
///
/// #### Lanczos tridiagonalization vs Golub-Kahn Bidiagonalization
/// * The Lanczos tri-diagonalization is twice faster (in runtime),
/// as it has only one matrix-vector multiplication. Whereas the
/// Golub-Kahn bi-diagonalization has two matrix-vector
/// multiplications.
/// * The Lanczos tri-diagonalization can only be applied to
/// symmetric matrices. Whereas the Golub-Kahn bi-diagonalization
/// can be applied to any matrix.
///
/// #### Reference
///
/// * NetLib Algorithm 6.27,
/// netlib.org/utk/people/JackDongarra/etemplates/node198.html
/// * Matrix Computations, Golub, p. 495
/// * Demmel, J., Templates for Solution of Algebraic Eigenvalue
/// Problem, p. 143
///
/// \warning When the matrix \c A is very close to the identity matrix, the
/// Golub-Kahn bi-diagonalization method can not find \c beta, as
/// \c beta becomes zero. If \c A is not exactly identity, you may
/// decrease the Tolerance to a very small number. However, if \c A
/// is almost identity matrix, decreasing \c lanczos_tol will not
/// help, and this function cannot be used.
///
/// \sa lanczos_tridiagonalizaton
///
/// \param[in] A
/// A linear operator that represents a matrix of size \c (n,n) and
/// can perform matrix-vector operation with \c dot() method and
/// transposed matrix-vector operation with \c transpose_dot()
/// method. This matrix should be positive-definite.
/// \param[in] v
/// Start vector for the Lanczos tri-diagonalization. Column vector
/// of size \c n. It could be generated randomly. Often it is
/// generated by the Rademacher distribution with entries \c +1 and
/// \c -1.
/// \param[in] n
/// Size of the square matrix \c A, which is also the size of the
/// vector \c v.
/// \param[in] m
/// Lanczos degree, which is the number of Lanczos iterations.
/// \param[in] lanczos_tol
/// The tolerance of the residual error of the Lanczos iteration.
/// \param[in] orthogonalize
/// Indicates whether to orthogonalize the orthogonal eigenvectors
/// during Lanczos recursive iterations.
/// * If set to \c 0, no orthogonalization is performed.
/// * If set to a negative integer, a newly computed eigenvector is
/// orthogonalized against all the previous eigenvectors (full
/// reorthogonalization).
/// * If set to a positive integer, say \c q less than
/// \c lanczos_degree, the newly computed eigenvector is
/// orthogonalized against the last \c q previous eigenvectors
/// (partial reorthogonalization).
/// * If set to an integer larger than \c lanczos_degree, it is cut
/// to \c lanczos_degree, which effectively orthogonalizes
/// against all previous eigenvectors (full reorthogonalization).
/// \param[out] alpha
/// This is a 1D array of size \c m and \c alpha[:] constitute the
/// diagonal elements of the bi-diagonal matrix \c B. This is the
/// output and written in place.
/// \param[out] beta
/// This is a 1D array of size \c m, and the elements \c beta[:]
/// constitute the sup-diagonals of the bi-diagonal matrix \c B.
/// This array is the output and written in place.
/// \return Counter for the Lanczos iterations. Normally, the size of the
/// output matrix should be \c (m,m), which is the Lanczos degree.
/// However, if the algorithm terminates early, the size of \c
/// alpha and \c beta, and hence the output tri-diagonal matrix, is
/// smaller. This counter keeps track of the *non-zero* size of \c
/// alpha and \c beta.
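///
/// A minimal calling sketch (hypothetical: \c op is some concrete
/// cuLinearOperator implementation and \c v a host start vector of
/// length \c n, copied to the device internally):
///
/// \code
/// std::vector<double> alpha(m), beta(m);
/// IndexType used = cu_golub_kahn_bidiagonalization<double>(
/// &op, v, n, m, /*lanczos_tol=*/1e-8, /*orthogonalize=*/0,
/// alpha.data(), beta.data());
/// \endcode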
template <typename DataType>
IndexType cu_golub_kahn_bidiagonalization(
cuLinearOperator<DataType>* A,
const DataType* v,
const LongIndexType n,
const IndexType m,
const DataType lanczos_tol,
const FlagType orthogonalize,
DataType* alpha,
DataType* beta)
{
// Get cublas handle
hipblasHandle_t cublas_handle = A->get_cublas_handle();
// buffer_size is number of last orthogonal vectors to keep in buffers U, V
IndexType buffer_size;
if (orthogonalize == 0)
{
// At least two vectors must be stored in buffer for Lanczos recursion
buffer_size = 2;
}
else if ((orthogonalize < 0) ||
(orthogonalize > static_cast<FlagType>(m) - 1))
{
// Using full reorthogonalization, keep all of the m vectors in buffer
buffer_size = m;
}
else
{
// Orthogonalize with less than m vectors (0 < orthogonalize < m-1)
// plus one vector for the latest (the j-th) vector
buffer_size = orthogonalize + 1;
}
// Allocate 2D array (as 1D array, and coalesced row-wise) to store
// the last buffer_size of orthogonalized vectors of length n. New vectors
// are stored by cycling through the buffer to replace with old ones.
DataType* device_U = CudaInterface<DataType>::alloc(n * buffer_size);
DataType* device_V = CudaInterface<DataType>::alloc(n * buffer_size);
// Normalize vector v and copy to v_old
CudaInterface<DataType>::copy_to_device(v, n, &device_V[0]);
cuVectorOperations<DataType>::normalize_vector_in_place(
cublas_handle, &device_V[0], n);
// Declare iterators
IndexType j;
IndexType lanczos_size = 0;
IndexType num_ortho;
// Golub-Kahn iteration
for (j=0; j < m; ++j)
{
// Counter for the non-zero size of alpha and beta
++lanczos_size;
// u_new = A.dot(v_old)
A->dot(&device_V[(j % buffer_size)*n], &device_U[(j % buffer_size)*n]);
// Performing: u_new[i] = u_new[i] - beta[j] * u_old[i]
if (j > 0)
{
cuVectorOperations<DataType>::subtract_scaled_vector(
cublas_handle,
&device_U[((j-1) % buffer_size)*n], n, beta[j-1],
&device_U[(j % buffer_size)*n]);
}
// orthogonalize u_new against previous vectors
if (orthogonalize != 0)
{
// Find how many column vectors are filled so far in the buffer V
if (j < buffer_size)
{
num_ortho = j;
}
else
{
num_ortho = buffer_size - 1;
}
// Gram-Schmidt process
if (j > 0)
{
cuOrthogonalization<DataType>::gram_schmidt_process(
cublas_handle, &device_U[0], n, buffer_size,
(j-1)%buffer_size, num_ortho,
&device_U[(j % buffer_size)*n]);
}
}
// Normalize u_new and set its norm to alpha[j]
alpha[j] = cuVectorOperations<DataType>::normalize_vector_in_place(
cublas_handle, &device_U[(j % buffer_size)*n], n);
// Performing: v_new = A.T.dot(u_new) - alpha[j] * v_old
A->transpose_dot(&device_U[(j % buffer_size)*n],
&device_V[((j+1) % buffer_size)*n]);
// Performing: v_new[i] = v_new[i] - alpha[j] * v_old[i]
cuVectorOperations<DataType>::subtract_scaled_vector(
cublas_handle, &device_V[(j % buffer_size)*n], n, alpha[j],
&device_V[((j+1) % buffer_size)*n]);
// orthogonalize v_new against previous vectors
if (orthogonalize != 0)
{
cuOrthogonalization<DataType>::gram_schmidt_process(
cublas_handle, &device_V[0], n, buffer_size, j%buffer_size,
num_ortho, &device_V[((j+1) % buffer_size)*n]);
}
// Update beta as the norm of v_new
beta[j] = cuVectorOperations<DataType>::normalize_vector_in_place(
cublas_handle, &device_V[((j+1) % buffer_size)*n], n);
// Exit criterion when the vector r is zero. If each component of a
// zero vector has the tolerance epsilon, (which is called lanczos_tol
// here), the tolerance of norm of r is epsilon times sqrt of n.
if (beta[j] < lanczos_tol * sqrt(n))
{
break;
}
}
// Free dynamic memory
CudaInterface<DataType>::del(device_U);
CudaInterface<DataType>::del(device_V);
return lanczos_size;
}
// ===============================
// Explicit template instantiation
// ===============================
// golub kahn bidiagonalization
template IndexType cu_golub_kahn_bidiagonalization<float>(
cuLinearOperator<float>* A,
const float* v,
const LongIndexType n,
const IndexType m,
const float lanczos_tol,
const FlagType orthogonalize,
float* alpha,
float* beta);
template IndexType cu_golub_kahn_bidiagonalization<double>(
cuLinearOperator<double>* A,
const double* v,
const LongIndexType n,
const IndexType m,
const double lanczos_tol,
const FlagType orthogonalize,
double* alpha,
double* beta);
| 0457462716c492c72535b1dff4f96887069d5315.cu | /*
* SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]>
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-FileType: SOURCE
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the license found in the LICENSE.txt file in the root
* directory of this source tree.
*/
// =======
// Headers
// =======
#include "./cu_golub_kahn_bidiagonalization.h"
#include <cublas_v2.h> // cublasHandle_t
#include <cmath> // sqrt
#include "../_cu_basic_algebra/cu_vector_operations.h" // cuVectorOperations
#include "../_cu_trace_estimator/cu_orthogonalization.h" // cuOrthogonaliza...
#include "../_cuda_utilities/cuda_interface.h" // alloc, copy_to_device, del
// ============================
// golub-kahn bidiagonalization
// ============================
/// \brief Bi-diagonalizes the positive-definite matrix \c A using
/// Golub-Kahn-Lanczos method.
///
/// \details This method bi-diagonalizes matrix \c A to \c B using the start
/// vector \c w. \c m is the Lanczos degree, which will be the size
/// of square matrix \c B.
///
/// The output of this function are \c alpha (of length \c m) and
/// \c beta (of length \c m+1) which are diagonal (\c alpha[:]) and
/// off-diagonal (\c beta[1:]) elements of the bi-diagonal \c (m,m)
/// symmetric and positive-definite matrix \c B.
///
/// #### Lanczos tridiagonalization vs Golub-Kahn Bidiagonalization
/// * The Lanczos tri-diagonalization is twice faster (in runtime),
/// as it has only one matrix-vector multiplication. Whereas the
/// Golub-Kahn bi-diagonalization has two matrix-vector
/// multiplications.
/// * The Lanczos tri-diagonalization can only be applied to
/// symmetric matrices. Whereas the Golub-Kahn bi-diagonalization
/// can be applied to any matrix.
///
/// #### Reference
///
/// * NetLib Algorithm 6.27,
/// netlib.org/utk/people/JackDongarra/etemplates/node198.html
/// * Matrix Computations, Golub, p. 495
/// * Demmel, J., Templates for Solution of Algebraic Eigenvalue
/// Problem, p. 143
///
/// \warning When the matrix \c A is very close to the identity matrix, the
/// Golub-Kahn bi-diagonalization method can not find \c beta, as
/// \c beta becomes zero. If \c A is not exactly identity, you may
/// decrease the Tolerance to a very small number. However, if \c A
/// is almost identity matrix, decreasing \c lanczos_tol will not
/// help, and this function cannot be used.
///
/// \sa lanczos_tridiagonalizaton
///
/// \param[in] A
/// A linear operator that represents a matrix of size \c (n,n) and
/// can perform matrix-vector operation with \c dot() method and
/// transposed matrix-vector operation with \c transpose_dot()
/// method. This matrix should be positive-definite.
/// \param[in] v
/// Start vector for the Lanczos tri-diagonalization. Column vector
/// of size \c n. It could be generated randomly. Often it is
/// generated by the Rademacher distribution with entries \c +1 and
/// \c -1.
/// \param[in] n
/// Size of the square matrix \c A, which is also the size of the
/// vector \c v.
/// \param[in] m
/// Lanczos degree, which is the number of Lanczos iterations.
/// \param[in] lanczos_tol
/// The tolerance of the residual error of the Lanczos iteration.
/// \param[in] orthogonalize
/// Indicates whether to orthogonalize the orthogonal eigenvectors
/// during Lanczos recursive iterations.
/// * If set to \c 0, no orthogonalization is performed.
/// * If set to a negative integer, a newly computed eigenvector is
/// orthogonalized against all the previous eigenvectors (full
/// reorthogonalization).
/// * If set to a positive integer, say \c q less than
/// \c lanczos_degree, the newly computed eigenvector is
/// orthogonalized against the last \c q previous eigenvectors
/// (partial reorthogonalization).
/// * If set to an integer larger than \c lanczos_degree, it is cut
/// to \c lanczos_degree, which effectively orthogonalizes
/// against all previous eigenvectors (full reorthogonalization).
/// \param[out] alpha
/// This is a 1D array of size \c m and \c alpha[:] constitute the
/// diagonal elements of the bi-diagonal matrix \c B. This is the
/// output and written in place.
/// \param[out] beta
/// This is a 1D array of size \c m, and the elements \c beta[:]
/// constitute the sup-diagonals of the bi-diagonal matrix \c B.
/// This array is the output and written in place.
/// \return Counter for the Lanczos iterations. Normally, the size of the
/// output matrix should be \c (m,m), which is the Lanczos degree.
/// However, if the algorithm terminates early, the size of \c
/// alpha and \c beta, and hence the output tri-diagonal matrix, is
/// smaller. This counter keeps track of the *non-zero* size of \c
/// alpha and \c beta.
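///
/// A minimal calling sketch (hypothetical: \c op is some concrete
/// cuLinearOperator implementation and \c v a host start vector of
/// length \c n, copied to the device internally):
///
/// \code
/// std::vector<double> alpha(m), beta(m);
/// IndexType used = cu_golub_kahn_bidiagonalization<double>(
/// &op, v, n, m, /*lanczos_tol=*/1e-8, /*orthogonalize=*/0,
/// alpha.data(), beta.data());
/// \endcode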
template <typename DataType>
IndexType cu_golub_kahn_bidiagonalization(
cuLinearOperator<DataType>* A,
const DataType* v,
const LongIndexType n,
const IndexType m,
const DataType lanczos_tol,
const FlagType orthogonalize,
DataType* alpha,
DataType* beta)
{
// Get cublas handle
cublasHandle_t cublas_handle = A->get_cublas_handle();
// buffer_size is number of last orthogonal vectors to keep in buffers U, V
IndexType buffer_size;
if (orthogonalize == 0)
{
// At least two vectors must be stored in buffer for Lanczos recursion
buffer_size = 2;
}
else if ((orthogonalize < 0) ||
(orthogonalize > static_cast<FlagType>(m) - 1))
{
// Using full reorthogonalization, keep all of the m vectors in buffer
buffer_size = m;
}
else
{
// Orthogonalize with less than m vectors (0 < orthogonalize < m-1)
// plus one vector for the latest (the j-th) vector
buffer_size = orthogonalize + 1;
}
// Allocate 2D array (as 1D array, and coalesced row-wise) to store
// the last buffer_size of orthogonalized vectors of length n. New vectors
// are stored by cycling through the buffer to replace with old ones.
DataType* device_U = CudaInterface<DataType>::alloc(n * buffer_size);
DataType* device_V = CudaInterface<DataType>::alloc(n * buffer_size);
// Normalize vector v and copy to v_old
CudaInterface<DataType>::copy_to_device(v, n, &device_V[0]);
cuVectorOperations<DataType>::normalize_vector_in_place(
cublas_handle, &device_V[0], n);
// Declare iterators
IndexType j;
IndexType lanczos_size = 0;
IndexType num_ortho;
// Golub-Kahn iteration
for (j=0; j < m; ++j)
{
// Counter for the non-zero size of alpha and beta
++lanczos_size;
// u_new = A.dot(v_old)
A->dot(&device_V[(j % buffer_size)*n], &device_U[(j % buffer_size)*n]);
// Performing: u_new[i] = u_new[i] - beta[j] * u_old[i]
if (j > 0)
{
cuVectorOperations<DataType>::subtract_scaled_vector(
cublas_handle,
&device_U[((j-1) % buffer_size)*n], n, beta[j-1],
&device_U[(j % buffer_size)*n]);
}
// orthogonalize u_new against previous vectors
if (orthogonalize != 0)
{
// Find how many column vectors are filled so far in the buffer V
if (j < buffer_size)
{
num_ortho = j;
}
else
{
num_ortho = buffer_size - 1;
}
// Gram-Schmidt process
if (j > 0)
{
cuOrthogonalization<DataType>::gram_schmidt_process(
cublas_handle, &device_U[0], n, buffer_size,
(j-1)%buffer_size, num_ortho,
&device_U[(j % buffer_size)*n]);
}
}
// Normalize u_new and set its norm to alpha[j]
alpha[j] = cuVectorOperations<DataType>::normalize_vector_in_place(
cublas_handle, &device_U[(j % buffer_size)*n], n);
// Performing: v_new = A.T.dot(u_new) - alpha[j] * v_old
A->transpose_dot(&device_U[(j % buffer_size)*n],
&device_V[((j+1) % buffer_size)*n]);
// Performing: v_new[i] = v_new[i] - alpha[j] * v_old[i]
cuVectorOperations<DataType>::subtract_scaled_vector(
cublas_handle, &device_V[(j % buffer_size)*n], n, alpha[j],
&device_V[((j+1) % buffer_size)*n]);
// orthogonalize v_new against previous vectors
if (orthogonalize != 0)
{
cuOrthogonalization<DataType>::gram_schmidt_process(
cublas_handle, &device_V[0], n, buffer_size, j%buffer_size,
num_ortho, &device_V[((j+1) % buffer_size)*n]);
}
// Update beta as the norm of v_new
beta[j] = cuVectorOperations<DataType>::normalize_vector_in_place(
cublas_handle, &device_V[((j+1) % buffer_size)*n], n);
// Exit criterion when the vector r is zero. If each component of a
// zero vector has the tolerance epsilon, (which is called lanczos_tol
// here), the tolerance of norm of r is epsilon times sqrt of n.
if (beta[j] < lanczos_tol * sqrt(n))
{
break;
}
}
// Free dynamic memory
CudaInterface<DataType>::del(device_U);
CudaInterface<DataType>::del(device_V);
return lanczos_size;
}
// ===============================
// Explicit template instantiation
// ===============================
// golub kahn bidiagonalization
template IndexType cu_golub_kahn_bidiagonalization<float>(
cuLinearOperator<float>* A,
const float* v,
const LongIndexType n,
const IndexType m,
const float lanczos_tol,
const FlagType orthogonalize,
float* alpha,
float* beta);
template IndexType cu_golub_kahn_bidiagonalization<double>(
cuLinearOperator<double>* A,
const double* v,
const LongIndexType n,
const IndexType m,
const double lanczos_tol,
const FlagType orthogonalize,
double* alpha,
double* beta);
|
94052a96b64db43d59be3995ce03018dc072b03b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// CSR-SpMV kernel
__global__ void
zgecsrmv_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
dy[ row ] = dot *alpha + beta * dy[ row ];
}
}
// shifted CSR-SpMV kernel
__global__ void
zgecsrmv_kernel_shift(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex lambda,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zgecsrmv_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
lambda magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaDoubleComplex_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zgecsrmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex lambda,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magma_int_t offset,
magma_int_t blocksize,
magma_index_t * addrows,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zgecsrmv_kernel_shift), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, lambda, dval, drowptr, dcolind, dx,
beta, offset, blocksize, addrows, dy);
return MAGMA_SUCCESS;
}
| 94052a96b64db43d59be3995ce03018dc072b03b.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
// CSR-SpMV kernel
__global__ void
zgecsrmv_kernel(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
dy[ row ] = dot *alpha + beta * dy[ row ];
}
}
// shifted CSR-SpMV kernel
__global__ void
zgecsrmv_kernel_shift(
int num_rows,
int num_cols,
magmaDoubleComplex alpha,
magmaDoubleComplex lambda,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * dx[ dcolind[j] ];
if( row<blocksize )
dy[ row ] = dot * alpha - lambda
* dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda
* dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
The input format is CSR (val, row, col).
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zgecsrmv_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha * ( A -lambda I ) * x + beta * y on the GPU.
It is a shifted version of the CSR-SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
lambda magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[in]
offset magma_int_t
in case not the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrixpowerskernel is used
@param[out]
dy magmaDoubleComplex_ptr
output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C" magma_int_t
magma_zgecsrmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magmaDoubleComplex alpha,
magmaDoubleComplex lambda,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magma_int_t offset,
magma_int_t blocksize,
magma_index_t * addrows,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zgecsrmv_kernel_shift<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, lambda, dval, drowptr, dcolind, dx,
beta, offset, blocksize, addrows, dy);
return MAGMA_SUCCESS;
}
|
6899bf21e2eeab2147c963e19966a20bac6812b9.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _CENTROID_KERNEL_H_
#define _CENTROID_KERNEL_H_
#include "CUDA_Kernels.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"
#include "hip/hip_runtime.h"
#include "hip/hip_vector_types.h"
#include "hip/device_functions.h"
#include "device_atomic_functions.h"
//#include "device_atomic_functions.hpp"
#include "math_functions.h"
#include "sm_30_intrinsics.h"
__device__ void AtomicMax(float * const address, const float value)
{
if (*address >= value) return;
int * const address_as_i = (int *)address;
int old = *address_as_i, assumed;
do {
assumed = old;
if (__int_as_float(assumed) >= value) break;
old = atomicCAS(address_as_i, assumed, __float_as_int(value));
} while (assumed != old);
}
__device__ float2 warp_reduce_max(float2 val) {
for (int offset = 16; offset > 0; offset /= 2) {
float temp = __shfl_down(val.x, offset);
float temp_loc = __shfl_down(val.y, offset);
if (temp > val.x) {
val.x = temp;
val.y = temp_loc;
}
}
return val;
}
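// Grid-stride reduction locating the maximum pixel value and its flat index:
// warp shuffles reduce within each warp, shared memory merges warps, and block
// results are combined into output[1] (value) and output[0] (index) via
// AtomicMax. Note: the initial seed read at `id` appears to assume the image is
// at least blockDim*gridDim in each dimension.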
__global__ void max_kernel(float* __restrict__ output, const float* __restrict__ input, const int2 i_size)
{
__shared__ float smem[2];
int tid = threadIdx.y * blockDim.x + threadIdx.x; // thread ID within the block
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
if (tid < 2) {
smem[tid] = 0.0f;
}
__syncthreads();
int id = idx_y*i_size.x + idx_x;
float2 f = { (float)input[id], (float)id };
for (int y = idx_y; y < i_size.y; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < i_size.x; x += blockDim.x * gridDim.x) {
int gid = y*i_size.x + x;
if (f.x < input[gid]) {
f.x = input[gid];
f.y = gid;
}
}
}
const float2 f_max = warp_reduce_max(f);
if ((tid & 31) == 0) { // once per warp
AtomicMax(&smem[0], f_max.x);
if (f_max.x == smem[0]) smem[1] = f_max.y;
}
__syncthreads();
if (tid == 0) { // only thread 0 of each block
AtomicMax(&output[1], smem[0]);
if (output[1] == smem[0]) output[0] = smem[1];
}
__syncthreads();
for (int y = idx_y; y < i_size.y; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < i_size.x; x += blockDim.x * gridDim.x) {
int gid = y*i_size.x + x;
if (output[1] == input[gid]) {
output[0] = gid;
}
}
}
};
// Device code
extern "C" void findmax_GPU(
float* output,
const float* input,
int width, int height, int GPUNo
)
{
// Launch the Vector Add CUDA Kernel
dim3 _threads(32, 32);
dim3 _blocks(10, 10);
int2 i_size = { width, height };
//hipSetDevice(GPUNo);
max_kernel << <_blocks, _threads >> >(output, input, i_size);
}
__inline__ __device__ float3 warp_reduce_sum_triple(float3 val) {
for (int offset = 16; offset > 0; offset /= 2) {
val.x += __shfl_xor(val.x, offset);
val.y += __shfl_xor(val.y, offset);
val.z += __shfl_xor(val.z, offset);
}
return val;
}
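// Accumulates raw image moments (sum x, sum y, count, sum x^2, sum y^2, sum x*y)
// over pixels brighter than `threshold` that lie within sqrt(radius_max2) of the
// brightest pixel i_max[0]; partial sums are atomically added into output[0..5].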
extern "C" __global__ void ImageMoment_Max_Radius_binarization_kernel(float* __restrict__ output,
const float* __restrict__ input, const int2 input_size,
const float* __restrict__ i_max, const float threshold, const float radius_max2)
{
__shared__ float smem[6];
int tid = threadIdx.y * blockDim.x + threadIdx.x; // thread ID within the block
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
if (tid < 6) {
smem[threadIdx.x] = 0.0f;
}
__syncthreads();
float3 g = { 0 };
float3 h = { 0 };
float yc = floor(i_max[0] / input_size.x);
float xc = (i_max[0] - yc*input_size.x);
for (int y = idx_y; y < input_size.y; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < input_size.x; x += blockDim.x * gridDim.x) {
if (input[y*input_size.x + x] > threshold) {
if (((x - xc)*(x - xc) + (y - yc)*(y - yc)) < radius_max2) {
g.x += x;
g.y += y;
g.z += 1;
h.x += x*x;
h.y += y*y;
h.z += x*y;
}
}
}
}
const float3 g_sum = warp_reduce_sum_triple(g);
const float3 h_sum = warp_reduce_sum_triple(h);
if ((tid & 31) == 0) { // once per warp
atomicAdd(&smem[0], g_sum.x);
atomicAdd(&smem[1], g_sum.y);
atomicAdd(&smem[2], g_sum.z);
atomicAdd(&smem[3], h_sum.x);
atomicAdd(&smem[4], h_sum.y);
atomicAdd(&smem[5], h_sum.z);
}
__syncthreads();
if (tid < 6) // lowest 6 threads of each block
atomicAdd(&output[threadIdx.x], smem[threadIdx.x]);
}
extern "C" __global__ void imgmoment2centroid_kernel(float* __restrict__ d_Dst, const float* __restrict__ d_Src) {
d_Dst[0] = d_Src[0] / d_Src[2];
d_Dst[1] = d_Src[1] / d_Src[2];
};
extern "C" void getImageMoment_GPU(
float* output,
const float* input,
const float* max_pt,
int width, int height,
float threshold,
float radius_max
) {
// Launch the Vector Add CUDA Kernel
dim3 _threads(16, 16);
dim3 _blocks(10, 10);
int2 i_size = { width, height };
//hipSetDevice(GPU0);
ImageMoment_Max_Radius_binarization_kernel << <_blocks, _threads >> >(output, input, i_size, max_pt, threshold, radius_max*radius_max);
}
#endif | 6899bf21e2eeab2147c963e19966a20bac6812b9.cu | #ifndef _CENTROID_KERNEL_H_
#define _CENTROID_KERNEL_H_
#include "CUDA_Kernels.h"
#include "cuda.h"
#include "cuda_runtime_api.h"
#include "cuda_runtime.h"
#include "vector_types.h"
#include "device_functions.h"
#include "device_atomic_functions.h"
//#include "device_atomic_functions.hpp"
#include "math_functions.h"
#include "sm_30_intrinsics.h"
__device__ void AtomicMax(float * const address, const float value)
{
if (*address >= value) return;
int * const address_as_i = (int *)address;
int old = *address_as_i, assumed;
do {
assumed = old;
if (__int_as_float(assumed) >= value) break;
old = atomicCAS(address_as_i, assumed, __float_as_int(value));
} while (assumed != old);
}
__device__ float2 warp_reduce_max(float2 val) {
for (int offset = 16; offset > 0; offset /= 2) {
float temp = __shfl_down(val.x, offset);
float temp_loc = __shfl_down(val.y, offset);
if (temp > val.x) {
val.x = temp;
val.y = temp_loc;
}
}
return val;
}
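// Grid-stride reduction locating the maximum pixel value and its flat index:
// warp shuffles reduce within each warp, shared memory merges warps, and block
// results are combined into output[1] (value) and output[0] (index) via
// AtomicMax. Note: the initial seed read at `id` appears to assume the image is
// at least blockDim*gridDim in each dimension.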
__global__ void max_kernel(float* __restrict__ output, const float* __restrict__ input, const int2 i_size)
{
__shared__ float smem[2];
int tid = threadIdx.y * blockDim.x + threadIdx.x; // thread ID within the block
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
if (tid < 2) {
smem[tid] = 0.0f;
}
__syncthreads();
int id = idx_y*i_size.x + idx_x;
float2 f = { (float)input[id], (float)id };
for (int y = idx_y; y < i_size.y; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < i_size.x; x += blockDim.x * gridDim.x) {
int gid = y*i_size.x + x;
if (f.x < input[gid]) {
f.x = input[gid];
f.y = gid;
}
}
}
const float2 f_max = warp_reduce_max(f);
if ((tid & 31) == 0) { // once per warp
AtomicMax(&smem[0], f_max.x);
if (f_max.x == smem[0]) smem[1] = f_max.y;
}
__syncthreads();
	if (tid == 0) {	 // thread 0 of each block merges the block result into the global output
AtomicMax(&output[1], smem[0]);
if (output[1] == smem[0]) output[0] = smem[1];
}
__syncthreads();
for (int y = idx_y; y < i_size.y; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < i_size.x; x += blockDim.x * gridDim.x) {
int gid = y*i_size.x + x;
if (output[1] == input[gid]) {
output[0] = gid;
}
}
}
};
// Device code
extern "C" void findmax_GPU(
float* output,
const float* input,
int width, int height, int GPUNo
)
{
	// Launch the maximum-search kernel
dim3 _threads(32, 32);
dim3 _blocks(10, 10);
int2 i_size = { width, height };
//cudaSetDevice(GPUNo);
max_kernel << <_blocks, _threads >> >(output, input, i_size);
}
__inline__ __device__ float3 warp_reduce_sum_triple(float3 val) {
for (int offset = 16; offset > 0; offset /= 2) {
val.x += __shfl_xor(val.x, offset);
val.y += __shfl_xor(val.y, offset);
val.z += __shfl_xor(val.z, offset);
}
return val;
}
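// Thresholded image-moment accumulation: pixels brighter than `threshold` and closer than sqrt(radius_max2)
// to the intensity maximum (linear index i_max[0]) contribute x, y and a count to g, and x*x, y*y, x*y to h.
// Warp reductions and shared-memory atomics combine the per-block sums, which are then added atomically to output[0..5].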
extern "C" __global__ void ImageMoment_Max_Radius_binarization_kernel(float* __restrict__ output,
const float* __restrict__ input, const int2 input_size,
const float* __restrict__ i_max, const float threshold, const float radius_max2)
{
__shared__ float smem[6];
int tid = threadIdx.y * blockDim.x + threadIdx.x; // thread ID within the block
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
if (tid < 6) {
smem[threadIdx.x] = 0.0f;
}
__syncthreads();
float3 g = { 0 };
float3 h = { 0 };
float yc = floor(i_max[0] / input_size.x);
float xc = (i_max[0] - yc*input_size.x);
for (int y = idx_y; y < input_size.y; y += blockDim.y * gridDim.y) {
for (int x = idx_x; x < input_size.x; x += blockDim.x * gridDim.x) {
if (input[y*input_size.x + x] > threshold) {
if (((x - xc)*(x - xc) + (y - yc)*(y - yc)) < radius_max2) {
g.x += x;
g.y += y;
g.z += 1;
h.x += x*x;
h.y += y*y;
h.z += x*y;
}
}
}
}
const float3 g_sum = warp_reduce_sum_triple(g);
const float3 h_sum = warp_reduce_sum_triple(h);
if ((tid & 31) == 0) { // once per warp
atomicAdd(&smem[0], g_sum.x);
atomicAdd(&smem[1], g_sum.y);
atomicAdd(&smem[2], g_sum.z);
atomicAdd(&smem[3], h_sum.x);
atomicAdd(&smem[4], h_sum.y);
atomicAdd(&smem[5], h_sum.z);
}
__syncthreads();
	if (tid < 6)	 // the first 6 threads of each block add its partial sums into the global output
atomicAdd(&output[threadIdx.x], smem[threadIdx.x]);
}
extern "C" __global__ void imgmoment2centroid_kernel(float* __restrict__ d_Dst, const float* __restrict__ d_Src) {
d_Dst[0] = d_Src[0] / d_Src[2];
d_Dst[1] = d_Src[1] / d_Src[2];
};
extern "C" void getImageMoment_GPU(
float* output,
const float* input,
const float* max_pt,
int width, int height,
float threshold,
float radius_max
) {
	// Launch the image-moment accumulation kernel
dim3 _threads(16, 16);
dim3 _blocks(10, 10);
int2 i_size = { width, height };
//cudaSetDevice(GPU0);
ImageMoment_Max_Radius_binarization_kernel << <_blocks, _threads >> >(output, input, i_size, max_pt, threshold, radius_max*radius_max);
}
#endif |
b293608476f74fd6de5dd306203e9f15076cd4f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel2(int *c, const int *a, const int *b)
{
int i = blockIdx.x;
c[i] = (a[i] + b[i])*2;
} | b293608476f74fd6de5dd306203e9f15076cd4f2.cu | #include "includes.h"
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel2(int *c, const int *a, const int *b)
{
int i = blockIdx.x;
c[i] = (a[i] + b[i])*2;
} |
786e6cb3aea546c65c470b1aa1c1595e24c79c6b.hip | // !!! This is a file automatically generated by hipify!!!
/************ The program was written by Lun Ruan, 2018.10***********************/
/*******3D Modeling for pure qP wave equation from Xu,2015************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <malloc.h>
#include <hip/hip_runtime.h>
//#include <hip/hip_runtime.h>
//#include <hip/device_functions.h>
#include "array_new.h"
#include "read_write.h"
#include "GPU_kernel.h"
#include "CPU_function.h"
#define M 10
#define eps 2.22e-17
#define Block_Sizex 8
#define Block_Sizey 8
#define Block_Sizez 8
void modeling3d(int nx, int ny, int nz, int nt, int ntsnap, float dx, float dy, float dz, float dt, int pml, int snapflag, int sx, int sy, int sz,
float *vp, float *epsilon, float *delta, float *source, float *wavelet, float *record, float *dlr,float *ddlr, float *dtb, float *ddtb,
float *dfb, float *ddfb, float *c, float *c2, const char *snap_file)
{
	//timing
clock_t starttime, endtime;
float timespent;
starttime = clock();
int device_num;
hipGetDeviceCount(&device_num);
if(device_num > 0)
hipSetDevice(0);
else
hipSetDevice(0);
float uxMax, uyMax, uzMax, uxyzMax;
int i,j,l,k;
char snapname[100], snapname_S[100], snapxzname[100], snapyzname[100],snapxyname[100],
snapSxzname[100], snapSyzname[100],snapSxyname[100];
dim3 grid((ny+Block_Sizey-1)/Block_Sizey, (nx+Block_Sizex-1)/Block_Sizex, (nz+Block_Sizez-1)/Block_Sizez);
dim3 block(Block_Sizey, Block_Sizex, Block_Sizez);
//allocate host memory
float *snap = array1d((nx-2*pml)*(ny-2*pml)*(nz-2*pml)), *snapxz = array1d((nx-2*pml)*(nz-2*pml)),
*snapyz = array1d((ny-2*pml)*(nz-2*pml)), *snapxy = array1d((ny-2*pml)*(nx-2*pml)),
*snapS = array1d(nx*ny*nz), *snapSxz = array1d(nx*nz), *snapSyz = array1d(ny*nz), *snapSxy = array1d(ny*nx),
*h_ux = array1d(nx*ny*nz), *h_uy = array1d(nx*ny*nz), *h_uz = array1d(nx*ny*nz),*h_u2 = array1d(nx*ny*nz);
/******* allocate device memory *****/
float *d_vp, *d_epsilon,*d_delta,*d_c,*d_c2,*d_dlr,*d_ddlr,*d_dtb,*d_ddtb,*d_dfb,
*d_ddfb, *d_source, *S, *u1, *u2, *u3, *ux, *uy, *uz,*d_record, *d_u,
*wl11, *wl12, *wl13, *wl21, *wl31, *wl32, *wl33, *pl1,*pl2,*pl3,
*wr11, *wr12, *wr13, *wr21, *wr31, *wr32, *wr33, *pr1,*pr2,*pr3,
*wt11, *wt12, *wt13, *wt21, *wt31, *wt32, *wt33, *pt1,*pt2,*pt3,
*wb11, *wb12, *wb13, *wb21, *wb31, *wb32, *wb33, *pb1,*pb2,*pb3,
*wf11, *wf12, *wf13, *wf21, *wf31, *wf32, *wf33, *pf1,*pf2,*pf3,
*wba11, *wba12, *wba13, *wba21, *wba31, *wba32, *wba33, *pba1,*pba2,*pba3;
	/* print CPU-side parameters */
for(i=0;i<nt;i++){
printf("wavelet=%4.3f ",wavelet[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("dlr=%4.3f ",dlr[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("ddlr=%4.3f ",ddlr[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("dtb=%4.3f ",dtb[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("ddtb=%4.3f ",ddtb[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("dfb=%4.3f ",dfb[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("ddfb=%4.3f ",ddfb[i]);
}
printf("\n");
for(i=(M/2)*(M/2+1)+1;i<(M/2)*(M/2+1)+6;i++){
printf("c2=%4.3f ",c2[i]);
}
printf("\n");
hipMalloc(&d_vp, nx*ny*nz*sizeof(float));
hipMalloc(&d_epsilon, nx*ny*nz*sizeof(float));
hipMalloc(&d_delta, nx*ny*nz*sizeof(float));
hipMalloc(&d_c, (M/2+1)*(M/2+1)*sizeof(float));
hipMalloc(&d_c2, (M/2+1)*(M/2+1)*sizeof(float));
hipMalloc(&d_dlr,pml*sizeof(float));
hipMalloc(&d_ddlr, pml*sizeof(float));
hipMalloc(&d_dtb, pml*sizeof(float));
hipMalloc(&d_ddtb, pml*sizeof(float));
hipMalloc(&d_dfb, pml*sizeof(float));
hipMalloc(&d_ddfb, pml*sizeof(float));
hipMalloc(&d_source, nx*ny*nz*sizeof(float));
hipMalloc(&S, nx*ny*nz*sizeof(float));
hipMalloc(&u1, nx*ny*nz*sizeof(float));
hipMalloc(&u2, nx*ny*nz*sizeof(float));
hipMalloc(&u3, nx*ny*nz*sizeof(float));
hipMalloc(&ux, nx*ny*nz*sizeof(float));
hipMalloc(&uy, nx*ny*nz*sizeof(float));
hipMalloc(&uz, nx*ny*nz*sizeof(float));
// hipMalloc(&d_record, (nx-2*pml)*(ny-2*pml)*nt*sizeof(float));
// hipMalloc(&d_u, (nx-2*pml)*(ny-2*pml)*(nz-2*pml)*sizeof(float));
hipMalloc(&wr11, pml*ny*nz*sizeof(float));
hipMalloc(&wr12, pml*ny*nz*sizeof(float));
hipMalloc(&wr13, pml*ny*nz*sizeof(float));
hipMalloc(&wr21, pml*ny*nz*sizeof(float));
hipMalloc(&wr31, pml*ny*nz*sizeof(float));
hipMalloc(&wr32, pml*ny*nz*sizeof(float));
hipMalloc(&wr33, pml*ny*nz*sizeof(float));
hipMalloc(&pr1, pml*ny*nz*sizeof(float));
hipMalloc(&pr2, pml*ny*nz*sizeof(float));
hipMalloc(&pr3, pml*ny*nz*sizeof(float));
hipMalloc(&wl11, pml*ny*nz*sizeof(float));
hipMalloc(&wl12, pml*ny*nz*sizeof(float));
hipMalloc(&wl13, pml*ny*nz*sizeof(float));
hipMalloc(&wl21, pml*ny*nz*sizeof(float));
hipMalloc(&wl31, pml*ny*nz*sizeof(float));
hipMalloc(&wl32, pml*ny*nz*sizeof(float));
hipMalloc(&wl33, pml*ny*nz*sizeof(float));
hipMalloc(&pl1, pml*ny*nz*sizeof(float));
hipMalloc(&pl2, pml*ny*nz*sizeof(float));
hipMalloc(&pl3, pml*ny*nz*sizeof(float));
hipMalloc(&wt11, pml*nx*ny*sizeof(float));
hipMalloc(&wt12, pml*nx*ny*sizeof(float));
hipMalloc(&wt13, pml*nx*ny*sizeof(float));
hipMalloc(&wt21, pml*nx*ny*sizeof(float));
hipMalloc(&wt31, pml*nx*ny*sizeof(float));
hipMalloc(&wt32, pml*nx*ny*sizeof(float));
hipMalloc(&wt33, pml*nx*ny*sizeof(float));
hipMalloc(&pt1, pml*nx*ny*sizeof(float));
hipMalloc(&pt2, pml*nx*ny*sizeof(float));
hipMalloc(&pt3, pml*nx*ny*sizeof(float));
hipMalloc(&wb11, pml*nx*ny*sizeof(float));
hipMalloc(&wb12, pml*nx*ny*sizeof(float));
hipMalloc(&wb13, pml*nx*ny*sizeof(float));
hipMalloc(&wb21, pml*nx*ny*sizeof(float));
hipMalloc(&wb31, pml*nx*ny*sizeof(float));
hipMalloc(&wb32, pml*nx*ny*sizeof(float));
hipMalloc(&wb33, pml*nx*ny*sizeof(float));
hipMalloc(&pb1, pml*nx*ny*sizeof(float));
hipMalloc(&pb2, pml*nx*ny*sizeof(float));
hipMalloc(&pb3, pml*nx*ny*sizeof(float));
hipMalloc(&wf11, pml*nx*nz*sizeof(float));
hipMalloc(&wf12, pml*nx*nz*sizeof(float));
hipMalloc(&wf13, pml*nx*nz*sizeof(float));
hipMalloc(&wf21, pml*nx*nz*sizeof(float));
hipMalloc(&wf31, pml*nx*nz*sizeof(float));
hipMalloc(&wf32, pml*nx*nz*sizeof(float));
hipMalloc(&wf33, pml*nx*nz*sizeof(float));
hipMalloc(&pf1, pml*nx*nz*sizeof(float));
hipMalloc(&pf2, pml*nx*nz*sizeof(float));
hipMalloc(&pf3, pml*nx*nz*sizeof(float));
hipMalloc(&wba11, pml*nx*nz*sizeof(float));
hipMalloc(&wba12, pml*nx*nz*sizeof(float));
hipMalloc(&wba13, pml*nx*nz*sizeof(float));
hipMalloc(&wba21, pml*nx*nz*sizeof(float));
hipMalloc(&wba31, pml*nx*nz*sizeof(float));
hipMalloc(&wba32, pml*nx*nz*sizeof(float));
hipMalloc(&wba33, pml*nx*nz*sizeof(float));
hipMalloc(&pba1, pml*nx*nz*sizeof(float));
hipMalloc(&pba2, pml*nx*nz*sizeof(float));
hipMalloc(&pba3, pml*nx*nz*sizeof(float));
	//initialize memory
/* hipMemset(S, 0, nx*ny*nz*sizeof(float));
hipMemset(u1, 0, nx*ny*nz*sizeof(float));
hipMemset(u2, 0, nx*ny*nz*sizeof(float));
hipMemset(u3, 0, nx*ny*nz*sizeof(float));
hipMemset(ux, 0, nx*ny*nz*sizeof(float));
hipMemset(uy, 0, nx*ny*nz*sizeof(float));
hipMemset(uz, 0, nx*ny*nz*sizeof(float));
hipMemset(wr11, 0, pml*ny*nz*sizeof(float));
hipMemset(wr12, 0, pml*ny*nz*sizeof(float));
hipMemset(wr13, 0, pml*ny*nz*sizeof(float));
hipMemset(wr21, 0, pml*ny*nz*sizeof(float));
hipMemset(wr31, 0, pml*ny*nz*sizeof(float));
hipMemset(wr32, 0, pml*ny*nz*sizeof(float));
hipMemset(wr33, 0, pml*ny*nz*sizeof(float));
hipMemset(pr1, 0, pml*ny*nz*sizeof(float));
hipMemset(pr2, 0, pml*ny*nz*sizeof(float));
hipMemset(pr3, 0, pml*ny*nz*sizeof(float));
hipMemset(wl11, 0, pml*ny*nz*sizeof(float));
hipMemset(wl12, 0, pml*ny*nz*sizeof(float));
hipMemset(wl13, 0, pml*ny*nz*sizeof(float));
hipMemset(wl21, 0, pml*ny*nz*sizeof(float));
hipMemset(wl31, 0, pml*ny*nz*sizeof(float));
hipMemset(wl32, 0, pml*ny*nz*sizeof(float));
hipMemset(wl33, 0, pml*ny*nz*sizeof(float));
hipMemset(pl1, 0, pml*ny*nz*sizeof(float));
hipMemset(pl2, 0, pml*ny*nz*sizeof(float));
hipMemset(pl3, 0, pml*ny*nz*sizeof(float));
hipMemset(wt11, 0, pml*nx*ny*sizeof(float));
hipMemset(wt12, 0, pml*nx*ny*sizeof(float));
hipMemset(wt13, 0, pml*nx*ny*sizeof(float));
hipMemset(wt21, 0, pml*nx*ny*sizeof(float));
hipMemset(wt31, 0, pml*nx*ny*sizeof(float));
hipMemset(wt32, 0, pml*nx*ny*sizeof(float));
hipMemset(wt33, 0, pml*nx*ny*sizeof(float));
hipMemset(pt1, 0, pml*nx*ny*sizeof(float));
hipMemset(pt2, 0, pml*nx*ny*sizeof(float));
hipMemset(pt3, 0, pml*nx*ny*sizeof(float));
hipMemset(wb11, 0, pml*nx*ny*sizeof(float));
hipMemset(wb12, 0, pml*nx*ny*sizeof(float));
hipMemset(wb13, 0, pml*nx*ny*sizeof(float));
hipMemset(wb21, 0, pml*nx*ny*sizeof(float));
hipMemset(wb31, 0, pml*nx*ny*sizeof(float));
hipMemset(wb32, 0, pml*nx*ny*sizeof(float));
hipMemset(wb33, 0, pml*nx*ny*sizeof(float));
hipMemset(pb1, 0, pml*nx*ny*sizeof(float));
hipMemset(pb2, 0, pml*nx*ny*sizeof(float));
hipMemset(pb3, 0, pml*nx*ny*sizeof(float));
hipMemset(wf11, 0, pml*nx*nz*sizeof(float));
hipMemset(wf12, 0, pml*nx*nz*sizeof(float));
hipMemset(wf13, 0, pml*nx*nz*sizeof(float));
hipMemset(wf21, 0, pml*nx*nz*sizeof(float));
hipMemset(wf31, 0, pml*nx*nz*sizeof(float));
hipMemset(wf32, 0, pml*nx*nz*sizeof(float));
hipMemset(wf33, 0, pml*nx*nz*sizeof(float));
hipMemset(pf1, 0, pml*nx*nz*sizeof(float));
hipMemset(pf2, 0, pml*nx*nz*sizeof(float));
hipMemset(pf3, 0, pml*nx*nz*sizeof(float));
hipMemset(wba11, 0, pml*nx*nz*sizeof(float));
hipMemset(wba12, 0, pml*nx*nz*sizeof(float));
hipMemset(wba13, 0, pml*nx*nz*sizeof(float));
hipMemset(wba21, 0, pml*nx*nz*sizeof(float));
hipMemset(wba31, 0, pml*nx*nz*sizeof(float));
hipMemset(wba32, 0, pml*nx*nz*sizeof(float));
hipMemset(wba33, 0, pml*nx*nz*sizeof(float));
hipMemset(pba1, 0, pml*nx*nz*sizeof(float));
hipMemset(pba2, 0, pml*nx*nz*sizeof(float));
hipMemset(pba3, 0, pml*nx*nz*sizeof(float));
*/
hipMemcpy(d_vp, vp, nx*ny*nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_epsilon, epsilon, nx*ny*nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_delta, delta, nx*ny*nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, (M/2+1)*(M/2+1)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_c2, c2, (M/2+1)*(M/2+1)*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_source, source, nx*ny*nz*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_dlr, dlr, pml*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_ddlr, ddlr, pml*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_dtb, dtb, pml*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_ddtb, ddtb, pml*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_dfb, dfb, pml*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_ddfb, ddfb, pml*sizeof(float), hipMemcpyHostToDevice);
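	// Main time-stepping loop: compute spatial gradients of u2, build the scalar operator S,
	// advance the wavefield one step with PML absorbing boundaries (wavefield_update), inject the
	// source wavelet, rotate the time levels (exchange), then copy the field back to the host to
	// extract the receiver record and, optionally, wavefield snapshots.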
for(k=0;k<nt;k++)
{
if(k%100==0)
printf("nt = %d\n",k);
hipLaunchKernelGGL(( grad), dim3(grid),dim3(block), 0, 0, u2, ux, uy, uz, d_c, nx, ny, nz, dx, dy, dz);
hipMemcpy(h_ux, ux, nx*ny*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_uy, uy, nx*ny*nz*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(h_uz, uz, nx*ny*nz*sizeof(float), hipMemcpyDeviceToHost);
uxMax = absMaxval(h_ux, nx, ny, nz);
uyMax = absMaxval(h_uy, nx, ny, nz);
uzMax = absMaxval(h_uz, nx, ny, nz);
uxyzMax = max(uxMax, uyMax);
uxyzMax = max(uxyzMax, uzMax);
	//print uxyzMax
printf("uxyzMax=%4.3f\n",uxyzMax);
//calculating S operators
hipLaunchKernelGGL(( scalar_operator), dim3(grid),dim3(block), 0, 0, uxyzMax, ux, uy, uz, d_epsilon, d_delta, S, nx, ny, nz);
	//print S on the GPU side (see GPU_kernel.cu)
//calculating wavefield using FD method
hipLaunchKernelGGL(( wavefield_update), dim3(grid),dim3(block), 0, 0, d_c, d_c2, d_dlr, d_ddlr, d_dtb, d_ddtb, d_dfb, d_ddfb, d_epsilon,d_delta,
d_vp, dx, dy, dz, dt, nx, ny, nz, pml, sz, ux, uy, uz, u1, u3, u2, S,
wl11, wl12, wl13, wl21, wl31, wl32, wl33, pl1, pl2, pl3,
wr11, wr12, wr13, wr21, wr31, wr32, wr33, pr1, pr2, pr3,
wt11, wt12, wt13, wt21, wt31, wt32, wt33, pt1, pt2, pt3,
wb11, wb12, wb13, wb21, wb31, wb32, wb33, pb1, pb2, pb3,
wf11, wf12, wf13, wf21, wf31, wf32, wf33, pf1, pf2, pf3,
wba11, wba12, wba13, wba21, wba31, wba32, wba33, pba1, pba2, pba3);
hipLaunchKernelGGL(( addsource), dim3(grid),dim3(block), 0, 0, d_source, wavelet[k], u3, nx, ny, nz);
hipLaunchKernelGGL(( exchange), dim3(grid),dim3(block), 0, 0, nx, ny, nz, pml, u1, u2, u3,
wl11, wl12, wl13, wl31, wl32, wl33, pl1, pl2, pl3,
wr11, wr12, wr13, wr31, wr32, wr33, pr1, pr2, pr3,
wt11, wt12, wt13, wt31, wt32, wt33, pt1, pt2, pt3,
wb11, wb12, wb13, wb31, wb32, wb33, pb1, pb2, pb3,
wf11, wf12, wf13, wf31, wf32, wf33, pf1, pf2, pf3,
wba11, wba12, wba13, wba31, wba32, wba33, pba1, pba2, pba3);
// seismic fullwavefield and record
//wavefield_output<<<grid,block>>>(u2, d_u, &d_record[k*(nx-2*pml)*(ny-2*pml)], nx, ny, nz, sz, pml);
hipMemcpy(h_u2, u2, nx*ny*nz*sizeof(float), hipMemcpyDeviceToHost);
	// print h_u2
// for(int i=0;i<nz;i++){
// for(int j=0;j<nx;j++){
// for(int k=0;k<ny;k++){
// if(h_u2[i*nx*ny+j*ny+k]>0.0001 || h_u2[i*nx*ny+j*ny+k]<-0.0001)
// printf("h_u2[xxx]=%4.3f ",h_u2[i*nx*ny+j*ny+k]);
// }
// }
// }
for(i=pml;i<nx-pml;i++)
for(j=pml;j<ny-pml;j++)
{
record[k*(nx-2*pml)*(ny-2*pml)+(i-pml)*(ny-2*pml)+j-pml] = h_u2[sz*nx*ny+i*ny+j];
}
if(snapflag ==1 && k%ntsnap==0)
{
sprintf(snapname,"%s%d.dat", snap_file, k);
sprintf(snapxzname,"%s_xz%d.dat", snap_file, k);
sprintf(snapyzname,"%s_yz%d.dat", snap_file, k);
sprintf(snapxyname,"%s_xy%d.dat", snap_file, k);
// hipMemcpy(snap, d_u, (nz-2*pml)*(nx-2*pml)*(ny-2*pml)*sizeof(float), hipMemcpyDeviceToHost);
for(i=pml;i<nz-pml;i++)
for(j=pml;j<nx-pml;j++)
for(l=pml;l<ny-pml;l++)
snap[(i-pml)*(nx-2*pml)*(ny-2*pml)+(j-pml)*(ny-2*pml)+l-pml] = h_u2[i*nx*ny+j*ny+l];
writefile_3d(snapname, snap, nz-2*pml, nx-2*pml, ny-2*pml);
for(i=0;i<nz-2*pml;i++)
for(j=0;j<nx-2*pml;j++)
for(l=0;l<ny-2*pml;l++)
{
if(l==(ny-2*pml-1)/2)
{
snapxz[i*(nx-2*pml)+j] = snap[i*(nx-2*pml)*(ny-2*pml)+j*(ny-2*pml)+l];
}
if(j==(nx-2*pml-1)/2)
{
snapyz[i*(ny-2*pml)+l] = snap[i*(nx-2*pml)*(ny-2*pml)+j*(ny-2*pml)+l];
}
if(i==(nz-2*pml-1)/2)
{
snapxy[j*(ny-2*pml)+l] = snap[i*(nx-2*pml)*(ny-2*pml)+j*(ny-2*pml)+l];
}
}
writefile_2d(snapxzname, snapxz, nz-2*pml, nx-2*pml);
writefile_2d(snapyzname, snapyz, nz-2*pml, ny-2*pml);
writefile_2d(snapxyname, snapxy, nx-2*pml, ny-2*pml);
}
// printf("%f\n",absMaxval(snap, nx-2*pml, ny-2*pml, nz-2*pml));
/* sprintf(snapname_S,"%s_S%d.dat", snap_file, k);
sprintf(snapSxzname,"%s_Sxz%d.dat", snap_file, k);
sprintf(snapSyzname,"%s_Syz%d.dat", snap_file, k);
sprintf(snapSxyname,"%s_Sxy%d.dat", snap_file, k);
hipMemcpy(snapS, S, nx*ny*nz*sizeof(float), hipMemcpyDeviceToHost);
writefile_3d(snapname_S, snapS, nz, nx, ny);
for(i=0;i<nz;i++)
for(j=0;j<nx;j++)
for(l=0;l<ny;l++)
{
if(l==(ny-1)/2)
{
snapSxz[i*nx+j] = snapS[i*nx*ny+j*ny+l];
}
if(j==(nx-1)/2)
{
snapSyz[i*ny+l] = snapS[i*nx*ny+j*ny+l];
}
if(i==(nz-1)/2)
{
snapSxy[j*ny+l] = snapS[i*nx*ny+j*ny+l];
}
}
writefile_2d(snapSxzname, snapSxz, nz, nx);
writefile_2d(snapSyzname, snapSyz, nz, ny);
writefile_2d(snapSxyname, snapSxy, nx, ny);*/
// }
}
// hipMemcpy(record, d_record, nt*(nx-2*pml)*(ny-2*pml)*sizeof(float), hipMemcpyDeviceToHost);
//free device memory
hipFree(d_vp);hipFree(d_epsilon);hipFree(d_delta);hipFree(d_c);hipFree(d_c2);
hipFree(d_dlr);hipFree(d_ddlr);hipFree(d_dtb);hipFree(d_ddtb);hipFree(d_dfb);hipFree(d_ddfb);
hipFree(d_source);hipFree(S);hipFree(u1);hipFree(u2);hipFree(u3);
hipFree(ux);hipFree(uy);hipFree(uz);hipFree(d_record);hipFree(d_u);
hipFree(wl11);hipFree(wl12);hipFree(wl13);hipFree(wl21);
hipFree(wl31);hipFree(wl32);hipFree(wl33);hipFree(pl1);
hipFree(pl2);hipFree(pl3);
hipFree(wr11);hipFree(wr12);hipFree(wr13);hipFree(wr21);
hipFree(wr31);hipFree(wr32);hipFree(wr33);hipFree(pr1);
hipFree(pr2);hipFree(pr3);
hipFree(wt11);hipFree(wt12);hipFree(wt13);hipFree(wt21);
hipFree(wt31);hipFree(wt32);hipFree(wt33);hipFree(pt1);
hipFree(pt2);hipFree(pt3);
hipFree(wb11);hipFree(wb12);hipFree(wb13);hipFree(wb21);
hipFree(wb31);hipFree(wb32);hipFree(wb33);hipFree(pb1);
hipFree(pb2);hipFree(pb3);
hipFree(wf11);hipFree(wf12);hipFree(wf13);hipFree(wf21);
hipFree(wf31);hipFree(wf32);hipFree(wf33);hipFree(pf1);
hipFree(pf2);hipFree(pf3);
hipFree(wba11);hipFree(wba12);hipFree(wba13);hipFree(wba21);
hipFree(wba31);hipFree(wba32);hipFree(wba33);hipFree(pba1);
hipFree(pba2);hipFree(pba3);
free(h_ux); free(h_uy); free(h_uz); free(snap); free(snapxz);free(snapyz);free(snapxy);
free(snapS);free(snapSxz);free(snapSyz);free(snapSxy);
endtime = clock();
timespent=(float)(endtime-starttime)/CLOCKS_PER_SEC;
printf("Singshot modeling time-assuming is %f s.\n",timespent);
}
| 786e6cb3aea546c65c470b1aa1c1595e24c79c6b.cu | /************ The program is writed by Lun Ruan, 2018.10***********************/
/*******3D Modeling for pure qP wave equation from Xu,2015************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include <malloc.h>
#include <cuda.h>
//#include <cuda_runtime.h>
//#include <device_functions.h>
#include "array_new.h"
#include "read_write.h"
#include "GPU_kernel.h"
#include "CPU_function.h"
#define M 10
#define eps 2.22e-17
#define Block_Sizex 8
#define Block_Sizey 8
#define Block_Sizez 8
void modeling3d(int nx, int ny, int nz, int nt, int ntsnap, float dx, float dy, float dz, float dt, int pml, int snapflag, int sx, int sy, int sz,
float *vp, float *epsilon, float *delta, float *source, float *wavelet, float *record, float *dlr,float *ddlr, float *dtb, float *ddtb,
float *dfb, float *ddfb, float *c, float *c2, const char *snap_file)
{
	//timing
clock_t starttime, endtime;
float timespent;
starttime = clock();
int device_num;
cudaGetDeviceCount(&device_num);
if(device_num > 0)
cudaSetDevice(0);
else
cudaSetDevice(0);
float uxMax, uyMax, uzMax, uxyzMax;
int i,j,l,k;
char snapname[100], snapname_S[100], snapxzname[100], snapyzname[100],snapxyname[100],
snapSxzname[100], snapSyzname[100],snapSxyname[100];
dim3 grid((ny+Block_Sizey-1)/Block_Sizey, (nx+Block_Sizex-1)/Block_Sizex, (nz+Block_Sizez-1)/Block_Sizez);
dim3 block(Block_Sizey, Block_Sizex, Block_Sizez);
//allocate host memory
float *snap = array1d((nx-2*pml)*(ny-2*pml)*(nz-2*pml)), *snapxz = array1d((nx-2*pml)*(nz-2*pml)),
*snapyz = array1d((ny-2*pml)*(nz-2*pml)), *snapxy = array1d((ny-2*pml)*(nx-2*pml)),
*snapS = array1d(nx*ny*nz), *snapSxz = array1d(nx*nz), *snapSyz = array1d(ny*nz), *snapSxy = array1d(ny*nx),
*h_ux = array1d(nx*ny*nz), *h_uy = array1d(nx*ny*nz), *h_uz = array1d(nx*ny*nz),*h_u2 = array1d(nx*ny*nz);
/******* allocate device memory *****/
float *d_vp, *d_epsilon,*d_delta,*d_c,*d_c2,*d_dlr,*d_ddlr,*d_dtb,*d_ddtb,*d_dfb,
*d_ddfb, *d_source, *S, *u1, *u2, *u3, *ux, *uy, *uz,*d_record, *d_u,
*wl11, *wl12, *wl13, *wl21, *wl31, *wl32, *wl33, *pl1,*pl2,*pl3,
*wr11, *wr12, *wr13, *wr21, *wr31, *wr32, *wr33, *pr1,*pr2,*pr3,
*wt11, *wt12, *wt13, *wt21, *wt31, *wt32, *wt33, *pt1,*pt2,*pt3,
*wb11, *wb12, *wb13, *wb21, *wb31, *wb32, *wb33, *pb1,*pb2,*pb3,
*wf11, *wf12, *wf13, *wf21, *wf31, *wf32, *wf33, *pf1,*pf2,*pf3,
*wba11, *wba12, *wba13, *wba21, *wba31, *wba32, *wba33, *pba1,*pba2,*pba3;
	/* print CPU-side parameters */
for(i=0;i<nt;i++){
printf("wavelet=%4.3f ",wavelet[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("dlr=%4.3f ",dlr[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("ddlr=%4.3f ",ddlr[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("dtb=%4.3f ",dtb[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("ddtb=%4.3f ",ddtb[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("dfb=%4.3f ",dfb[i]);
}
printf("\n");
for(i=0;i<pml;i++){
printf("ddfb=%4.3f ",ddfb[i]);
}
printf("\n");
for(i=(M/2)*(M/2+1)+1;i<(M/2)*(M/2+1)+6;i++){
printf("c2=%4.3f ",c2[i]);
}
printf("\n");
cudaMalloc(&d_vp, nx*ny*nz*sizeof(float));
cudaMalloc(&d_epsilon, nx*ny*nz*sizeof(float));
cudaMalloc(&d_delta, nx*ny*nz*sizeof(float));
cudaMalloc(&d_c, (M/2+1)*(M/2+1)*sizeof(float));
cudaMalloc(&d_c2, (M/2+1)*(M/2+1)*sizeof(float));
cudaMalloc(&d_dlr,pml*sizeof(float));
cudaMalloc(&d_ddlr, pml*sizeof(float));
cudaMalloc(&d_dtb, pml*sizeof(float));
cudaMalloc(&d_ddtb, pml*sizeof(float));
cudaMalloc(&d_dfb, pml*sizeof(float));
cudaMalloc(&d_ddfb, pml*sizeof(float));
cudaMalloc(&d_source, nx*ny*nz*sizeof(float));
cudaMalloc(&S, nx*ny*nz*sizeof(float));
cudaMalloc(&u1, nx*ny*nz*sizeof(float));
cudaMalloc(&u2, nx*ny*nz*sizeof(float));
cudaMalloc(&u3, nx*ny*nz*sizeof(float));
cudaMalloc(&ux, nx*ny*nz*sizeof(float));
cudaMalloc(&uy, nx*ny*nz*sizeof(float));
cudaMalloc(&uz, nx*ny*nz*sizeof(float));
// cudaMalloc(&d_record, (nx-2*pml)*(ny-2*pml)*nt*sizeof(float));
// cudaMalloc(&d_u, (nx-2*pml)*(ny-2*pml)*(nz-2*pml)*sizeof(float));
cudaMalloc(&wr11, pml*ny*nz*sizeof(float));
cudaMalloc(&wr12, pml*ny*nz*sizeof(float));
cudaMalloc(&wr13, pml*ny*nz*sizeof(float));
cudaMalloc(&wr21, pml*ny*nz*sizeof(float));
cudaMalloc(&wr31, pml*ny*nz*sizeof(float));
cudaMalloc(&wr32, pml*ny*nz*sizeof(float));
cudaMalloc(&wr33, pml*ny*nz*sizeof(float));
cudaMalloc(&pr1, pml*ny*nz*sizeof(float));
cudaMalloc(&pr2, pml*ny*nz*sizeof(float));
cudaMalloc(&pr3, pml*ny*nz*sizeof(float));
cudaMalloc(&wl11, pml*ny*nz*sizeof(float));
cudaMalloc(&wl12, pml*ny*nz*sizeof(float));
cudaMalloc(&wl13, pml*ny*nz*sizeof(float));
cudaMalloc(&wl21, pml*ny*nz*sizeof(float));
cudaMalloc(&wl31, pml*ny*nz*sizeof(float));
cudaMalloc(&wl32, pml*ny*nz*sizeof(float));
cudaMalloc(&wl33, pml*ny*nz*sizeof(float));
cudaMalloc(&pl1, pml*ny*nz*sizeof(float));
cudaMalloc(&pl2, pml*ny*nz*sizeof(float));
cudaMalloc(&pl3, pml*ny*nz*sizeof(float));
cudaMalloc(&wt11, pml*nx*ny*sizeof(float));
cudaMalloc(&wt12, pml*nx*ny*sizeof(float));
cudaMalloc(&wt13, pml*nx*ny*sizeof(float));
cudaMalloc(&wt21, pml*nx*ny*sizeof(float));
cudaMalloc(&wt31, pml*nx*ny*sizeof(float));
cudaMalloc(&wt32, pml*nx*ny*sizeof(float));
cudaMalloc(&wt33, pml*nx*ny*sizeof(float));
cudaMalloc(&pt1, pml*nx*ny*sizeof(float));
cudaMalloc(&pt2, pml*nx*ny*sizeof(float));
cudaMalloc(&pt3, pml*nx*ny*sizeof(float));
cudaMalloc(&wb11, pml*nx*ny*sizeof(float));
cudaMalloc(&wb12, pml*nx*ny*sizeof(float));
cudaMalloc(&wb13, pml*nx*ny*sizeof(float));
cudaMalloc(&wb21, pml*nx*ny*sizeof(float));
cudaMalloc(&wb31, pml*nx*ny*sizeof(float));
cudaMalloc(&wb32, pml*nx*ny*sizeof(float));
cudaMalloc(&wb33, pml*nx*ny*sizeof(float));
cudaMalloc(&pb1, pml*nx*ny*sizeof(float));
cudaMalloc(&pb2, pml*nx*ny*sizeof(float));
cudaMalloc(&pb3, pml*nx*ny*sizeof(float));
cudaMalloc(&wf11, pml*nx*nz*sizeof(float));
cudaMalloc(&wf12, pml*nx*nz*sizeof(float));
cudaMalloc(&wf13, pml*nx*nz*sizeof(float));
cudaMalloc(&wf21, pml*nx*nz*sizeof(float));
cudaMalloc(&wf31, pml*nx*nz*sizeof(float));
cudaMalloc(&wf32, pml*nx*nz*sizeof(float));
cudaMalloc(&wf33, pml*nx*nz*sizeof(float));
cudaMalloc(&pf1, pml*nx*nz*sizeof(float));
cudaMalloc(&pf2, pml*nx*nz*sizeof(float));
cudaMalloc(&pf3, pml*nx*nz*sizeof(float));
cudaMalloc(&wba11, pml*nx*nz*sizeof(float));
cudaMalloc(&wba12, pml*nx*nz*sizeof(float));
cudaMalloc(&wba13, pml*nx*nz*sizeof(float));
cudaMalloc(&wba21, pml*nx*nz*sizeof(float));
cudaMalloc(&wba31, pml*nx*nz*sizeof(float));
cudaMalloc(&wba32, pml*nx*nz*sizeof(float));
cudaMalloc(&wba33, pml*nx*nz*sizeof(float));
cudaMalloc(&pba1, pml*nx*nz*sizeof(float));
cudaMalloc(&pba2, pml*nx*nz*sizeof(float));
cudaMalloc(&pba3, pml*nx*nz*sizeof(float));
	//initialize memory
/* cudaMemset(S, 0, nx*ny*nz*sizeof(float));
cudaMemset(u1, 0, nx*ny*nz*sizeof(float));
cudaMemset(u2, 0, nx*ny*nz*sizeof(float));
cudaMemset(u3, 0, nx*ny*nz*sizeof(float));
cudaMemset(ux, 0, nx*ny*nz*sizeof(float));
cudaMemset(uy, 0, nx*ny*nz*sizeof(float));
cudaMemset(uz, 0, nx*ny*nz*sizeof(float));
cudaMemset(wr11, 0, pml*ny*nz*sizeof(float));
cudaMemset(wr12, 0, pml*ny*nz*sizeof(float));
cudaMemset(wr13, 0, pml*ny*nz*sizeof(float));
cudaMemset(wr21, 0, pml*ny*nz*sizeof(float));
cudaMemset(wr31, 0, pml*ny*nz*sizeof(float));
cudaMemset(wr32, 0, pml*ny*nz*sizeof(float));
cudaMemset(wr33, 0, pml*ny*nz*sizeof(float));
cudaMemset(pr1, 0, pml*ny*nz*sizeof(float));
cudaMemset(pr2, 0, pml*ny*nz*sizeof(float));
cudaMemset(pr3, 0, pml*ny*nz*sizeof(float));
cudaMemset(wl11, 0, pml*ny*nz*sizeof(float));
cudaMemset(wl12, 0, pml*ny*nz*sizeof(float));
cudaMemset(wl13, 0, pml*ny*nz*sizeof(float));
cudaMemset(wl21, 0, pml*ny*nz*sizeof(float));
cudaMemset(wl31, 0, pml*ny*nz*sizeof(float));
cudaMemset(wl32, 0, pml*ny*nz*sizeof(float));
cudaMemset(wl33, 0, pml*ny*nz*sizeof(float));
cudaMemset(pl1, 0, pml*ny*nz*sizeof(float));
cudaMemset(pl2, 0, pml*ny*nz*sizeof(float));
cudaMemset(pl3, 0, pml*ny*nz*sizeof(float));
cudaMemset(wt11, 0, pml*nx*ny*sizeof(float));
cudaMemset(wt12, 0, pml*nx*ny*sizeof(float));
cudaMemset(wt13, 0, pml*nx*ny*sizeof(float));
cudaMemset(wt21, 0, pml*nx*ny*sizeof(float));
cudaMemset(wt31, 0, pml*nx*ny*sizeof(float));
cudaMemset(wt32, 0, pml*nx*ny*sizeof(float));
cudaMemset(wt33, 0, pml*nx*ny*sizeof(float));
cudaMemset(pt1, 0, pml*nx*ny*sizeof(float));
cudaMemset(pt2, 0, pml*nx*ny*sizeof(float));
cudaMemset(pt3, 0, pml*nx*ny*sizeof(float));
cudaMemset(wb11, 0, pml*nx*ny*sizeof(float));
cudaMemset(wb12, 0, pml*nx*ny*sizeof(float));
cudaMemset(wb13, 0, pml*nx*ny*sizeof(float));
cudaMemset(wb21, 0, pml*nx*ny*sizeof(float));
cudaMemset(wb31, 0, pml*nx*ny*sizeof(float));
cudaMemset(wb32, 0, pml*nx*ny*sizeof(float));
cudaMemset(wb33, 0, pml*nx*ny*sizeof(float));
cudaMemset(pb1, 0, pml*nx*ny*sizeof(float));
cudaMemset(pb2, 0, pml*nx*ny*sizeof(float));
cudaMemset(pb3, 0, pml*nx*ny*sizeof(float));
cudaMemset(wf11, 0, pml*nx*nz*sizeof(float));
cudaMemset(wf12, 0, pml*nx*nz*sizeof(float));
cudaMemset(wf13, 0, pml*nx*nz*sizeof(float));
cudaMemset(wf21, 0, pml*nx*nz*sizeof(float));
cudaMemset(wf31, 0, pml*nx*nz*sizeof(float));
cudaMemset(wf32, 0, pml*nx*nz*sizeof(float));
cudaMemset(wf33, 0, pml*nx*nz*sizeof(float));
cudaMemset(pf1, 0, pml*nx*nz*sizeof(float));
cudaMemset(pf2, 0, pml*nx*nz*sizeof(float));
cudaMemset(pf3, 0, pml*nx*nz*sizeof(float));
cudaMemset(wba11, 0, pml*nx*nz*sizeof(float));
cudaMemset(wba12, 0, pml*nx*nz*sizeof(float));
cudaMemset(wba13, 0, pml*nx*nz*sizeof(float));
cudaMemset(wba21, 0, pml*nx*nz*sizeof(float));
cudaMemset(wba31, 0, pml*nx*nz*sizeof(float));
cudaMemset(wba32, 0, pml*nx*nz*sizeof(float));
cudaMemset(wba33, 0, pml*nx*nz*sizeof(float));
cudaMemset(pba1, 0, pml*nx*nz*sizeof(float));
cudaMemset(pba2, 0, pml*nx*nz*sizeof(float));
cudaMemset(pba3, 0, pml*nx*nz*sizeof(float));
*/
cudaMemcpy(d_vp, vp, nx*ny*nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_epsilon, epsilon, nx*ny*nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_delta, delta, nx*ny*nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, (M/2+1)*(M/2+1)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_c2, c2, (M/2+1)*(M/2+1)*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_source, source, nx*ny*nz*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_dlr, dlr, pml*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ddlr, ddlr, pml*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_dtb, dtb, pml*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ddtb, ddtb, pml*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_dfb, dfb, pml*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ddfb, ddfb, pml*sizeof(float), cudaMemcpyHostToDevice);
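	// Main time-stepping loop: compute spatial gradients of u2, build the scalar operator S,
	// advance the wavefield one step with PML absorbing boundaries (wavefield_update), inject the
	// source wavelet, rotate the time levels (exchange), then copy the field back to the host to
	// extract the receiver record and, optionally, wavefield snapshots.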
for(k=0;k<nt;k++)
{
if(k%100==0)
printf("nt = %d\n",k);
grad<<<grid,block>>>(u2, ux, uy, uz, d_c, nx, ny, nz, dx, dy, dz);
cudaMemcpy(h_ux, ux, nx*ny*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_uy, uy, nx*ny*nz*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_uz, uz, nx*ny*nz*sizeof(float), cudaMemcpyDeviceToHost);
uxMax = absMaxval(h_ux, nx, ny, nz);
uyMax = absMaxval(h_uy, nx, ny, nz);
uzMax = absMaxval(h_uz, nx, ny, nz);
uxyzMax = max(uxMax, uyMax);
uxyzMax = max(uxyzMax, uzMax);
	//print uxyzMax
printf("uxyzMax=%4.3f\n",uxyzMax);
//calculating S operators
scalar_operator<<<grid,block>>>(uxyzMax, ux, uy, uz, d_epsilon, d_delta, S, nx, ny, nz);
	//print S on the GPU side (see GPU_kernel.cu)
//calculating wavefield using FD method
wavefield_update<<<grid,block>>>(d_c, d_c2, d_dlr, d_ddlr, d_dtb, d_ddtb, d_dfb, d_ddfb, d_epsilon,d_delta,
d_vp, dx, dy, dz, dt, nx, ny, nz, pml, sz, ux, uy, uz, u1, u3, u2, S,
wl11, wl12, wl13, wl21, wl31, wl32, wl33, pl1, pl2, pl3,
wr11, wr12, wr13, wr21, wr31, wr32, wr33, pr1, pr2, pr3,
wt11, wt12, wt13, wt21, wt31, wt32, wt33, pt1, pt2, pt3,
wb11, wb12, wb13, wb21, wb31, wb32, wb33, pb1, pb2, pb3,
wf11, wf12, wf13, wf21, wf31, wf32, wf33, pf1, pf2, pf3,
wba11, wba12, wba13, wba21, wba31, wba32, wba33, pba1, pba2, pba3);
addsource<<<grid,block>>>(d_source, wavelet[k], u3, nx, ny, nz);
exchange<<<grid,block>>>(nx, ny, nz, pml, u1, u2, u3,
wl11, wl12, wl13, wl31, wl32, wl33, pl1, pl2, pl3,
wr11, wr12, wr13, wr31, wr32, wr33, pr1, pr2, pr3,
wt11, wt12, wt13, wt31, wt32, wt33, pt1, pt2, pt3,
wb11, wb12, wb13, wb31, wb32, wb33, pb1, pb2, pb3,
wf11, wf12, wf13, wf31, wf32, wf33, pf1, pf2, pf3,
wba11, wba12, wba13, wba31, wba32, wba33, pba1, pba2, pba3);
// seismic fullwavefield and record
//wavefield_output<<<grid,block>>>(u2, d_u, &d_record[k*(nx-2*pml)*(ny-2*pml)], nx, ny, nz, sz, pml);
cudaMemcpy(h_u2, u2, nx*ny*nz*sizeof(float), cudaMemcpyDeviceToHost);
	//print h_u2
// for(int i=0;i<nz;i++){
// for(int j=0;j<nx;j++){
// for(int k=0;k<ny;k++){
// if(h_u2[i*nx*ny+j*ny+k]>0.0001 || h_u2[i*nx*ny+j*ny+k]<-0.0001)
// printf("h_u2[xxx]=%4.3f ",h_u2[i*nx*ny+j*ny+k]);
// }
// }
// }
for(i=pml;i<nx-pml;i++)
for(j=pml;j<ny-pml;j++)
{
record[k*(nx-2*pml)*(ny-2*pml)+(i-pml)*(ny-2*pml)+j-pml] = h_u2[sz*nx*ny+i*ny+j];
}
if(snapflag ==1 && k%ntsnap==0)
{
sprintf(snapname,"%s%d.dat", snap_file, k);
sprintf(snapxzname,"%s_xz%d.dat", snap_file, k);
sprintf(snapyzname,"%s_yz%d.dat", snap_file, k);
sprintf(snapxyname,"%s_xy%d.dat", snap_file, k);
// cudaMemcpy(snap, d_u, (nz-2*pml)*(nx-2*pml)*(ny-2*pml)*sizeof(float), cudaMemcpyDeviceToHost);
for(i=pml;i<nz-pml;i++)
for(j=pml;j<nx-pml;j++)
for(l=pml;l<ny-pml;l++)
snap[(i-pml)*(nx-2*pml)*(ny-2*pml)+(j-pml)*(ny-2*pml)+l-pml] = h_u2[i*nx*ny+j*ny+l];
writefile_3d(snapname, snap, nz-2*pml, nx-2*pml, ny-2*pml);
for(i=0;i<nz-2*pml;i++)
for(j=0;j<nx-2*pml;j++)
for(l=0;l<ny-2*pml;l++)
{
if(l==(ny-2*pml-1)/2)
{
snapxz[i*(nx-2*pml)+j] = snap[i*(nx-2*pml)*(ny-2*pml)+j*(ny-2*pml)+l];
}
if(j==(nx-2*pml-1)/2)
{
snapyz[i*(ny-2*pml)+l] = snap[i*(nx-2*pml)*(ny-2*pml)+j*(ny-2*pml)+l];
}
if(i==(nz-2*pml-1)/2)
{
snapxy[j*(ny-2*pml)+l] = snap[i*(nx-2*pml)*(ny-2*pml)+j*(ny-2*pml)+l];
}
}
writefile_2d(snapxzname, snapxz, nz-2*pml, nx-2*pml);
writefile_2d(snapyzname, snapyz, nz-2*pml, ny-2*pml);
writefile_2d(snapxyname, snapxy, nx-2*pml, ny-2*pml);
}
// printf("%f\n",absMaxval(snap, nx-2*pml, ny-2*pml, nz-2*pml));
/* sprintf(snapname_S,"%s_S%d.dat", snap_file, k);
sprintf(snapSxzname,"%s_Sxz%d.dat", snap_file, k);
sprintf(snapSyzname,"%s_Syz%d.dat", snap_file, k);
sprintf(snapSxyname,"%s_Sxy%d.dat", snap_file, k);
cudaMemcpy(snapS, S, nx*ny*nz*sizeof(float), cudaMemcpyDeviceToHost);
writefile_3d(snapname_S, snapS, nz, nx, ny);
for(i=0;i<nz;i++)
for(j=0;j<nx;j++)
for(l=0;l<ny;l++)
{
if(l==(ny-1)/2)
{
snapSxz[i*nx+j] = snapS[i*nx*ny+j*ny+l];
}
if(j==(nx-1)/2)
{
snapSyz[i*ny+l] = snapS[i*nx*ny+j*ny+l];
}
if(i==(nz-1)/2)
{
snapSxy[j*ny+l] = snapS[i*nx*ny+j*ny+l];
}
}
writefile_2d(snapSxzname, snapSxz, nz, nx);
writefile_2d(snapSyzname, snapSyz, nz, ny);
writefile_2d(snapSxyname, snapSxy, nx, ny);*/
// }
}
// cudaMemcpy(record, d_record, nt*(nx-2*pml)*(ny-2*pml)*sizeof(float), cudaMemcpyDeviceToHost);
//free device memory
cudaFree(d_vp);cudaFree(d_epsilon);cudaFree(d_delta);cudaFree(d_c);cudaFree(d_c2);
cudaFree(d_dlr);cudaFree(d_ddlr);cudaFree(d_dtb);cudaFree(d_ddtb);cudaFree(d_dfb);cudaFree(d_ddfb);
cudaFree(d_source);cudaFree(S);cudaFree(u1);cudaFree(u2);cudaFree(u3);
cudaFree(ux);cudaFree(uy);cudaFree(uz);cudaFree(d_record);cudaFree(d_u);
cudaFree(wl11);cudaFree(wl12);cudaFree(wl13);cudaFree(wl21);
cudaFree(wl31);cudaFree(wl32);cudaFree(wl33);cudaFree(pl1);
cudaFree(pl2);cudaFree(pl3);
cudaFree(wr11);cudaFree(wr12);cudaFree(wr13);cudaFree(wr21);
cudaFree(wr31);cudaFree(wr32);cudaFree(wr33);cudaFree(pr1);
cudaFree(pr2);cudaFree(pr3);
cudaFree(wt11);cudaFree(wt12);cudaFree(wt13);cudaFree(wt21);
cudaFree(wt31);cudaFree(wt32);cudaFree(wt33);cudaFree(pt1);
cudaFree(pt2);cudaFree(pt3);
cudaFree(wb11);cudaFree(wb12);cudaFree(wb13);cudaFree(wb21);
cudaFree(wb31);cudaFree(wb32);cudaFree(wb33);cudaFree(pb1);
cudaFree(pb2);cudaFree(pb3);
cudaFree(wf11);cudaFree(wf12);cudaFree(wf13);cudaFree(wf21);
cudaFree(wf31);cudaFree(wf32);cudaFree(wf33);cudaFree(pf1);
cudaFree(pf2);cudaFree(pf3);
cudaFree(wba11);cudaFree(wba12);cudaFree(wba13);cudaFree(wba21);
cudaFree(wba31);cudaFree(wba32);cudaFree(wba33);cudaFree(pba1);
cudaFree(pba2);cudaFree(pba3);
free(h_ux); free(h_uy); free(h_uz); free(snap); free(snapxz);free(snapyz);free(snapxy);
free(snapS);free(snapSxz);free(snapSyz);free(snapSxy);
endtime = clock();
timespent=(float)(endtime-starttime)/CLOCKS_PER_SEC;
printf("Singshot modeling time-assuming is %f s.\n",timespent);
}
|
88de94a0f4bb529e68c5d66193c7ed046c556c84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
============================================================================
Name : NTTFFTCUDA.cu
Author : Owen
Version :
Copyright : Your copyright notice
 Description : CUDA integer NTT/FFT with shared-memory kernels
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdint.h>
#include <math.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t);
void ProcessandTime(const int size, const int bpe, const int thlimit);
void initialize(uint32_t* arr, int mod, const int size);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* CUDA kernel that computes the integer FFT of an array of unsigned integers
*/
// both of the following constants must be a power of two
// the first part takes care of the first stages before the shuffling and the
// element transfer from shared memory to global memory
__global__ void MulSMNTTKernel1(uint32_t *data, const int NTT_SIZE, const short gridSizeSh, short curr_k, short k, int mod, uint32_t* g, bool repos) {
unsigned short idx = threadIdx.x;
unsigned short b = blockIdx.x;
const int BLOCK_SIZE = blockDim.x; // the offset given for the second part of shared memory
if (idx < BLOCK_SIZE){ // for maximum performance, the last idx should be a multiple of 32 (minus 1 since it starts from 0)
uint32_t tempvar; // a temporary variable to manage subtractions and hold temporary data
short i = idx >> 1;
short j = idx & 1; // even or odd thread
// the following three variables are used in the twiddle factor index computation
short l = k - curr_k - 1;
short m = i & 1; // helps in indexing for each thread in a block
short b_twid = b << (l - 1); // twiddle factor indexing for each block
short elem = b + (idx << gridSizeSh);
//short elem = idx + (b << l) + ((BLOCK_SIZE >> gridSizeSh) * (i >> (l-1)));
short p, tmp, x, offx, x1, twid_1, twid_2, twid_index;
// sign takes care of the operations for even (addition)
// and odd (subtraction) threads
short sign = ((-2) * j) + 1; // can be 1 or -1
// temporary array to store intermediate data
extern __shared__ uint32_t final_temp[];
extern __shared__ uint32_t temp_data[];
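		// final_temp and temp_data both map to the start of the dynamic shared-memory allocation
		// (2 * BLOCK_SIZE elements); temp_data is always indexed with a BLOCK_SIZE offset, so the
		// two names effectively address the lower and upper halves of the same buffer.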
// copy data from global memory to shared memory
final_temp[idx] = data[elem];
__syncthreads(); // ensure that the shared memory is properly populated
// stg is the stage (or epoch)
// k is the number of stages in the computation
for(short stg = 1; stg <= curr_k; stg++){
p = BLOCK_SIZE >> stg; // this variable will help in indexing
// indexing
tmp = i + ((i >> (k - curr_k - stg)) * (1 << (k - curr_k - stg)));
x = tmp + (j * p);
offx = BLOCK_SIZE + x; // use this to index second part of shared memory (temp_data[])
x1 = x + (sign * p);
if(repos){
twid_1 = (b + (i << gridSizeSh)) * (curr_k - stg); // twiddle for stage 1
twid_2 = ((l - 1) * ((!m * b_twid) + (m * (b_twid + BLOCK_SIZE))) + (curr_k - l) * (b << l)) * (stg - 1); // twiddle for stage 2
// twid_index manages the indexing of the twiddle factors
twid_index = twid_1 + twid_2;
}
// since the value should be unsigned, a subtraction cannot result in a negative number
// so we add the modulus to the number being subtracted to prevent that from happening
tempvar = final_temp[x1] + mod;
// addition and subtraction is taken care of here
// modulus is done after addition/subtraction
temp_data[offx] = (tempvar + (sign * final_temp[x])) % mod;
// shift by twiddle factor and perform modulus
if(repos){
temp_data[offx] <<= j * g[twid_index];
temp_data[offx] %= mod;
}
final_temp[x] = temp_data[offx];
__syncthreads();
}
// shuffle data from shared to global memory
data[elem] = final_temp[idx];
}
}
// the first part takes care of the first stages before the shuffling and the
// element transfer from shared memory to global memory
__global__ void MulSMNTTKernel2(uint32_t *data, int NTT_SIZE, const short gridSizeSh, short curr_k, short k, int mod, uint32_t* g, bool repos) {
unsigned short idx = threadIdx.x;
unsigned short b = blockIdx.x;
unsigned short glob_idx = b * blockDim.x + idx;
const int BLOCK_SIZE = blockDim.x; // the offset given for the second part of shared memory
if (idx < BLOCK_SIZE){ // for maximum performance, the last idx should be a multiple of 32 (minus 1 since it starts from 0)
uint32_t tempvar; // a temporary variable to manage subtractions and hold temporary data
short i = idx >> 1;
short j = idx & 1; // even or odd thread
short p, tmp, x, offx, x1, twid_index;
// sign takes care of the operations for even (addition)
// and odd (subtraction) threads
short sign = ((-2) * j) + 1; // can be 1 or -1
// temporary array to store intermediate data
extern __shared__ uint32_t final_temp[];
extern __shared__ uint32_t temp_data[];
// copy data from global memory to shared memory
final_temp[idx] = data[glob_idx];
__syncthreads(); // ensure that the shared memory is properly populated
// stg is the stage (or epoch)
// k is the number of stages in the computation
for(short stg = curr_k; stg <= k; stg++){
p = NTT_SIZE >> stg; // this variable will help in indexing
// indexing
tmp = i + ((i >> (k - stg)) * (1 << (k - stg)));
x = tmp + (j * p);
offx = BLOCK_SIZE + x;
x1 = x + (sign * p);
// since the value should be unsigned, a subtraction cannot result in a negative number
// so we add the modulus to the number being subtracted to prevent that from happening
tempvar = final_temp[x1] + mod;
// addition and subtraction is taken care of here
// modulus is done after addition/subtraction
temp_data[offx] = (tempvar + (sign * final_temp[x])) % mod;
// shift by twiddle factor and perform modulus
if(repos){
// twid_index manages the indexing of the twiddle factors
twid_index = (i % p) << (stg - 1);
temp_data[offx] <<= j * g[twid_index];
temp_data[offx] %= mod;
}
final_temp[x] = temp_data[offx];
__syncthreads();
}
// write finished data from shared to global memory
data[glob_idx] = final_temp[idx];
}
}
// kernel for the inverse NTT FFT
__global__ void MulSMINTTKernel1(uint32_t *data, const int NTT_SIZE, const int BITS_PER_ELEM, const short gridSizeSh, short curr_k, short k, int mod, uint32_t* g, bool repos, bool fin) {
unsigned short idx = threadIdx.x;
unsigned short b = blockIdx.x;
unsigned short glob_idx = b * blockDim.x + idx;
const int BLOCK_SIZE = NTT_SIZE >> gridSizeSh; // the offset given for the second part of shared memory
if (idx < BLOCK_SIZE){ // for maximum performance, the last idx should be a multiple of 32 (minus 1 since it starts from 0)
uint32_t tempvar; // a temporary variable to manage subtractions and hold temporary data
short i = idx >> 1;
short j = idx & 1; // even or odd thread
short p, tmp, x, offx, x1, twid_index;
// sign takes care of the operations for even (addition)
// and odd (subtraction) threads
short sign = ((-2) * j) + 1; // can be 1 or -1
// temporary array to store intermediate data
extern __shared__ uint32_t final_temp[];
extern __shared__ uint32_t temp_data[];
// copy data from global memory to shared memory
final_temp[idx] = data[glob_idx];
__syncthreads(); // ensure that the shared memory is properly populated
// stg is the stage (or epoch)
// k is the number of stages in the computation
for(short stg = k; stg >= curr_k; stg--){
p = NTT_SIZE >> stg; // this variable will help in indexing
// indexing
tmp = i + ((i >> (k - stg)) * (1 << (k - stg)));
x = tmp + (j * p);
offx = BLOCK_SIZE + x;
x1 = x + (sign * p);
if(repos){
// twid_index manages the indexing of the twiddle factors
twid_index = (i % p) << (stg - 1);
// shift by twiddle factor and perform modulus
tempvar = (final_temp[x1] << (!j * g[twid_index])) % mod;
temp_data[offx] = final_temp[x] << (j * g[twid_index]);
temp_data[offx] %= mod;
}
else{
tempvar = final_temp[x1];
temp_data[offx] = final_temp[x];
}
// since the value should be unsigned, a subtraction cannot result in a negative number
// so we add the modulus to the number being subtracted to prevent that from happening
tempvar += mod;
// addition and subtraction is taken care of here
// modulus is done after addition/subtraction
temp_data[offx] = (tempvar + (sign * temp_data[offx])) % mod;
final_temp[x] = temp_data[offx];
// new data is ready for next stage
__syncthreads();
}
if(fin){ // if there is only 1 SM
// divide each element by N
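			// N = 2^k and 2^(2*BITS_PER_ELEM) == 1 (mod 2^BITS_PER_ELEM + 1), so dividing by N is the
			// same as multiplying by 2^(2*BITS_PER_ELEM - k), carried out below as repeated modular shifts/rotations.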
short t = (BITS_PER_ELEM << 1) - k; // convert division into multiplication
uint32_t ls, rs;
short size = sizeof(uint32_t) << 3; // multiply 4 bytes by 8 in this case (32 bits)
bool rt_shift = false; // should the shift be to the right and rotate bits?
if(t < 0){ // if t is negative, all shifts are to the right
uint32_t mask = t >> (size - 1); // make a mask of the sign bit
t ^= mask; // toggle the bits
t += mask & 1; // add one
rt_shift = true;
}
short temp_sh;
short shift = size - (BITS_PER_ELEM + 1);
// shifting will be done in an optimized manner
if (shift > t)
shift = t; // if t is less than the maximum shift amount, then assign the shift amount to be t
temp_sh = shift;
for (short m = 0; m < t; m += shift) {
shift = temp_sh; // assign the shift value from the previous iteration
// there is no thread divergence here since all threads execute the same branch
if(!rt_shift){ // normal left shift
final_temp[idx] <<= shift;
}
else{ // right shift and bit rotation
rs = final_temp[idx] >> shift;
ls = final_temp[idx] << (size - shift);
final_temp[idx] = rs | ls;
}
final_temp[idx] %= mod;
if (t - shift <= (BITS_PER_ELEM - 1))
temp_sh = t - shift; // all remaining shifts will be done after modulus
}
// copy final data from shared memory to global memory
if(repos)
data[(NTT_SIZE - glob_idx) % NTT_SIZE] = final_temp[idx];
else
data[glob_idx] = final_temp[idx];
}
else{
// copy from shared memory to global memory in a coalesced manner
data[glob_idx] = final_temp[idx];
}
}
}
__global__ void MulSMINTTKernel2(uint32_t *data, const int NTT_SIZE, const int BITS_PER_ELEM, const short gridSizeSh, short curr_k, short k, int mod, uint32_t* g, bool repos) {
unsigned short idx = threadIdx.x;
unsigned short b = blockIdx.x;
const int BLOCK_SIZE = NTT_SIZE >> gridSizeSh; // the offset given for the second part of shared memory
if (idx < NTT_SIZE >> gridSizeSh){ // for maximum performance, the last idx should be a multiple of 32 (minus 1 since it starts from 0)
uint32_t tempvar; // a temporary variable to manage subtractions and hold temporary data
short i = idx >> 1;
short j = idx & 1; // even or odd thread
// the following three variables are used in the twiddle factor index computation
short l = k - curr_k - 1;
short m = i & 1;
short b_twid = b << (l - 1); // twiddle factor indexing for each block
short elem = b + (idx << gridSizeSh);
//short elem = idx + (b << l) + ((BLOCK_SIZE >> gridSizeSh) * (i >> (l-1)));
short p, tmp, x, offx, x1, twid_1, twid_2, twid_index;
// sign takes care of the operations for even (addition)
// and odd (subtraction) threads
short sign = ((-2) * j) + 1; // can be 1 or -1
// temporary array to store intermediate data
extern __shared__ uint32_t final_temp[];
extern __shared__ uint32_t temp_data[];
// copy data from global memory to shared memory
final_temp[idx] = data[elem];
__syncthreads(); // ensure that the shared memory is properly populated
// stg is the stage (or epoch)
// k is the number of stages in the computation
for(short stg = curr_k; stg >= 1; stg--){
p = BLOCK_SIZE >> stg; // this variable will help in indexing
// indexing
tmp = i + ((i >> (k - curr_k - stg)) * (1 << (k - curr_k - stg)));
x = tmp + (j * p);
offx = BLOCK_SIZE + x;
x1 = x + (sign * p);
if(repos){
twid_1 = (b + (i << gridSizeSh)) * (curr_k - stg); // twiddle for stage 1
twid_2 = ((l - 1) * ((!m * b_twid) + (m * (b_twid + BLOCK_SIZE))) + (curr_k - l) * (b << l)) * (stg - 1); // twiddle for stage 2
// twid_index manages the indexing of the twiddle factors
twid_index = twid_1 + twid_2;
// shift by twiddle factor exponent and perform modulus
tempvar = (final_temp[x1] << (!j * g[twid_index])) % mod;
temp_data[offx] = final_temp[x] << (j * g[twid_index]);
temp_data[offx] %= mod;
}
else{
tempvar = final_temp[x1];
temp_data[offx] = final_temp[x];
}
// since the value should be unsigned, a subtraction cannot result in a negative number
// so we add the modulus to the number being subtracted to prevent that from happening
tempvar += mod;
// addition and subtraction is taken care of here
// modulus is done after addition/subtraction
temp_data[offx] = (tempvar + (sign * temp_data[offx])) % mod;
final_temp[x] = temp_data[offx];
// new data is ready for next stage
__syncthreads();
}
// divide each element by N
short t = (BITS_PER_ELEM << 1) - k; // convert division into multiplication
uint32_t ls, rs;
short size = sizeof(uint32_t) << 3; // multiply 4 bytes by 8 in this case (32 bits)
bool rt_shift = false; // should the shift be to the right and rotate bits?
if(t < 0){ // if t is negative, all shifts are to the right
uint32_t mask = t >> (size - 1); // make a mask of the sign bit
t ^= mask; // toggle the bits
t += mask & 1; // add one
rt_shift = true;
}
short temp_sh;
short shift = size - (BITS_PER_ELEM + 1);
// shifting will be done in an optimized manner
if (shift > t)
shift = t; // if t is less than the maximum shift amount, then assign the shift amount to be t
temp_sh = shift;
for (short m = 0; m < t; m += shift) {
shift = temp_sh; // assign the shift value from the previous iteration
// there is no thread divergence here since all threads execute the same branch
if(!rt_shift){ // normal left shift
final_temp[idx] <<= shift;
}
else{ // right shift and bit rotation
rs = final_temp[idx] >> shift;
ls = final_temp[idx] << (size - shift);
final_temp[idx] = rs | ls;
}
final_temp[idx] %= mod;
if (t - shift <= (BITS_PER_ELEM - 1))
temp_sh = t - shift; // all remaining shifts will be done after modulus
}
// copy final data from shared memory to global memory
if(repos)
data[(NTT_SIZE - elem) % NTT_SIZE] = final_temp[idx];
else
data[elem] = final_temp[idx];
}
}
/*! \name GPU function caller and process timing function */
void ProcessandTime(const int size, const int bpe, const int thlimit){
// define NTT variables
	double intervalNTT = 0.0, intervalINTT = 0.0;	// accumulate total kernel time over all runs
float NTTms, iNTTms;
// declare and create events
hipEvent_t NTTstart, NTTstop, INTTstart, INTTstop;
hipEventCreate(&NTTstart);
hipEventCreate(&NTTstop);
hipEventCreate(&INTTstart);
hipEventCreate(&INTTstop);
int kt = log2((double)size); // actual k
int i; // index for traversing through the arrays
int modulus = (1 << bpe) + 1; // prime number for modulo arithmetic
int rt_unity = 2 * bpe / size; // calculate # bits for root of unity
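	// With bpe restricted to a power of two no larger than 16, modulus = 2^bpe + 1 is a Fermat prime
	// (5, 17, 257 or 65537) and 2 is a (2*bpe)-th root of unity modulo it, so each twiddle-factor
	// multiplication reduces to a left shift by a multiple of rt_unity bits followed by a modular reduction.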
// these flags determine whether the INTT repositions the elements
// and whether the result of the INTT matches the input array or not, respectively
bool repos_flag = true, flag = true;
int runs = 1E3; // number of times each kernel function is executed
uint32_t* in, *NTT_out, *INTT_out, *twiddle, *gpuNTTData, *gpuINTTData, *gpuTwid;
CUDA_CHECK_RETURN(hipHostMalloc((void **)&in, sizeof(uint32_t)*size, hipHostMallocDefault));
CUDA_CHECK_RETURN(hipHostMalloc((void **)&NTT_out, sizeof(uint32_t)*size, hipHostMallocDefault));
CUDA_CHECK_RETURN(hipHostMalloc((void **)&INTT_out, sizeof(uint32_t)*size, hipHostMallocDefault));
CUDA_CHECK_RETURN(hipHostMalloc((void **)&twiddle, sizeof(uint32_t)*size / 2, hipHostMallocDefault));
CUDA_CHECK_RETURN(hipMalloc((void **)&gpuNTTData, sizeof(uint32_t) * size));
CUDA_CHECK_RETURN(hipMalloc((void **)&gpuTwid, sizeof(uint32_t)*size / 2));
CUDA_CHECK_RETURN(hipMalloc((void **)&gpuINTTData, sizeof(uint32_t)*size));
if(!rt_unity)
repos_flag = false;
for(int i = 0; i < size / 2; i++){ // pre-compute twiddle factor array
twiddle[i] = rt_unity * i;
}
int blk_size;
bool isSingleSM = false;
if(size <= thlimit){
blk_size = size;
isSingleSM = true;
}
else
blk_size = thlimit;
static const int BLOCK_SIZE = blk_size; // amount of threads in each block
const int blockCount = (size) / BLOCK_SIZE; // amount of blocks in a grid
const short gridSizeSh = logf(blockCount) / logf(2);
short k = log2((double)blockCount); // first kernel covers first k stages only out of kt for FFT, and vice-versa for IFFT kernels
std::cout << "Launching kernels with " << blockCount << " block(s), each with " << BLOCK_SIZE << " threads." << std::endl;
srand(time(NULL)); // generate the seed for the pseudo-random number generator
for (int j = 0; j < runs; j++) {
initialize(in, modulus, size);
CUDA_CHECK_RETURN(hipMemcpy(gpuNTTData, in, sizeof(uint32_t)*size, hipMemcpyHostToDevice));
CUDA_CHECK_RETURN(hipMemcpy(gpuTwid, twiddle, sizeof(uint32_t)*size/2, hipMemcpyHostToDevice));
hipEventRecord(NTTstart);
if(!isSingleSM){
// computes first k stages and shuffles data from shared memory to global memory
hipLaunchKernelGGL(( MulSMNTTKernel1), dim3(blockCount), dim3(BLOCK_SIZE), 2 * BLOCK_SIZE * sizeof(uint32_t), 0, gpuNTTData, size, gridSizeSh, k, kt, modulus, gpuTwid, repos_flag);
}
// shuffles data from global memory to shared memory and computes last set of stages
hipLaunchKernelGGL(( MulSMNTTKernel2), dim3(blockCount), dim3(BLOCK_SIZE), 2 * BLOCK_SIZE * sizeof(uint32_t), 0, gpuNTTData, size, gridSizeSh, k + 1, kt, modulus, gpuTwid, repos_flag);
hipEventRecord(NTTstop);
CUDA_CHECK_RETURN(hipMemcpy(NTT_out, gpuNTTData, sizeof(uint32_t) * size, hipMemcpyDeviceToHost));
CUDA_CHECK_RETURN(hipMemcpy(gpuINTTData, NTT_out, sizeof(uint32_t)*size, hipMemcpyHostToDevice));
hipEventSynchronize(NTTstop);
hipEventElapsedTime(&NTTms, NTTstart, NTTstop);
hipEventRecord(INTTstart);
// computes first stages and shuffles data from shared memory to global memory
hipLaunchKernelGGL(( MulSMINTTKernel1), dim3(blockCount), dim3(BLOCK_SIZE), 2 * BLOCK_SIZE * sizeof(uint32_t), 0, gpuINTTData, size, bpe, gridSizeSh, k + 1, kt, modulus, gpuTwid, repos_flag, isSingleSM);
if(!isSingleSM){
// shuffles data from global memory to shared memory and computes last set of stages
hipLaunchKernelGGL(( MulSMINTTKernel2), dim3(blockCount), dim3(BLOCK_SIZE), 2 * BLOCK_SIZE * sizeof(uint32_t), 0, gpuINTTData, size, bpe, gridSizeSh, k, kt, modulus, gpuTwid, repos_flag);
}
hipEventRecord(INTTstop);
CUDA_CHECK_RETURN(hipMemcpy(INTT_out, gpuINTTData, sizeof(uint32_t)*size, hipMemcpyDeviceToHost));
hipEventSynchronize(INTTstop);
hipEventElapsedTime(&iNTTms, INTTstart, INTTstop);
intervalNTT += NTTms;
intervalINTT += iNTTms;
for (i = 0; i < size; i++) {
if (in[i] != INTT_out[i]) {
flag = false;
break;
}
}
if (!flag) {
std::cout << "j = " << j << ", i = " << i << "\nin = " << in[i] << ", out = " << INTT_out[i] << std::endl;
break;
}
}
if (flag){
std::cout << "NTT matched. Average time taken for NTT is: " << intervalNTT << " microseconds" << std::endl;
std::cout << "Average time taken for INTT is: " << intervalINTT << " microseconds" << std::endl;
}
// clean up
hipEventDestroy(NTTstart);
hipEventDestroy(NTTstop);
hipEventDestroy(INTTstart);
hipEventDestroy(INTTstop);
CUDA_CHECK_RETURN(hipFree(gpuNTTData));
CUDA_CHECK_RETURN(hipFree(gpuINTTData));
CUDA_CHECK_RETURN(hipFree(gpuTwid));
CUDA_CHECK_RETURN(hipHostFree(in));
CUDA_CHECK_RETURN(hipHostFree(NTT_out));
CUDA_CHECK_RETURN(hipHostFree(INTT_out));
CUDA_CHECK_RETURN(hipHostFree(twiddle));
return;
}
// @}
/* \name Array initializer function */
void initialize(uint32_t* in, int mod, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (uint32_t)(rand() % mod);
}
}
// @}
int main(int argc, char *argv[])
{
std::cerr << "NTT FFT" << std::endl;
	if (argc != 4) // validate the argument count before reading argv below
	{
		std::cerr << "Usage: " << argv[0]
				<< " <NTT size> <Bits per element> <thread limit per block>" << std::endl;
		exit(1);
	}
	const int NTT_SIZE = atoi(argv[1]); // number of elements to be processed (N)
const int BITS_PER_ELEM = atoi(argv[2]); // number of bits to represent one element (n)
const int TH_LIM = atoi(argv[3]); // maximum number of threads allowable per kernel
double chk1 = log2((double)NTT_SIZE);
double chk2 = log2((double)BITS_PER_ELEM);
double chk3 = (double)logf(TH_LIM) / logf(2);
// check whether all user parameters are valid
if (argc != 4 || chk1 != round(chk1) || chk2 != round(chk2) || chk3 != round(chk3) || TH_LIM < 0 || TH_LIM > 1024 || BITS_PER_ELEM > 16
|| NTT_SIZE/TH_LIM > TH_LIM || NTT_SIZE <= 1 || BITS_PER_ELEM <= 1)
{
std::cerr << "Usage: " << argv[0]
<< " <NTT size> <Bits per element> <thread limit per block>" << std::endl;
exit(1);
}
std::cout << "Computing..." << std::endl;
ProcessandTime(NTT_SIZE, BITS_PER_ELEM, TH_LIM);
return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err)
{
if (err == hipSuccess)
return;
std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
| 88de94a0f4bb529e68c5d66193c7ed046c556c84.cu | /*
============================================================================
Name : NTTFFTCUDA.cu
Author : Owen
Version :
Copyright : Your copyright notice
 Description : CUDA number theoretic transform (NTT) and inverse NTT modulo 2^n + 1
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <stdint.h>
#include <math.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
void ProcessandTime(const int size, const int bpe, const int thlimit);
void initialize(uint32_t* arr, int mod, const int size);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* CUDA kernel that computes the integer FFT of an array of unsigned integers
*/
// both of the following constants must be a power of two
// the first part takes care of the first stages before the shuffling and the
// element transfer from shared memory to global memory
__global__ void MulSMNTTKernel1(uint32_t *data, const int NTT_SIZE, const short gridSizeSh, short curr_k, short k, int mod, uint32_t* g, bool repos) {
unsigned short idx = threadIdx.x;
unsigned short b = blockIdx.x;
const int BLOCK_SIZE = blockDim.x; // the offset given for the second part of shared memory
if (idx < BLOCK_SIZE){ // for maximum performance, the last idx should be a multiple of 32 (minus 1 since it starts from 0)
uint32_t tempvar; // a temporary variable to manage subtractions and hold temporary data
short i = idx >> 1;
short j = idx & 1; // even or odd thread
// the following three variables are used in the twiddle factor index computation
short l = k - curr_k - 1;
short m = i & 1; // helps in indexing for each thread in a block
short b_twid = b << (l - 1); // twiddle factor indexing for each block
short elem = b + (idx << gridSizeSh);
//short elem = idx + (b << l) + ((BLOCK_SIZE >> gridSizeSh) * (i >> (l-1)));
short p, tmp, x, offx, x1, twid_1, twid_2, twid_index;
// sign takes care of the operations for even (addition)
// and odd (subtraction) threads
short sign = ((-2) * j) + 1; // can be 1 or -1
// temporary array to store intermediate data
extern __shared__ uint32_t final_temp[];
extern __shared__ uint32_t temp_data[];
// copy data from global memory to shared memory
final_temp[idx] = data[elem];
__syncthreads(); // ensure that the shared memory is properly populated
// stg is the stage (or epoch)
// k is the number of stages in the computation
for(short stg = 1; stg <= curr_k; stg++){
p = BLOCK_SIZE >> stg; // this variable will help in indexing
// indexing
tmp = i + ((i >> (k - curr_k - stg)) * (1 << (k - curr_k - stg)));
x = tmp + (j * p);
offx = BLOCK_SIZE + x; // use this to index second part of shared memory (temp_data[])
x1 = x + (sign * p);
if(repos){
twid_1 = (b + (i << gridSizeSh)) * (curr_k - stg); // twiddle for stage 1
twid_2 = ((l - 1) * ((!m * b_twid) + (m * (b_twid + BLOCK_SIZE))) + (curr_k - l) * (b << l)) * (stg - 1); // twiddle for stage 2
// twid_index manages the indexing of the twiddle factors
twid_index = twid_1 + twid_2;
}
// since the value should be unsigned, a subtraction cannot result in a negative number
// so we add the modulus to the number being subtracted to prevent that from happening
tempvar = final_temp[x1] + mod;
// addition and subtraction is taken care of here
// modulus is done after addition/subtraction
temp_data[offx] = (tempvar + (sign * final_temp[x])) % mod;
// shift by twiddle factor and perform modulus
if(repos){
temp_data[offx] <<= j * g[twid_index];
temp_data[offx] %= mod;
}
final_temp[x] = temp_data[offx];
__syncthreads();
}
// shuffle data from shared to global memory
data[elem] = final_temp[idx];
}
}
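// Host-side reference of the butterfly each thread pair above performs (a sketch for
// clarity only, never called): with u = data[i], v = data[i + p] and a twiddle exponent e
// (the kernels use powers of two as twiddle factors), one decimation-in-frequency step gives
//   data[i]     <- (u + v) mod prime
//   data[i + p] <- ((u - v) * 2^e) mod prime
// which is exactly what the even (sign = +1) and odd (sign = -1) threads compute.
// As in the kernels, the shift assumes the intermediate value still fits in 32 bits.
static inline void butterflyReference(uint32_t &u, uint32_t &v, uint32_t e, uint32_t prime)
{
	uint32_t sum = (u + v) % prime;				// even-thread path
	uint32_t diff = (u + prime - v) % prime;	// odd-thread path (modulus added to keep it non-negative)
	diff = (diff << e) % prime;					// twiddle multiplication as a left shift
	u = sum;
	v = diff;
}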
// the first part takes care of the first stages before the shuffling and the
// element transfer from shared memory to global memory
__global__ void MulSMNTTKernel2(uint32_t *data, int NTT_SIZE, const short gridSizeSh, short curr_k, short k, int mod, uint32_t* g, bool repos) {
unsigned short idx = threadIdx.x;
unsigned short b = blockIdx.x;
unsigned short glob_idx = b * blockDim.x + idx;
const int BLOCK_SIZE = blockDim.x; // the offset given for the second part of shared memory
if (idx < BLOCK_SIZE){ // for maximum performance, the last idx should be a multiple of 32 (minus 1 since it starts from 0)
uint32_t tempvar; // a temporary variable to manage subtractions and hold temporary data
short i = idx >> 1;
short j = idx & 1; // even or odd thread
short p, tmp, x, offx, x1, twid_index;
// sign takes care of the operations for even (addition)
// and odd (subtraction) threads
short sign = ((-2) * j) + 1; // can be 1 or -1
// temporary array to store intermediate data
extern __shared__ uint32_t final_temp[];
extern __shared__ uint32_t temp_data[];
// copy data from global memory to shared memory
final_temp[idx] = data[glob_idx];
__syncthreads(); // ensure that the shared memory is properly populated
// stg is the stage (or epoch)
// k is the number of stages in the computation
for(short stg = curr_k; stg <= k; stg++){
p = NTT_SIZE >> stg; // this variable will help in indexing
// indexing
tmp = i + ((i >> (k - stg)) * (1 << (k - stg)));
x = tmp + (j * p);
offx = BLOCK_SIZE + x;
x1 = x + (sign * p);
// since the value should be unsigned, a subtraction cannot result in a negative number
// so we add the modulus to the number being subtracted to prevent that from happening
tempvar = final_temp[x1] + mod;
// addition and subtraction is taken care of here
// modulus is done after addition/subtraction
temp_data[offx] = (tempvar + (sign * final_temp[x])) % mod;
// shift by twiddle factor and perform modulus
if(repos){
// twid_index manages the indexing of the twiddle factors
twid_index = (i % p) << (stg - 1);
temp_data[offx] <<= j * g[twid_index];
temp_data[offx] %= mod;
}
final_temp[x] = temp_data[offx];
__syncthreads();
}
// write finished data from shared to global memory
data[glob_idx] = final_temp[idx];
}
}
// kernel for the inverse NTT FFT
__global__ void MulSMINTTKernel1(uint32_t *data, const int NTT_SIZE, const int BITS_PER_ELEM, const short gridSizeSh, short curr_k, short k, int mod, uint32_t* g, bool repos, bool fin) {
unsigned short idx = threadIdx.x;
unsigned short b = blockIdx.x;
unsigned short glob_idx = b * blockDim.x + idx;
const int BLOCK_SIZE = NTT_SIZE >> gridSizeSh; // the offset given for the second part of shared memory
if (idx < BLOCK_SIZE){ // for maximum performance, the last idx should be a multiple of 32 (minus 1 since it starts from 0)
uint32_t tempvar; // a temporary variable to manage subtractions and hold temporary data
short i = idx >> 1;
short j = idx & 1; // even or odd thread
short p, tmp, x, offx, x1, twid_index;
// sign takes care of the operations for even (addition)
// and odd (subtraction) threads
short sign = ((-2) * j) + 1; // can be 1 or -1
// temporary array to store intermediate data
extern __shared__ uint32_t final_temp[];
extern __shared__ uint32_t temp_data[];
// copy data from global memory to shared memory
final_temp[idx] = data[glob_idx];
__syncthreads(); // ensure that the shared memory is properly populated
// stg is the stage (or epoch)
// k is the number of stages in the computation
for(short stg = k; stg >= curr_k; stg--){
p = NTT_SIZE >> stg; // this variable will help in indexing
// indexing
tmp = i + ((i >> (k - stg)) * (1 << (k - stg)));
x = tmp + (j * p);
offx = BLOCK_SIZE + x;
x1 = x + (sign * p);
if(repos){
// twid_index manages the indexing of the twiddle factors
twid_index = (i % p) << (stg - 1);
// shift by twiddle factor and perform modulus
tempvar = (final_temp[x1] << (!j * g[twid_index])) % mod;
temp_data[offx] = final_temp[x] << (j * g[twid_index]);
temp_data[offx] %= mod;
}
else{
tempvar = final_temp[x1];
temp_data[offx] = final_temp[x];
}
// since the value should be unsigned, a subtraction cannot result in a negative number
// so we add the modulus to the number being subtracted to prevent that from happening
tempvar += mod;
// addition and subtraction is taken care of here
// modulus is done after addition/subtraction
temp_data[offx] = (tempvar + (sign * temp_data[offx])) % mod;
final_temp[x] = temp_data[offx];
// new data is ready for next stage
__syncthreads();
}
if(fin){ // if there is only 1 SM
// divide each element by N
short t = (BITS_PER_ELEM << 1) - k; // convert division into multiplication
uint32_t ls, rs;
short size = sizeof(uint32_t) << 3; // multiply 4 bytes by 8 in this case (32 bits)
bool rt_shift = false; // should the shift be to the right and rotate bits?
if(t < 0){ // if t is negative, all shifts are to the right
uint32_t mask = t >> (size - 1); // make a mask of the sign bit
t ^= mask; // toggle the bits
t += mask & 1; // add one
rt_shift = true;
}
short temp_sh;
short shift = size - (BITS_PER_ELEM + 1);
// shifting will be done in an optimized manner
if (shift > t)
shift = t; // if t is less than the maximum shift amount, then assign the shift amount to be t
temp_sh = shift;
for (short m = 0; m < t; m += shift) {
shift = temp_sh; // assign the shift value from the previous iteration
// there is no thread divergence here since all threads execute the same branch
if(!rt_shift){ // normal left shift
final_temp[idx] <<= shift;
}
else{ // right shift and bit rotation
rs = final_temp[idx] >> shift;
ls = final_temp[idx] << (size - shift);
final_temp[idx] = rs | ls;
}
final_temp[idx] %= mod;
if (t - shift <= (BITS_PER_ELEM - 1))
temp_sh = t - shift; // all remaining shifts will be done after modulus
}
// copy final data from shared memory to global memory
if(repos)
data[(NTT_SIZE - glob_idx) % NTT_SIZE] = final_temp[idx];
else
data[glob_idx] = final_temp[idx];
}
else{
// copy from shared memory to global memory in a coalesced manner
data[glob_idx] = final_temp[idx];
}
}
}
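// Host-side sketch (never called) of the final 1/N scaling used by the inverse kernels:
// dividing by N = 2^m modulo p = 2^n + 1 is the same as multiplying by 2^(2n - m), since
// 2^n = -1 (mod p) makes 2 a 2n-th root of unity. The kernels apply that power of two as a
// sequence of bounded shifts; this sketch covers the non-negative-exponent case only.
static inline uint32_t mulPow2Mod(uint32_t x, int t, int n, uint32_t p)
{
	// assumes p == 2^n + 1 and x < p, so x occupies at most n + 1 bits
	int maxShift = 31 - n;						// largest shift that cannot overflow 32 bits
	while (t > 0) {
		int s = (t < maxShift) ? t : maxShift;
		x = (x << s) % p;						// shift a bounded amount, then reduce
		t -= s;
	}
	return x;
}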
__global__ void MulSMINTTKernel2(uint32_t *data, const int NTT_SIZE, const int BITS_PER_ELEM, const short gridSizeSh, short curr_k, short k, int mod, uint32_t* g, bool repos) {
unsigned short idx = threadIdx.x;
unsigned short b = blockIdx.x;
const int BLOCK_SIZE = NTT_SIZE >> gridSizeSh; // the offset given for the second part of shared memory
if (idx < NTT_SIZE >> gridSizeSh){ // for maximum performance, the last idx should be a multiple of 32 (minus 1 since it starts from 0)
uint32_t tempvar; // a temporary variable to manage subtractions and hold temporary data
short i = idx >> 1;
short j = idx & 1; // even or odd thread
// the following three variables are used in the twiddle factor index computation
short l = k - curr_k - 1;
short m = i & 1;
short b_twid = b << (l - 1); // twiddle factor indexing for each block
short elem = b + (idx << gridSizeSh);
//short elem = idx + (b << l) + ((BLOCK_SIZE >> gridSizeSh) * (i >> (l-1)));
short p, tmp, x, offx, x1, twid_1, twid_2, twid_index;
// sign takes care of the operations for even (addition)
// and odd (subtraction) threads
short sign = ((-2) * j) + 1; // can be 1 or -1
// temporary array to store intermediate data
extern __shared__ uint32_t final_temp[];
extern __shared__ uint32_t temp_data[];
// copy data from global memory to shared memory
final_temp[idx] = data[elem];
__syncthreads(); // ensure that the shared memory is properly populated
// stg is the stage (or epoch)
// k is the number of stages in the computation
for(short stg = curr_k; stg >= 1; stg--){
p = BLOCK_SIZE >> stg; // this variable will help in indexing
// indexing
tmp = i + ((i >> (k - curr_k - stg)) * (1 << (k - curr_k - stg)));
x = tmp + (j * p);
offx = BLOCK_SIZE + x;
x1 = x + (sign * p);
if(repos){
twid_1 = (b + (i << gridSizeSh)) * (curr_k - stg); // twiddle for stage 1
twid_2 = ((l - 1) * ((!m * b_twid) + (m * (b_twid + BLOCK_SIZE))) + (curr_k - l) * (b << l)) * (stg - 1); // twiddle for stage 2
// twid_index manages the indexing of the twiddle factors
twid_index = twid_1 + twid_2;
// shift by twiddle factor exponent and perform modulus
tempvar = (final_temp[x1] << (!j * g[twid_index])) % mod;
temp_data[offx] = final_temp[x] << (j * g[twid_index]);
temp_data[offx] %= mod;
}
else{
tempvar = final_temp[x1];
temp_data[offx] = final_temp[x];
}
// since the value should be unsigned, a subtraction cannot result in a negative number
// so we add the modulus to the number being subtracted to prevent that from happening
tempvar += mod;
// addition and subtraction is taken care of here
// modulus is done after addition/subtraction
temp_data[offx] = (tempvar + (sign * temp_data[offx])) % mod;
final_temp[x] = temp_data[offx];
// new data is ready for next stage
__syncthreads();
}
// divide each element by N
short t = (BITS_PER_ELEM << 1) - k; // convert division into multiplication
uint32_t ls, rs;
short size = sizeof(uint32_t) << 3; // multiply 4 bytes by 8 in this case (32 bits)
bool rt_shift = false; // should the shift be to the right and rotate bits?
if(t < 0){ // if t is negative, all shifts are to the right
uint32_t mask = t >> (size - 1); // make a mask of the sign bit
t ^= mask; // toggle the bits
t += mask & 1; // add one
rt_shift = true;
}
short temp_sh;
short shift = size - (BITS_PER_ELEM + 1);
// shifting will be done in an optimized manner
if (shift > t)
shift = t; // if t is less than the maximum shift amount, then assign the shift amount to be t
temp_sh = shift;
for (short m = 0; m < t; m += shift) {
shift = temp_sh; // assign the shift value from the previous iteration
// there is no thread divergence here since all threads execute the same branch
if(!rt_shift){ // normal left shift
final_temp[idx] <<= shift;
}
else{ // right shift and bit rotation
rs = final_temp[idx] >> shift;
ls = final_temp[idx] << (size - shift);
final_temp[idx] = rs | ls;
}
final_temp[idx] %= mod;
if (t - shift <= (BITS_PER_ELEM - 1))
temp_sh = t - shift; // all remaining shifts will be done after modulus
}
// copy final data from shared memory to global memory
if(repos)
data[(NTT_SIZE - elem) % NTT_SIZE] = final_temp[idx];
else
data[elem] = final_temp[idx];
}
}
/*! \name GPU function caller and process timing function */
void ProcessandTime(const int size, const int bpe, const int thlimit){
// define NTT variables
	double intervalNTT = 0, intervalINTT = 0;	// accumulated kernel times in milliseconds
float NTTms, iNTTms;
// declare and create events
cudaEvent_t NTTstart, NTTstop, INTTstart, INTTstop;
cudaEventCreate(&NTTstart);
cudaEventCreate(&NTTstop);
cudaEventCreate(&INTTstart);
cudaEventCreate(&INTTstop);
int kt = log2((double)size); // actual k
int i; // index for traversing through the arrays
int modulus = (1 << bpe) + 1; // prime number for modulo arithmetic
int rt_unity = 2 * bpe / size; // calculate # bits for root of unity
// these flags determine whether the INTT repositions the elements
// and whether the result of the INTT matches the input array or not, respectively
bool repos_flag = true, flag = true;
int runs = 1E3; // number of times each kernel function is executed
uint32_t* in, *NTT_out, *INTT_out, *twiddle, *gpuNTTData, *gpuINTTData, *gpuTwid;
CUDA_CHECK_RETURN(cudaMallocHost((void **)&in, sizeof(uint32_t)*size, cudaHostAllocDefault));
CUDA_CHECK_RETURN(cudaMallocHost((void **)&NTT_out, sizeof(uint32_t)*size, cudaHostAllocDefault));
CUDA_CHECK_RETURN(cudaMallocHost((void **)&INTT_out, sizeof(uint32_t)*size, cudaHostAllocDefault));
CUDA_CHECK_RETURN(cudaMallocHost((void **)&twiddle, sizeof(uint32_t)*size / 2, cudaHostAllocDefault));
CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuNTTData, sizeof(uint32_t) * size));
CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuTwid, sizeof(uint32_t)*size / 2));
CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuINTTData, sizeof(uint32_t)*size));
if(!rt_unity)
repos_flag = false;
for(int i = 0; i < size / 2; i++){ // pre-compute twiddle factor array
twiddle[i] = rt_unity * i;
}
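	// note: twiddle[] holds shift amounts (exponents of 2) rather than the factors themselves;
	// because 2^bpe = -1 (mod 2^bpe + 1), 2 acts as a 2*bpe-th root of unity, so every twiddle
	// multiplication in the kernels reduces to a left shift followed by a modular reduction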
int blk_size;
bool isSingleSM = false;
if(size <= thlimit){
blk_size = size;
isSingleSM = true;
}
else
blk_size = thlimit;
static const int BLOCK_SIZE = blk_size; // amount of threads in each block
const int blockCount = (size) / BLOCK_SIZE; // amount of blocks in a grid
	const short gridSizeSh = log2((double)blockCount);	// log2 of the block count (exact for powers of two)
short k = log2((double)blockCount); // first kernel covers first k stages only out of kt for FFT, and vice-versa for IFFT kernels
std::cout << "Launching kernels with " << blockCount << " block(s), each with " << BLOCK_SIZE << " threads." << std::endl;
srand(time(NULL)); // generate the seed for the pseudo-random number generator
for (int j = 0; j < runs; j++) {
initialize(in, modulus, size);
CUDA_CHECK_RETURN(cudaMemcpy(gpuNTTData, in, sizeof(uint32_t)*size, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(gpuTwid, twiddle, sizeof(uint32_t)*size/2, cudaMemcpyHostToDevice));
cudaEventRecord(NTTstart);
if(!isSingleSM){
// computes first k stages and shuffles data from shared memory to global memory
MulSMNTTKernel1<<<blockCount, BLOCK_SIZE, 2 * BLOCK_SIZE * sizeof(uint32_t)>>> (gpuNTTData, size, gridSizeSh, k, kt, modulus, gpuTwid, repos_flag);
}
// shuffles data from global memory to shared memory and computes last set of stages
MulSMNTTKernel2<<<blockCount, BLOCK_SIZE, 2 * BLOCK_SIZE * sizeof(uint32_t)>>> (gpuNTTData, size, gridSizeSh, k + 1, kt, modulus, gpuTwid, repos_flag);
cudaEventRecord(NTTstop);
CUDA_CHECK_RETURN(cudaMemcpy(NTT_out, gpuNTTData, sizeof(uint32_t) * size, cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaMemcpy(gpuINTTData, NTT_out, sizeof(uint32_t)*size, cudaMemcpyHostToDevice));
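		// note: the inverse-transform input is staged through the host here (device -> host -> device);
		// a cudaMemcpyDeviceToDevice from gpuNTTData to gpuINTTData would avoid the extra PCIe round trip
		// if the host-side copy of the NTT result is not otherwise needed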
cudaEventSynchronize(NTTstop);
cudaEventElapsedTime(&NTTms, NTTstart, NTTstop);
cudaEventRecord(INTTstart);
// computes first stages and shuffles data from shared memory to global memory
MulSMINTTKernel1<<<blockCount, BLOCK_SIZE, 2 * BLOCK_SIZE * sizeof(uint32_t)>>> (gpuINTTData, size, bpe, gridSizeSh, k + 1, kt, modulus, gpuTwid, repos_flag, isSingleSM);
if(!isSingleSM){
// shuffles data from global memory to shared memory and computes last set of stages
MulSMINTTKernel2<<<blockCount, BLOCK_SIZE, 2 * BLOCK_SIZE * sizeof(uint32_t)>>> (gpuINTTData, size, bpe, gridSizeSh, k, kt, modulus, gpuTwid, repos_flag);
}
cudaEventRecord(INTTstop);
CUDA_CHECK_RETURN(cudaMemcpy(INTT_out, gpuINTTData, sizeof(uint32_t)*size, cudaMemcpyDeviceToHost));
cudaEventSynchronize(INTTstop);
cudaEventElapsedTime(&iNTTms, INTTstart, INTTstop);
intervalNTT += NTTms;
intervalINTT += iNTTms;
for (i = 0; i < size; i++) {
if (in[i] != INTT_out[i]) {
flag = false;
break;
}
}
if (!flag) {
std::cout << "j = " << j << ", i = " << i << "\nin = " << in[i] << ", out = " << INTT_out[i] << std::endl;
break;
}
}
if (flag){
std::cout << "NTT matched. Average time taken for NTT is: " << intervalNTT << " microseconds" << std::endl;
std::cout << "Average time taken for INTT is: " << intervalINTT << " microseconds" << std::endl;
}
// clean up
cudaEventDestroy(NTTstart);
cudaEventDestroy(NTTstop);
cudaEventDestroy(INTTstart);
cudaEventDestroy(INTTstop);
CUDA_CHECK_RETURN(cudaFree(gpuNTTData));
CUDA_CHECK_RETURN(cudaFree(gpuINTTData));
CUDA_CHECK_RETURN(cudaFree(gpuTwid));
CUDA_CHECK_RETURN(cudaFreeHost(in));
CUDA_CHECK_RETURN(cudaFreeHost(NTT_out));
CUDA_CHECK_RETURN(cudaFreeHost(INTT_out));
CUDA_CHECK_RETURN(cudaFreeHost(twiddle));
return;
}
// @}
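// A minimal sketch (not used above) of the event-timing idiom in ProcessandTime:
// cudaEventElapsedTime reports the interval between two recorded events in milliseconds,
// and the stop event must be synchronised on before the elapsed time is read back.
static inline float timedRegionMs()
{
	cudaEvent_t start, stop;
	float ms = 0.0f;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	// ... enqueue the kernel launches to be timed here ...
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&ms, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return ms;
}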
/* \name Array initializer function */
void initialize(uint32_t* in, int mod, const int size)
{
for (int i = 0; i < size; i++)
{
in[i] = (uint32_t)(rand() % mod);
}
}
// @}
int main(int argc, char *argv[])
{
	std::cerr << "NTT FFT" << std::endl;
	// validate the argument count before touching argv to avoid reading past its end
	if (argc != 4)
	{
		std::cerr << "Usage: " << argv[0]
				<< " <NTT size> <Bits per element> <thread limit per block>" << std::endl;
		exit(1);
	}
	const int NTT_SIZE = atoi(argv[1]); // number of elements to be processed (N)
	const int BITS_PER_ELEM = atoi(argv[2]); // number of bits to represent one element (n)
	const int TH_LIM = atoi(argv[3]); // maximum number of threads allowable per block
	double chk1 = log2((double)NTT_SIZE);
	double chk2 = log2((double)BITS_PER_ELEM);
	double chk3 = log2((double)TH_LIM);
	// check whether all user parameters are valid (power-of-two sizes and sane limits)
	if (chk1 != round(chk1) || chk2 != round(chk2) || chk3 != round(chk3) || TH_LIM <= 0 || TH_LIM > 1024 || BITS_PER_ELEM > 16
			|| NTT_SIZE/TH_LIM > TH_LIM || NTT_SIZE <= 1 || BITS_PER_ELEM <= 1)
	{
		std::cerr << "Usage: " << argv[0]
				<< " <NTT size> <Bits per element> <thread limit per block>" << std::endl;
		exit(1);
	}
	std::cout << "Computing..." << std::endl;
	ProcessandTime(NTT_SIZE, BITS_PER_ELEM, TH_LIM);
	return 0;
}
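// A sketch of an integer-only power-of-two test that could replace the floating-point
// log2/round comparisons in main above (hypothetical helper, not referenced anywhere):
static inline bool isPowerOfTwo(int x)
{
	return x > 0 && ((x & (x - 1)) == 0);	// clearing the lowest set bit leaves zero only for powers of two
}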
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
|
962f0c362c13d81746f3f92ea091dbe85e8a0bea.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <typeinfo>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <string.h>
#include <iostream>
#include <misc_helpers.h>
#include <face_quda.h>
#include <dslash_quda.h>
#ifdef DEVICE_PACK
static const QudaFieldLocation reorder_location_ = QUDA_CUDA_FIELD_LOCATION;
#else
static const QudaFieldLocation reorder_location_ = QUDA_CPU_FIELD_LOCATION;
#endif
int zeroCopy = 0;
namespace quda {
int cudaColorSpinorField::bufferIndex = 0;
bool cudaColorSpinorField::initGhostFaceBuffer = false;
void *cudaColorSpinorField::ghost_field[2] = {nullptr, nullptr};
void* cudaColorSpinorField::ghostFaceBuffer[2] = {nullptr, nullptr}; //gpu memory
void* cudaColorSpinorField::fwdGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
void* cudaColorSpinorField::backGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
size_t cudaColorSpinorField::ghostFaceBytes = 0;
bool cudaColorSpinorField::initIPCComms = false;
int cudaColorSpinorField::buffer_send_p2p_fwd[2][QUDA_MAX_DIM];
int cudaColorSpinorField::buffer_recv_p2p_fwd[2][QUDA_MAX_DIM];
int cudaColorSpinorField::buffer_send_p2p_back[2][QUDA_MAX_DIM];
int cudaColorSpinorField::buffer_recv_p2p_back[2][QUDA_MAX_DIM];
MsgHandle* cudaColorSpinorField::mh_send_p2p_fwd[2][QUDA_MAX_DIM];
MsgHandle* cudaColorSpinorField::mh_send_p2p_back[2][QUDA_MAX_DIM];
MsgHandle* cudaColorSpinorField::mh_recv_p2p_fwd[2][QUDA_MAX_DIM];
MsgHandle* cudaColorSpinorField::mh_recv_p2p_back[2][QUDA_MAX_DIM];
hipEvent_t cudaColorSpinorField::ipcCopyEvent[2][2][QUDA_MAX_DIM];
hipEvent_t cudaColorSpinorField::ipcRemoteCopyEvent[2][2][QUDA_MAX_DIM];
void* cudaColorSpinorField::fwdGhostSendDest[2][QUDA_MAX_DIM];
void* cudaColorSpinorField::backGhostSendDest[2][QUDA_MAX_DIM];
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorParam ¶m) :
ColorSpinorField(param), alloc(false), init(true), texInit(false),
ghostTexInit(false), ghost_field_tex{nullptr,nullptr}, initComms(false), bufferMessageHandler(0)
{
// this must come before create
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
v = param.v;
norm = param.norm;
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_COPY_FIELD_CREATE) {
errorQuda("not implemented");
}
}
cudaColorSpinorField::cudaColorSpinorField(const cudaColorSpinorField &src) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
ghostTexInit(false), ghost_field_tex{nullptr,nullptr}, initComms(false), bufferMessageHandler(0)
{
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
// creates a copy of src, any differences defined in param
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src,
const ColorSpinorParam ¶m) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
ghostTexInit(false), ghost_field_tex{nullptr,nullptr}, initComms(false), bufferMessageHandler(0)
{
    // can only override if we are not using a reference or parity special case
if (param.create != QUDA_REFERENCE_FIELD_CREATE ||
(param.create == QUDA_REFERENCE_FIELD_CREATE &&
src.SiteSubset() == QUDA_FULL_SITE_SUBSET &&
param.siteSubset == QUDA_PARITY_SITE_SUBSET &&
typeid(src) == typeid(cudaColorSpinorField) ) ||
(param.create == QUDA_REFERENCE_FIELD_CREATE && (param.is_composite || param.is_component))) {
reset(param);
} else {
errorQuda("Undefined behaviour"); // else silent bug possible?
}
// This must be set before create is called
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
v = (void*)src.V();
norm = (void*)src.Norm();
} else {
errorQuda("Cannot reference a non-cuda field");
}
if (composite_descr.is_component && !(src.SiteSubset() == QUDA_FULL_SITE_SUBSET && this->SiteSubset() == QUDA_PARITY_SITE_SUBSET))
      { // set up an eigenvector from the set
v = (void*)((char*)v + composite_descr.id*bytes);
norm = (void*)((char*)norm + composite_descr.id*norm_bytes);
}
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_COPY_FIELD_CREATE) {
copySpinorField(src);
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else {
errorQuda("CreateType %d not implemented", param.create);
}
}
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src)
: ColorSpinorField(src), alloc(false), init(true), texInit(false),
ghostTexInit(false), ghost_field_tex{nullptr,nullptr}, initComms(false), bufferMessageHandler(0)
{
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
ColorSpinorField& cudaColorSpinorField::operator=(const ColorSpinorField &src) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
*this = (dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cpuColorSpinorField)) {
*this = (dynamic_cast<const cpuColorSpinorField&>(src));
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cudaColorSpinorField &src) {
if (&src != this) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
	destroyComms(); // not sure if this is necessary
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
copySpinorField(src);
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cpuColorSpinorField &src) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
loadSpinorField(src);
return *this;
}
cudaColorSpinorField::~cudaColorSpinorField() {
destroyComms();
destroy();
}
void cudaColorSpinorField::create(const QudaFieldCreate create) {
if (siteSubset == QUDA_FULL_SITE_SUBSET && siteOrder != QUDA_EVEN_ODD_SITE_ORDER) {
errorQuda("Subset not implemented");
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
v = pool_device_malloc(bytes);
if (precision == QUDA_HALF_PRECISION) norm = pool_device_malloc(norm_bytes);
alloc = true;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
if(composite_descr.is_composite && (create != QUDA_REFERENCE_FIELD_CREATE)) {
if(composite_descr.dim <= 0) errorQuda("\nComposite size is not defined\n");
ColorSpinorParam param;
param.siteSubset = QUDA_FULL_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.is_composite = false;
param.composite_dim = 0;
param.is_component = true;
components.reserve(composite_descr.dim);
for(int cid = 0; cid < composite_descr.dim; cid++) {
param.component_id = cid;
components.push_back(new cudaColorSpinorField(*this, param));
}
} else {
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.x[0] /= 2; // set single parity dimensions
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.is_composite = false;
param.composite_dim = 0;
param.is_component = composite_descr.is_component;
param.component_id = composite_descr.id;
even = new cudaColorSpinorField(*this, param);
odd = new cudaColorSpinorField(*this, param);
// need this hackery for the moment (need to locate the odd pointers half way into the full field)
(dynamic_cast<cudaColorSpinorField*>(odd))->v = (void*)((char*)v + bytes/2);
if (precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->norm = (void*)((char*)norm + norm_bytes/2);
#ifdef USE_TEXTURE_OBJECTS
dynamic_cast<cudaColorSpinorField*>(even)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(even)->createTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->createTexObject();
#endif
}
} else { //siteSubset == QUDA_PARITY_SITE_SUBSET
//! setup an object for selected eigenvector (the 1st one as a default):
if (composite_descr.is_composite && (create != QUDA_REFERENCE_FIELD_CREATE))
{
if(composite_descr.dim <= 0) errorQuda("\nComposite size is not defined\n");
//if(bytes > 1811939328) warningQuda("\nCUDA API probably won't be able to create texture object for the eigenvector set... Object size is : %u bytes\n", bytes);
if (getVerbosity() == QUDA_DEBUG_VERBOSE) printfQuda("\nEigenvector set constructor...\n");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.is_composite = false;
param.composite_dim = 0;
param.is_component = true;
        // reserve the eigenvector set
components.reserve(composite_descr.dim);
//setup volume, [real_]length and stride for a single eigenvector
for(int cid = 0; cid < composite_descr.dim; cid++)
{
param.component_id = cid;
components.push_back(new cudaColorSpinorField(*this, param));
#ifdef USE_TEXTURE_OBJECTS //(a lot of texture objects...)
dynamic_cast<cudaColorSpinorField*>(components[cid])->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(components[cid])->createTexObject();
#endif
}
}
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
zeroPad();
} else if(!composite_descr.is_composite) {
(dynamic_cast<cudaColorSpinorField*>(even))->zeroPad();
(dynamic_cast<cudaColorSpinorField*>(odd))->zeroPad();
} else { //temporary hack for the full spinor field sets, manual zeroPad for each component:
for(int cid = 0; cid < composite_descr.dim; cid++) {
(dynamic_cast<cudaColorSpinorField&>(components[cid]->Even())).zeroPad();
(dynamic_cast<cudaColorSpinorField&>(components[cid]->Odd())).zeroPad();
}
}
}
#ifdef USE_TEXTURE_OBJECTS
if (!composite_descr.is_composite || composite_descr.is_component)
createTexObject();
#endif
}
#ifdef USE_TEXTURE_OBJECTS
void cudaColorSpinorField::createTexObject() {
if (isNative()) {
if (texInit) errorQuda("Already bound textures");
// create the texture for the field components
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = hipChannelFormatKindFloat;
else desc.f = hipChannelFormatKindSigned; // half is short, double is int2
// staggered and coarse fields in half and single are always two component
if ( (nSpin == 1 || nSpin == 2) && (precision == QUDA_HALF_PRECISION || precision == QUDA_SINGLE_PRECISION)) {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = 0;
desc.w = 0;
} else { // all others are four component (double2 is spread across int4)
desc.x = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.y = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.z = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.w = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
}
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = v;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = bytes;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = hipReadModeNormalizedFloat;
else texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
// create the texture for the norm components
if (precision == QUDA_HALF_PRECISION) {
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
desc.f = hipChannelFormatKindFloat;
desc.x = 8*QUDA_SINGLE_PRECISION; desc.y = 0; desc.z = 0; desc.w = 0;
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = norm;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = norm_bytes;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&texNorm, &resDesc, &texDesc, NULL);
}
texInit = true;
checkCudaError();
}
}
void cudaColorSpinorField::createGhostTexObject() {
// create the ghost texture object
if (isNative() && ghost_bytes) {
if (ghostTexInit) errorQuda("Already bound ghost texture");
for (int b=0; b<2; b++) {
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = hipChannelFormatKindFloat;
else desc.f = hipChannelFormatKindSigned; // half is short, double is int2
// staggered and coarse fields in half and single are always two component
if ( (nSpin == 1 || nSpin == 2) && (precision == QUDA_HALF_PRECISION || precision == QUDA_SINGLE_PRECISION)) {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = 0;
desc.w = 0;
} else { // all others are four component (double2 is spread across int4)
desc.x = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.y = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.z = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.w = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
}
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = ghost_field[b];
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = ghost_bytes;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = hipReadModeNormalizedFloat;
else texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&ghostTex[b], &resDesc, &texDesc, NULL);
if (precision == QUDA_HALF_PRECISION) {
hipChannelFormatDesc desc;
memset(&desc, 0, sizeof(hipChannelFormatDesc));
desc.f = hipChannelFormatKindFloat;
desc.x = 8*QUDA_SINGLE_PRECISION; desc.y = 0; desc.z = 0; desc.w = 0;
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = ghost_field[b];
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = ghost_bytes;
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
hipCreateTextureObject(&ghostTexNorm[b], &resDesc, &texDesc, NULL);
}
ghost_field_tex[b] = ghost_field[b];
} // buffer index
ghostTexInit = true;
checkCudaError();
}
}
void cudaColorSpinorField::destroyTexObject() {
if (isNative() && texInit) {
hipDestroyTextureObject(tex);
if (ghost_bytes) {
hipDestroyTextureObject(ghostTex[0]);
hipDestroyTextureObject(ghostTex[1]);
}
if (precision == QUDA_HALF_PRECISION) {
hipDestroyTextureObject(texNorm);
if (ghost_bytes) {
hipDestroyTextureObject(ghostTexNorm[0]);
hipDestroyTextureObject(ghostTexNorm[1]);
}
}
texInit = false;
}
}
void cudaColorSpinorField::destroyGhostTexObject() {
if (isNative() && ghostTexInit) {
hipDestroyTextureObject(ghostTex[0]);
hipDestroyTextureObject(ghostTex[1]);
if (precision == QUDA_HALF_PRECISION) {
hipDestroyTextureObject(ghostTexNorm[0]);
hipDestroyTextureObject(ghostTexNorm[1]);
}
ghostTexInit = false;
}
}
#endif
void cudaColorSpinorField::destroy() {
if (alloc) {
pool_device_free(v);
if (precision == QUDA_HALF_PRECISION) pool_device_free(norm);
alloc = false;
}
if (composite_descr.is_composite)
{
CompositeColorSpinorField::iterator vec;
for(vec = components.begin(); vec != components.end(); vec++) delete *vec;
}
if ((siteSubset == QUDA_FULL_SITE_SUBSET && !composite_descr.is_composite) || (siteSubset == QUDA_FULL_SITE_SUBSET && composite_descr.is_component)) {
delete even;
delete odd;
}
#ifdef USE_TEXTURE_OBJECTS
if (!composite_descr.is_composite || composite_descr.is_component)
destroyTexObject();
#endif
}
// cuda's floating point format, IEEE-754, represents the floating point
// zero as 4 zero bytes
void cudaColorSpinorField::zero() {
hipMemsetAsync(v, 0, bytes, streams[Nstream-1]);
if (precision == QUDA_HALF_PRECISION) hipMemsetAsync(norm, 0, norm_bytes, streams[Nstream-1]);
}
void cudaColorSpinorField::zeroPad() {
size_t pad_bytes = (stride - volume) * precision * fieldOrder;
int Npad = nColor * nSpin * 2 / fieldOrder;
if (composite_descr.is_composite && !composite_descr.is_component){//we consider the whole eigenvector set:
Npad *= composite_descr.dim;
pad_bytes /= composite_descr.dim;
}
size_t pitch = ((!composite_descr.is_composite || composite_descr.is_component) ? stride : composite_descr.stride)*fieldOrder*precision;
char *dst = (char*)v + ((!composite_descr.is_composite || composite_descr.is_component) ? volume : composite_descr.volume)*fieldOrder*precision;
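    // a single 2D memset clears every pad region at once: Npad rows, each pad_bytes wide,
    // with consecutive rows one field pitch apart (equivalent to the per-pad loop kept
    // commented out below)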
if (pad_bytes) hipMemset2D(dst, pitch, 0, pad_bytes, Npad);
//for (int i=0; i<Npad; i++) {
// if (pad_bytes) hipMemset((char*)v + (volume + i*stride)*fieldOrder*precision, 0, pad_bytes);
//}
}
void cudaColorSpinorField::copy(const cudaColorSpinorField &src) {
checkField(*this, src);
if (this->GammaBasis() != src.GammaBasis()) errorQuda("cannot call this copy with different basis");
blas::copy(*this, src);
}
void cudaColorSpinorField::copySpinorField(const ColorSpinorField &src) {
// src is on the device and is native
if (typeid(src) == typeid(cudaColorSpinorField) &&
isNative() && dynamic_cast<const cudaColorSpinorField &>(src).isNative() &&
this->GammaBasis() == src.GammaBasis()) {
copy(dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else if (typeid(src) == typeid(cpuColorSpinorField)) { // src is on the host
loadSpinorField(src);
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
}
void cudaColorSpinorField::loadSpinorField(const ColorSpinorField &src) {
if (reorder_location_ == QUDA_CPU_FIELD_LOCATION &&typeid(src) == typeid(cpuColorSpinorField)) {
void *buffer = pool_pinned_malloc(bytes + norm_bytes);
memset(buffer, 0, bytes+norm_bytes); // FIXME (temporary?) bug fix for padding
copyGenericColorSpinor(*this, src, QUDA_CPU_FIELD_LOCATION, buffer, 0, static_cast<char*>(buffer)+bytes, 0);
qudaMemcpy(v, buffer, bytes, hipMemcpyHostToDevice);
qudaMemcpy(norm, static_cast<char*>(buffer)+bytes, norm_bytes, hipMemcpyHostToDevice);
pool_pinned_free(buffer);
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else {
void *Src=nullptr, *srcNorm=nullptr, *buffer=nullptr;
if (!zeroCopy) {
resizeBufferDevice(src.Bytes()+src.NormBytes());
Src = bufferDevice;
srcNorm = (char*)bufferDevice + src.Bytes();
qudaMemcpy(Src, src.V(), src.Bytes(), hipMemcpyHostToDevice);
qudaMemcpy(srcNorm, src.Norm(), src.NormBytes(), hipMemcpyHostToDevice);
} else {
buffer = pool_pinned_malloc(src.Bytes()+src.NormBytes());
memcpy(buffer, src.V(), src.Bytes());
memcpy(static_cast<char*>(buffer)+src.Bytes(), src.Norm(), src.NormBytes());
hipHostGetDevicePointer(&Src, buffer, 0);
srcNorm = (void*)((char*)Src + src.Bytes());
}
hipMemset(v, 0, bytes); // FIXME (temporary?) bug fix for padding
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION, 0, Src, 0, srcNorm);
if (zeroCopy) pool_pinned_free(buffer);
}
return;
}
void cudaColorSpinorField::saveSpinorField(ColorSpinorField &dest) const {
if (reorder_location_ == QUDA_CPU_FIELD_LOCATION && typeid(dest) == typeid(cpuColorSpinorField)) {
void *buffer = pool_pinned_malloc(bytes+norm_bytes);
qudaMemcpy(buffer, v, bytes, hipMemcpyDeviceToHost);
qudaMemcpy(static_cast<char*>(buffer)+bytes, norm, norm_bytes, hipMemcpyDeviceToHost);
copyGenericColorSpinor(dest, *this, QUDA_CPU_FIELD_LOCATION, 0, buffer, 0, static_cast<char*>(buffer)+bytes);
pool_pinned_free(buffer);
} else if (typeid(dest) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION);
} else {
void *dst=nullptr, *dstNorm=nullptr, *buffer=nullptr;
if (!zeroCopy) {
resizeBufferDevice(dest.Bytes()+dest.NormBytes());
dst = bufferDevice;
dstNorm = (char*)bufferDevice+dest.Bytes();
} else {
buffer = pool_pinned_malloc(dest.Bytes()+dest.NormBytes());
hipHostGetDevicePointer(&dst, buffer, 0);
dstNorm = (char*)dst+dest.Bytes();
}
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION, dst, v, dstNorm, 0);
if (!zeroCopy) {
qudaMemcpy(dest.V(), dst, dest.Bytes(), hipMemcpyDeviceToHost);
qudaMemcpy(dest.Norm(), dstNorm, dest.NormBytes(), hipMemcpyDeviceToHost);
} else {
memcpy(dest.V(), buffer, dest.Bytes());
memcpy(dest.Norm(), static_cast<char*>(buffer) + dest.Bytes(), dest.NormBytes());
}
if (zeroCopy) pool_pinned_free(buffer);
}
return;
}
static bool ghost_field_reset = false;
void cudaColorSpinorField::allocateGhostBuffer(int nFace) {
if (!comm_partitioned()) return;
createGhostZone(nFace);
// only allocate if not already allocated or buffer required is bigger than previously
if ( !initGhostFaceBuffer || ghost_bytes > ghostFaceBytes ) {
if (initGhostFaceBuffer) {
#ifdef USE_TEXTURE_OBJECTS
destroyGhostTexObject();
#endif
if (initGhostFaceBuffer && ghost_bytes) {
for (int b=0; b<2; b++) device_pinned_free(ghost_field[b]);
}
for (int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (ghost_bytes > 0) {
// GPU pinned allocator to avoid this being redirected, e.g., by QDPJIT
if (ghost_bytes) {
for (int b=0; b<2; b++) ghost_field[b] = device_pinned_malloc(ghost_bytes);
}
ghost_field_reset = true;
for (int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(ghost_bytes);
initGhostFaceBuffer = true;
ghostFaceBytes = ghost_bytes;
}
}
#ifdef USE_TEXTURE_OBJECTS
// ghost texture is per object
if (ghost_field_tex[0] != ghost_field[0] || ghost_field_tex[1] != ghost_field[1]) destroyGhostTexObject();
if (!ghostTexInit) createGhostTexObject();
#endif
// always initialize the ghost receive pointers
if (siteSubset == QUDA_PARITY_SITE_SUBSET) {
for (int i=0; i<nDim; ++i) {
if (commDimPartitioned(i)) {
for (int b=0; b<2; b++) {
ghost[b][i] = (char*)ghost_field[b] + ghostOffset[i][0]*precision;
if (precision == QUDA_HALF_PRECISION)
ghostNorm[b][i] = (char*)ghost_field[b] + ghostNormOffset[i][0]*QUDA_SINGLE_PRECISION;
}
}
}
}
// always initialize the ghost send pointers
int Nint = nColor * nSpin * 2 / (nSpin == 4 ? 2 : 1); // number of internal degrees of freedom
size_t offset = 0;
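    // each partitioned dimension gets a contiguous slice of ghostFaceBuffer laid out as
    // [ backward face | forward face ], with the half-precision norm face appended to each
    // half; backGhostFaceBuffer / fwdGhostFaceBuffer simply point into that slice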
for (int i=0; i<4; i++) {
if (!commDimPartitioned(i)) continue;
// compute size of buffer required
ghost_face_bytes[i] = nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) {
ghost_face_bytes[i] += nFace*ghostFace[i]*sizeof(float);
}
for (int b=0; b<2; ++b) {
backGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
}
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
for (int b=0; b<2; ++b) {
fwdGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
}
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
}
}
void cudaColorSpinorField::allocateGhostBuffer(void *send_buf[], void *recv_buf[]) const
{
int num_faces = 1;
if (nSpin == 1) num_faces = 3; // staggered
int spinor_size = 2*nSpin*nColor*precision;
// resize face only if requested size is larger than previously allocated one
size_t faceBytes = 0;
for (int i=0; i<nDimComms; i++) {
if (comm_dim_partitioned(i)) faceBytes += 2*siteSubset*num_faces*surfaceCB[i]*spinor_size;
}
if (!initGhostFaceBuffer || faceBytes > ghostFaceBytes) {
if (initGhostFaceBuffer) {
for (int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (faceBytes > 0) {
for (int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(faceBytes);
initGhostFaceBuffer = true;
ghostFaceBytes = faceBytes;
}
}
size_t offset = 0;
for (int i=0; i<nDimComms; i++) {
if (comm_dim_partitioned(i)) {
// use first buffer for recv and second for send
recv_buf[2*i+0] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[0]) + offset));
send_buf[2*i+0] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[1]) + offset));
offset += siteSubset*num_faces*surfaceCB[i]*spinor_size;
recv_buf[2*i+1] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[0]) + offset));
send_buf[2*i+1] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[1]) + offset));
offset += siteSubset*num_faces*surfaceCB[i]*spinor_size;
} else {
recv_buf[2*i+0] = nullptr;
recv_buf[2*i+1] = nullptr;
send_buf[2*i+0] = nullptr;
send_buf[2*i+1] = nullptr;
}
}
}
void cudaColorSpinorField::freeGhostBuffer(void)
{
destroyIPCComms();
if (!initGhostFaceBuffer) return;
for (int b=0; b<2; b++) {
if (ghost_field[b]) device_pinned_free(ghost_field[b]);
if (ghostFaceBuffer[b]) device_free(ghostFaceBuffer[b]);
for (int i=0;i < 4; i++) {
if (!commDimPartitioned(i)) continue;
backGhostFaceBuffer[b][i] = NULL;
fwdGhostFaceBuffer[b][i] = NULL;
}
}
initGhostFaceBuffer = false;
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhost(const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t *stream,
void *buffer, bool zero_copy, double a, double b)
{
#ifdef MULTI_GPU
int face_num;
if (dir == QUDA_BACKWARDS) {
face_num = 0;
}else if (dir == QUDA_FORWARDS) {
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFace(packBuffer, *this, zero_copy, nFace, dagger, parity, dim, face_num, *stream, a, b);
#else
errorQuda("packGhost not built on single-GPU build");
#endif
}
// send the ghost zone to the host
void cudaColorSpinorField::sendGhost(void *ghost_spinor, const int nFace, const int dim,
const QudaDirection dir, const int dagger,
hipStream_t *stream) {
#ifdef MULTI_GPU
int Nvec = (nSpin == 1 || precision == QUDA_DOUBLE_PRECISION) ? 2 : 4;
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int Npad = Nint / Nvec; // number Nvec buffers we have
if (dim !=3 || getKernelPackT() || getTwistPack()) { // use kernels to pack into contiguous buffers then a single hipMemcpy
size_t bytes = nFace*Nint*ghostFace[dim]*precision;
if (precision == QUDA_HALF_PRECISION) bytes += nFace*ghostFace[dim]*sizeof(float);
void* gpu_buf =
(dir == QUDA_BACKWARDS) ? this->backGhostFaceBuffer[bufferIndex][dim] : this->fwdGhostFaceBuffer[bufferIndex][dim];
hipMemcpyAsync(ghost_spinor, gpu_buf, bytes, hipMemcpyDeviceToHost, *stream);
} else if (this->TwistFlavor() != QUDA_TWIST_NONDEG_DOUBLET) { // do multiple cudaMemcpys
const int x4 = nDim==5 ? x[4] : 1;
const int Nt_minus1_offset = (volumeCB - nFace*ghostFace[3])/x4; // N_t -1 = Vh-Vsh
int offset = 0;
if (nSpin == 1) {
offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset;
} else if (nSpin == 4) {
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;
if (upper) offset = (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
else offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
}
size_t len = nFace*(ghostFace[3]/x4)*Nvec*precision;
size_t dpitch = x4*len;
size_t spitch = stride*Nvec*precision;
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
for (int s=0; s<x4; s++) { // loop over multiple 4-d volumes (if they exist)
void *dst = (char*)ghost_spinor + s*len;
void *src = (char*)v + (offset + s*(volumeCB/x4))*Nvec*precision;
hipMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
size_t len = nFace*(ghostFace[3]/x4)*sizeof(float);
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + nFace*Nint*ghostFace[3]*precision + s*len;
void *src = (char*)norm + norm_offset + s*(volumeCB/x4)*sizeof(float);
hipMemcpyAsync(dst, src, len, hipMemcpyDeviceToHost, *stream);
}
}
}else{
int flavorVolume = volume / 2;
int flavorTFace = ghostFace[3] / 2;
int flavor1_Nt_minus1_offset = (flavorVolume - flavorTFace);
int flavor2_Nt_minus1_offset = (volume - flavorTFace);
int flavor1_offset = 0;
int flavor2_offset = 0;
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;//ndeg tm: stride=2*flavor_volume+pad
if (upper) {
flavor1_offset = (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}else{
flavor1_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *dst = (char*)ghost_spinor;
void *src = (char*)v + flavor1_offset*Nvec*precision;
size_t len = flavorTFace*Nvec*precision;
size_t spitch = stride*Nvec*precision;//ndeg tm: stride=2*flavor_volume+pad
size_t dpitch = 2*len;
hipMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToHost, *stream);
dst = (char*)ghost_spinor+len;
src = (char*)v + flavor2_offset*Nvec*precision;
hipMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int Nt_minus1_offset = (flavorVolume - flavorTFace);
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
size_t dpitch = flavorTFace*sizeof(float);
size_t spitch = flavorVolume*sizeof(float);
hipMemcpy2DAsync(dst, dpitch, src, spitch, flavorTFace*sizeof(float), 2, hipMemcpyDeviceToHost, *stream);
}
}
#else
errorQuda("sendGhost not built on single-GPU build");
#endif
}
void cudaColorSpinorField::unpackGhost(const void* ghost_spinor, const int nFace,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t* stream)
{
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint*precision;
const void *src = ghost_spinor;
int ghost_offset = (dir == QUDA_BACKWARDS) ? ghostOffset[dim][0] : ghostOffset[dim][1];
void *ghost_dst = (char*)ghost_field[bufferIndex] + precision*ghost_offset;
if (precision == QUDA_HALF_PRECISION) len += nFace*ghostFace[dim]*sizeof(float);
hipMemcpyAsync(ghost_dst, src, len, hipMemcpyHostToDevice, *stream);
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhostExtended(const int nFace, const int R[], const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t *stream,
void *buffer, bool zero_copy)
{
#ifdef MULTI_GPU
int face_num;
if (dir == QUDA_BACKWARDS) {
face_num = 0;
}else if (dir == QUDA_FORWARDS) {
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFaceExtended(packBuffer, *this, zero_copy, nFace, R, dagger, parity, dim, face_num, *stream);
#else
errorQuda("packGhostExtended not built on single-GPU build");
#endif
}
// copy data from host buffer into boundary region of device field
void cudaColorSpinorField::unpackGhostExtended(const void* ghost_spinor, const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, hipStream_t* stream, bool zero_copy)
{
// First call the regular unpackGhost routine to copy data into the `usual' ghost-zone region
// of the data array
unpackGhost(ghost_spinor, nFace, dim, dir, dagger, stream);
// Next step is to copy data from the ghost zone back to the interior region
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
int offset = length + ghostOffset[dim][0];
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
#ifdef MULTI_GPU
const int face_num = 2;
const bool unpack = true;
const int R[4] = {0,0,0,0};
packFaceExtended(ghostFaceBuffer[bufferIndex], *this, zero_copy, nFace, R, dagger, parity, dim, face_num, *stream, unpack);
#else
errorQuda("unpackGhostExtended not built on single-GPU build");
#endif
}
hipStream_t *stream;
void cudaColorSpinorField::createComms(int nFace) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
if (!initComms || nFaceComms != nFace || bufferMessageHandler != bufferPinnedResizeCount) {
// if we are requesting a new number of faces destroy and start over
destroyComms();
if (siteSubset != QUDA_PARITY_SITE_SUBSET)
errorQuda("Only supports single parity fields");
#ifdef GPU_COMMS
bool comms = false;
for (int i=0; i<nDimComms; i++) if (commDimPartitioned(i)) comms = true;
#endif
if (nFace > maxNface)
errorQuda("Requested number of faces %d in communicator is greater than supported %d",
nFace, maxNface);
// faceBytes is the sum of all face sizes
size_t faceBytes = 0;
// nbytes is the size in bytes of each face
size_t nbytes[QUDA_MAX_DIM];
// The number of degrees of freedom per site for the given
// field. Currently assumes spin projection of a Wilson-like
// field (so half the number of degrees of freedom).
int Ndof = (2 * nSpin * nColor) / (nSpin==4 ? 2 : 1);
for (int i=0; i<nDimComms; i++) {
nbytes[i] = maxNface*surfaceCB[i]*Ndof*precision;
if (precision == QUDA_HALF_PRECISION) nbytes[i] += maxNface*surfaceCB[i]*sizeof(float);
if (!commDimPartitioned(i)) continue;
faceBytes += 2*nbytes[i];
}
#ifndef GPU_COMMS
// use static pinned memory for face buffers
for (int b=0; b<2; ++b) {
if (faceBytes > 0) {
resizeBufferPinned(2*faceBytes, b); // oversizes for GPU_COMMS case
my_face[b] = bufferPinned[b];
hipHostGetDevicePointer(&my_face_d[b], my_face[b], 0); // set the matching device pointer
from_face[b] = static_cast<char*>(my_face[b]) + faceBytes;
from_face_d[b] = static_cast<char*>(my_face_d[b]) + faceBytes;
} else {
from_face[b] = nullptr;
from_face_d[b] = nullptr;
my_face[b] = nullptr;
my_face_d[b] = nullptr;
}
}
checkCudaError();
// assign pointers for each face - it's ok to alias for different Nface parameters
size_t offset = 0;
#endif
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
for (int b=0; b<2; ++b) {
my_back_face[b][i] = backGhostFaceBuffer[b][i];
from_back_face[b][i] = ghost[b][i];
if (precision == QUDA_HALF_PRECISION) {
my_back_norm_face[b][i] = static_cast<char*>(backGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_back_norm_face[b][i] = ghostNorm[b][i];
}
} // loop over b
#else
for (int b=0; b<2; ++b) {
my_back_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_back_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
#ifdef GPU_COMMS
for (int b=0; b<2; ++b) {
my_fwd_face[b][i] = fwdGhostFaceBuffer[b][i];
//from_fwd_face[b][i] = ghost[i] + nFace*ghostFace[i]*Ndof*precision;
from_fwd_face[b][i] = ghost_field[b] + ghostOffset[i][1]*precision;
if (precision == QUDA_HALF_PRECISION) {
my_fwd_norm_face[b][i] = static_cast<char*>(fwdGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
// from_fwd_norm_face[b][i] = static_cast<char*>(ghostNorm[i]) + nFace*ghostFace[i]*sizeof(float);
from_fwd_norm_face[b][i] = static_cast<char*>(ghost_field[b]) + ghostNormOffset[i][1]*sizeof(float);
}
} // loop over b
#else
for (int b=0; b<2; ++b) {
my_fwd_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_fwd_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
}
checkCudaError();
// create a different message handler for each direction and Nface
for (int b=0; b<2; ++b) {
mh_send_fwd[b] = new MsgHandle**[maxNface];
mh_send_back[b] = new MsgHandle**[maxNface];
mh_recv_fwd[b] = new MsgHandle**[maxNface];
mh_recv_back[b] = new MsgHandle**[maxNface];
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
mh_send_norm_fwd[b] = new MsgHandle**[maxNface];
mh_send_norm_back[b] = new MsgHandle**[maxNface];
mh_recv_norm_fwd[b] = new MsgHandle**[maxNface];
mh_recv_norm_back[b] = new MsgHandle**[maxNface];
}
#endif
} // loop over b
for (int j=0; j<maxNface; j++) {
for (int b=0; b<2; ++b) {
mh_send_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_back[b][j] = new MsgHandle*[nDimComms];
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
mh_send_norm_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_norm_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_norm_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_norm_back[b][j] = new MsgHandle*[nDimComms];
}
#endif
} // loop over b
checkCudaError();
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
size_t nbytes_Nface = surfaceCB[i]*Ndof*precision*(j+1);
size_t nbytes_Nface_norm = surfaceCB[i]*(j+1)*sizeof(float);
if (i != 3 || getKernelPackT() || getTwistPack()) {
#else
size_t nbytes_Nface = (nbytes[i] / maxNface) * (j+1);
#endif
for (int b=0; b<2; ++b) {
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_face[b][i], i, -1, nbytes_Nface) : NULL;
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i]; // alias pointers
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i]; // alias pointers
}
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
for (int b=0; b<2; ++b) {
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
} else if (this->TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET) {
errorQuda("GPU_COMMS for non-degenerate doublet only supported with time-dimension kernel packing enabled.");
} else {
/*
  Use a strided communicator. The previously declared my_fwd_face and
  my_back_face pointers do not map one-to-one onto this layout, so we
  compute the required base pointers here and pass them directly into
  the communicator construction.
*/
int Nblocks = Ndof / Nvec(); // number of Nvec buffers we have
// start of last time slice chunk we are sending forwards
int endOffset = (volume - (j+1)*ghostFace[i]);
size_t offset[4];
void *base[4];
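// index convention for offset[]/base[]: [2*dir + dagger], with dir 0 = backwards, 1 = forwards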
if (nSpin == 1) { // staggered is invariant with dagger
offset[2*0 + 0] = 0;
offset[2*1 + 0] = endOffset;
offset[2*0 + 1] = offset[2*0 + 0];
offset[2*1 + 1] = offset[2*1 + 0];
} else if (nSpin == 4) {
// !dagger: send last components backwards, send first components forwards
offset[2*0 + 0] = Nblocks*stride;
offset[2*1 + 0] = endOffset;
// dagger: send first components backwards, send last components forwards
offset[2*0 + 1] = 0;
offset[2*1 + 1] = Nblocks*stride + endOffset;
} else {
errorQuda("Unsupported number of spin components");
}
for (int k=0; k<4; k++) {
base[k] = static_cast<char*>(v) + offset[k]*Nvec()*precision; // total offset in bytes
}
size_t blksize = (j+1)*ghostFace[i]*Nvec()*precision; // (j+1) is number of faces
size_t Stride = stride*Nvec()*precision;
if (blksize * Nblocks != nbytes_Nface)
errorQuda("Total strided message size does not match expected size");
//printf("%d strided sends with Nface=%d Nblocks=%d blksize=%d Stride=%d\n", i, j+1, Nblocks, blksize, Stride);
for (int b=0; b<2; ++b) {
// only allocate a communicator for the present face (this needs to be cleaned up)
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[2], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[0], i, -1, blksize, Nblocks, Stride) : NULL;
if (nSpin ==4) { // dagger communicators
mh_send_fwd[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[3], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[1], i, -1, blksize, Nblocks, Stride) : NULL;
} else {
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i+0];
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i+0];
}
} // loop over b
if (precision == QUDA_HALF_PRECISION) {
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // The space-time coordinate of the start of the last time slice
void *norm_fwd = static_cast<float*>(norm) + Nt_minus1_offset;
void *norm_back = norm; // the first time slice has zero offset
for (int b=0; b<2; ++b) {
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_fwd, i, +1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_back, i, -1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
}
if (precision == QUDA_HALF_PRECISION) {
for (int b=0; b<2; ++b) {
mh_recv_norm_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_recv_norm_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
}
}
#endif // GPU_COMMS
for (int b=0; b<2; ++b) {
mh_recv_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_recv_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_face[b][i], i, -1, nbytes_Nface) : NULL;
}
} // loop over dimension
}
bufferMessageHandler = bufferPinnedResizeCount;
initComms = true;
nFaceComms = nFace;
checkCudaError();
}
if (ghost_field_reset) destroyIPCComms();
createIPCComms();
}
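// createIPCComms sets up direct peer-to-peer access between neighbouring GPUs:
// each rank exchanges a CUDA IPC memory handle for its ghost field with the
// neighbours for which peer-to-peer is enabled, opens the remote handles to
// obtain writable pointers (fwd/backGhostSendDest), exchanges IPC event handles
// used to order the remote copies, and finally creates the lightweight message
// handles used as a handshake when a peer-to-peer copy has been posted.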
void cudaColorSpinorField::createIPCComms() {
if ( initIPCComms && !ghost_field_reset ) return;
if (!initComms) errorQuda("Can only be called after create comms");
if ( (!ghost_field[0] || !ghost_field[1]) && comm_size() > 1) errorQuda("ghost_field appears not to be allocated");
// handles for obtained ghost pointers
hipIpcMemHandle_t ipcRemoteGhostDestHandle[2][2][QUDA_MAX_DIM];
for (int b=0; b<2; b++) {
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
for (int dir=0; dir<2; ++dir) {
MsgHandle* sendHandle = NULL;
MsgHandle* receiveHandle = NULL;
int disp = (dir == 1) ? +1 : -1;
// first set up receive
if (comm_peer2peer_enabled(1-dir,dim)) {
receiveHandle = comm_declare_receive_relative(&ipcRemoteGhostDestHandle[b][1-dir][dim],
dim, -disp,
sizeof(ipcRemoteGhostDestHandle[b][1-dir][dim]));
}
// now send
if (comm_peer2peer_enabled(dir,dim)) {
hipIpcMemHandle_t ipcLocalGhostDestHandle;
hipIpcGetMemHandle(&ipcLocalGhostDestHandle, ghost_field[b]);
sendHandle = comm_declare_send_relative(&ipcLocalGhostDestHandle,
dim, disp,
sizeof(ipcLocalGhostDestHandle));
}
if (receiveHandle) comm_start(receiveHandle);
if (sendHandle) comm_start(sendHandle);
if (receiveHandle) comm_wait(receiveHandle);
if (sendHandle) comm_wait(sendHandle);
if (sendHandle) comm_free(sendHandle);
if (receiveHandle) comm_free(receiveHandle);
}
}
checkCudaError();
// open the remote memory handles and set the send ghost pointers
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
const int num_dir = (comm_dim(dim) == 2) ? 1 : 2;
for (int dir=0; dir<num_dir; ++dir) {
if (!comm_peer2peer_enabled(dir,dim)) continue;
void **ghostDest = (dir==0) ? (&backGhostSendDest[b][dim]) : &(fwdGhostSendDest[b][dim]);
hipIpcOpenMemHandle(ghostDest, ipcRemoteGhostDestHandle[b][dir][dim],
hipIpcMemLazyEnablePeerAccess);
}
if (num_dir == 1) fwdGhostSendDest[b][dim] = backGhostSendDest[b][dim];
}
} // buffer index
checkCudaError();
// handles for obtained events
hipIpcEventHandle_t ipcRemoteEventHandle[2][2][QUDA_MAX_DIM];
// Note that no b index is necessary here
// Now communicate the event handles
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
for (int dir=0; dir<2; ++dir) {
for (int b=0; b<2; b++) {
MsgHandle* sendHandle = NULL;
MsgHandle* receiveHandle = NULL;
int disp = (dir == 1) ? +1 : -1;
// first set up receive
if (comm_peer2peer_enabled(1-dir,dim)) {
receiveHandle = comm_declare_receive_relative(&ipcRemoteEventHandle[b][1-dir][dim], dim, -disp,
sizeof(ipcRemoteEventHandle[b][1-dir][dim]));
}
// now send
if (comm_peer2peer_enabled(dir,dim)) {
hipEventCreate(&ipcCopyEvent[b][dir][dim], hipEventDisableTiming | hipEventInterprocess);
hipIpcEventHandle_t ipcLocalEventHandle;
hipIpcGetEventHandle(&ipcLocalEventHandle, ipcCopyEvent[b][dir][dim]);
sendHandle = comm_declare_send_relative(&ipcLocalEventHandle, dim, disp,
sizeof(ipcLocalEventHandle));
}
if (receiveHandle) comm_start(receiveHandle);
if (sendHandle) comm_start(sendHandle);
if (receiveHandle) comm_wait(receiveHandle);
if (sendHandle) comm_wait(sendHandle);
if (sendHandle) comm_free(sendHandle);
if (receiveHandle) comm_free(receiveHandle);
} // buffer index
}
}
checkCudaError();
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
for (int dir=0; dir<2; ++dir) {
if (!comm_peer2peer_enabled(dir,dim)) continue;
for (int b=0; b<2; b++) {
hipIpcOpenEventHandle(&(ipcRemoteCopyEvent[b][dir][dim]), ipcRemoteEventHandle[b][dir][dim]);
}
}
}
// Create message handles for IPC synchronization
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
if (comm_peer2peer_enabled(1,dim)) {
for (int b=0; b<2; b++) {
// send to processor in forward direction
mh_send_p2p_fwd[b][dim] = comm_declare_send_relative(&buffer_send_p2p_fwd[b][dim], dim, +1, sizeof(int));
// receive from processor in forward direction
mh_recv_p2p_fwd[b][dim] = comm_declare_receive_relative(&buffer_recv_p2p_fwd[b][dim], dim, +1, sizeof(int));
}
}
if (comm_peer2peer_enabled(0,dim)) {
for (int b=0; b<2; b++) {
// send to processor in backward direction
mh_send_p2p_back[b][dim] = comm_declare_send_relative(&buffer_send_p2p_back[b][dim], dim, -1, sizeof(int));
// receive from processor in backward direction
mh_recv_p2p_back[b][dim] = comm_declare_receive_relative(&buffer_recv_p2p_back[b][dim], dim, -1, sizeof(int));
}
}
}
checkCudaError();
initIPCComms = true;
ghost_field_reset = false;
}
void cudaColorSpinorField::destroyIPCComms() {
if (!initIPCComms) return;
checkCudaError();
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
const int num_dir = (comm_dim(dim) == 2) ? 1 : 2;
for (int b=0; b<2; b++) {
if (comm_peer2peer_enabled(1,dim)) {
comm_free(mh_send_p2p_fwd[b][dim]);
comm_free(mh_recv_p2p_fwd[b][dim]);
hipEventDestroy(ipcCopyEvent[b][1][dim]);
// only close this handle if it doesn't alias the back ghost
if (num_dir == 2) hipIpcCloseMemHandle(fwdGhostSendDest[b][dim]);
}
if (comm_peer2peer_enabled(0,dim)) {
comm_free(mh_send_p2p_back[b][dim]);
comm_free(mh_recv_p2p_back[b][dim]);
hipEventDestroy(ipcCopyEvent[b][0][dim]);
hipIpcCloseMemHandle(backGhostSendDest[b][dim]);
}
} // buffer
} // iterate over dim
checkCudaError();
initIPCComms = false;
}
void cudaColorSpinorField::destroyComms()
{
if (initComms) {
for (int b=0; b<2; ++b) {
for (int j=0; j<maxNface; j++) {
for (int i=0; i<nDimComms; i++) {
if (commDimPartitioned(i)) {
if (mh_recv_fwd[b][j][i]) comm_free(mh_recv_fwd[b][j][i]);
if (mh_recv_back[b][j][i]) comm_free(mh_recv_back[b][j][i]);
if (mh_send_fwd[b][j][2*i]) comm_free(mh_send_fwd[b][j][2*i]);
if (mh_send_back[b][j][2*i]) comm_free(mh_send_back[b][j][2*i]);
// only in a special case are these not aliasing pointers
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
if (mh_recv_norm_fwd[b][j][i]) comm_free(mh_recv_norm_fwd[b][j][i]);
if (mh_recv_norm_back[b][j][i]) comm_free(mh_recv_norm_back[b][j][i]);
if (mh_send_norm_fwd[b][j][2*i]) comm_free(mh_send_norm_fwd[b][j][2*i]);
if (mh_send_norm_back[b][j][2*i]) comm_free(mh_send_norm_back[b][j][2*i]);
}
if (i == 3 && !getKernelPackT() && nSpin == 4) {
if (mh_send_fwd[b][j][2*i+1]) comm_free(mh_send_fwd[b][j][2*i+1]);
if (mh_send_back[b][j][2*i+1]) comm_free(mh_send_back[b][j][2*i+1]);
}
#endif // GPU_COMMS
}
}
delete []mh_recv_fwd[b][j];
delete []mh_recv_back[b][j];
delete []mh_send_fwd[b][j];
delete []mh_send_back[b][j];
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
delete []mh_recv_norm_fwd[b][j];
delete []mh_recv_norm_back[b][j];
delete []mh_send_norm_fwd[b][j];
delete []mh_send_norm_back[b][j];
}
#endif
}
delete []mh_recv_fwd[b];
delete []mh_recv_back[b];
delete []mh_send_fwd[b];
delete []mh_send_back[b];
for (int i=0; i<nDimComms; i++) {
my_fwd_face[b][i] = NULL;
my_back_face[b][i] = NULL;
from_fwd_face[b][i] = NULL;
from_back_face[b][i] = NULL;
}
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
delete []mh_recv_norm_fwd[b];
delete []mh_recv_norm_back[b];
delete []mh_send_norm_fwd[b];
delete []mh_send_norm_back[b];
}
for (int i=0; i<nDimComms; i++) {
my_fwd_norm_face[b][i] = NULL;
my_back_norm_face[b][i] = NULL;
from_fwd_norm_face[b][i] = NULL;
from_back_norm_face[b][i] = NULL;
}
#endif
} // loop over b
initComms = false;
checkCudaError();
}
}
void cudaColorSpinorField::streamInit(hipStream_t *stream_p) {
stream = stream_p;
}
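// Rough sketch of how the routines below are intended to be driven for a halo
// exchange in each partitioned dimension/direction:
// pack() - pack the faces into the device ghost buffer
// gather() - copy a packed face to the host send buffer (skipped for peer-to-peer)
// commsStart() - prepost the receive and start the send
// commsQuery()/commsWait() - poll or block until both halves complete
// scatter() - unpack the received halo (skipped for peer-to-peer)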
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, hipStream_t *stream_p,
bool zero_copy, double a, double b) {
createComms(nFace); // must call this first
stream = stream_p;
const int dim=-1; // pack all partitioned dimensions
if (zero_copy) {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d[bufferIndex], true, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], 0, false, a, b);
}
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, int stream_idx,
bool zeroCopyPack, double a, double b) {
createComms(nFace); // must call this first
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], my_face_d[bufferIndex], true, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], 0, false, a, b);
}
}
void cudaColorSpinorField::packExtended(const int nFace, const int R[], const int parity,
const int dagger, const int dim,
hipStream_t *stream_p, const bool zero_copy) {
createComms(nFace); // must call this first
stream = stream_p;
if (zero_copy) {
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d[bufferIndex], true);
}else{
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], 0, false);
}
}
void cudaColorSpinorField::gather(int nFace, int dagger, int dir, hipStream_t* stream_p)
{
int dim = dir/2;
// if a stream is passed in use it for the copy, otherwise use the stream array
hipStream_t *pack_stream = (stream_p) ? stream_p : stream+dir;
if (dir%2 == 0) {
// backwards copy to host
if (comm_peer2peer_enabled(0,dim)) return;
sendGhost(my_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, pack_stream);
} else {
// forwards copy to host
if (comm_peer2peer_enabled(1,dim)) return;
sendGhost(my_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, pack_stream);
}
}
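// recvStart preposts the receive matching a send in direction dir: dir%2 == 0
// means we are sending our backward face, so we receive the corresponding face
// from the processor in the +1 direction (and vice versa). With peer-to-peer
// enabled the receive is just the small handshake message.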
void cudaColorSpinorField::recvStart(int nFace, int dir, int dagger, hipStream_t* stream_p) {
int dim = dir/2;
if (!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
if (comm_peer2peer_enabled(1,dim)) {
// receive from the processor in the +1 direction
comm_start(mh_recv_p2p_fwd[bufferIndex][dim]);
} else {
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
}
} else { //sending forwards
// Prepost receive
if (comm_peer2peer_enabled(0,dim)) {
comm_start(mh_recv_p2p_back[bufferIndex][dim]);
} else {
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
}
}
}
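// sendStart either hands the previously gathered host buffer to the
// communications layer (non peer-to-peer path), or, when peer-to-peer access is
// enabled for this dimension/direction, copies the face directly into the
// neighbouring GPU's ghost buffer (opened in createIPCComms), records an IPC
// event and posts a small handshake message so the receiver knows the copy has
// been issued.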
void cudaColorSpinorField::sendStart(int nFace, int d, int dagger, hipStream_t* stream_p) {
int dim = d/2;
int dir = d%2;
if (!commDimPartitioned(dim)) return;
int Nvec = (nSpin == 1 || precision == QUDA_DOUBLE_PRECISION) ? 2 : 4;
int Nint = (nColor * nSpin * 2)/(nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int Npad = Nint/Nvec;
if (!comm_peer2peer_enabled(dir,dim)) {
if (dir == 0) comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
else comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
} else { // doing peer-to-peer
hipStream_t *copy_stream = (stream_p) ? stream_p : stream + d;
// peer-to-peer path: copy the face directly into the neighbour's ghost buffer
void* ghost_dst = (dir == 0) ? (void*)((char*)(backGhostSendDest[bufferIndex][dim]) + precision*ghostOffset[dim][1]) :
(void*)((char*)(fwdGhostSendDest[bufferIndex][dim]) + precision*ghostOffset[dim][0]);
void *ghost_norm_dst = (dir == 0) ? static_cast<char*>(backGhostSendDest[bufferIndex][dim]) + QUDA_SINGLE_PRECISION*ghostNormOffset[dim][1] :
static_cast<char*>(fwdGhostSendDest[bufferIndex][dim]) + QUDA_SINGLE_PRECISION*ghostNormOffset[dim][0];
if (dim != 3 || getKernelPackT()) {
hipMemcpyAsync(ghost_dst,
dir == 0 ? backGhostFaceBuffer[bufferIndex][dim] : fwdGhostFaceBuffer[bufferIndex][dim],
ghost_face_bytes[dim],
hipMemcpyDeviceToDevice,
*copy_stream); // copy directly into the remote ghost buffer
} else if (this->TwistFlavor() != QUDA_TWIST_NONDEG_DOUBLET) {
const int x4 = nDim==5 ? x[4] : 1;
const int Nt_minus_offset = (volumeCB - nFace*ghostFace[3])/x4;
int offset = 0;
if (nSpin == 1) {
offset = (dir == 0) ? 0 : Nt_minus_offset;
} else if (nSpin == 4) {
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false;
if (dir == 1) upper = !upper;
int lower_spin_offset = Npad*stride;
if (dir == 0) {
offset = upper ? 0 : lower_spin_offset;
} else {
offset = (upper) ? Nt_minus_offset : lower_spin_offset + Nt_minus_offset;
}
}
size_t len = nFace*(ghostFace[3]/x4)*Nvec*precision;
size_t dpitch = x4*len;
size_t spitch = stride*Nvec*precision;
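// copy Npad rows per fifth-dimension slice: each row is one Nvec-wide chunk of
// the T face (len bytes); rows are spitch apart in the source field and dpitch
// apart in the destination, and slice s is offset by s*len so the x4 slices
// interleave within each pad block of the ghost buffer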
for (int s=0; s<x4; s++) {
void *dst = (char*)ghost_dst + s*len;
void *src = (char*)v + (offset + s*(volumeCB/x4))*Nvec*precision;
// start the copy
hipMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToDevice, *copy_stream);
if (precision == QUDA_HALF_PRECISION) {
size_t len = nFace*(ghostFace[3]/x4)*sizeof(float);
int norm_offset = (dir == 0) ? 0 : Nt_minus_offset*sizeof(float);
void *dst = (char*)ghost_norm_dst + s*len;
void *src = static_cast<char*>(norm) + norm_offset + s*(volumeCB/x4)*sizeof(float);
hipMemcpyAsync(dst, src, len, hipMemcpyDeviceToDevice, *copy_stream);
}
}
} else { // twisted doublet
int flavorVolume = volume / 2;
int flavorTFace = ghostFace[3] / 2;
int flavor1_Nt_minus1_offset = (flavorVolume - flavorTFace);
int flavor2_Nt_minus1_offset = (volume - flavorTFace);
int flavor1_offset = 0;
int flavor2_offset = 0;
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == 1) upper = !upper;
int lower_spin_offset = Npad*stride;//ndeg tm: stride=2*flavor_volume+pad
if (upper) {
flavor1_offset = (dir == 0 ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = (dir == 0 ? flavorVolume : flavor2_Nt_minus1_offset);
}else{
flavor1_offset = lower_spin_offset + (dir == 0 ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = lower_spin_offset + (dir == 0 ? flavorVolume : flavor2_Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *src = static_cast<char*>(v) + flavor1_offset*Nvec*precision;
size_t len = flavorTFace*Nvec*precision;
size_t spitch = stride*Nvec*precision;//ndeg tm: stride=2*flavor_volume+pad
size_t dpitch = 2*len;
hipMemcpy2DAsync(ghost_dst, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToDevice, *copy_stream);
src = static_cast<char*>(v) + flavor2_offset*Nvec*precision;
hipMemcpy2DAsync(static_cast<char*>(ghost_dst)+len, dpitch, src, spitch, len, Npad, hipMemcpyDeviceToDevice, *copy_stream);
if (precision == QUDA_HALF_PRECISION) {
int norm_offset = (dir == 0) ? 0 : flavor1_Nt_minus1_offset*sizeof(float);
void *src = static_cast<char*>(norm) + norm_offset;
size_t dpitch = flavorTFace*sizeof(float);
size_t spitch = flavorVolume*sizeof(float);
hipMemcpy2DAsync(ghost_norm_dst, dpitch, src, spitch, flavorTFace*sizeof(float), 2, hipMemcpyDeviceToDevice, *copy_stream);
}
}
if (dir == 0) {
// record the event
hipEventRecord(ipcCopyEvent[bufferIndex][0][dim], *copy_stream);
// send to the processor in the -1 direction
comm_start(mh_send_p2p_back[bufferIndex][dim]);
} else {
hipEventRecord(ipcCopyEvent[bufferIndex][1][dim], *copy_stream);
// send to the processor in the +1 direction
comm_start(mh_send_p2p_fwd[bufferIndex][dim]);
}
}
}
void cudaColorSpinorField::commsStart(int nFace, int dir, int dagger, hipStream_t* stream_p) {
recvStart(nFace, dir, dagger, stream_p);
sendStart(nFace, dir, dagger, stream_p);
}
bool cudaColorSpinorField::ipcCopyComplete(int dir, int dim)
{
return hipSuccess == hipEventQuery(ipcCopyEvent[bufferIndex][dir][dim]);
}
bool cudaColorSpinorField::ipcRemoteCopyComplete(int dir, int dim)
{
return hipSuccess == hipEventQuery(ipcRemoteCopyEvent[bufferIndex][dir][dim]);
}
static bool complete_recv_fwd[QUDA_MAX_DIM] = { };
static bool complete_recv_back[QUDA_MAX_DIM] = { };
static bool complete_send_fwd[QUDA_MAX_DIM] = { };
static bool complete_send_back[QUDA_MAX_DIM] = { };
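// commsQuery polls the receive and the matching send for the given direction.
// Completion of each half is latched in the static flags above so a handle is
// only queried until it first reports done; once both halves have completed the
// flags are reset and 1 is returned (exactly once per exchange), otherwise 0.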
int cudaColorSpinorField::commsQuery(int nFace, int dir, int dagger, hipStream_t *stream_p) {
int dim = dir/2;
if (!commDimPartitioned(dim)) return 0;
if (dir%2==0) {
if (comm_peer2peer_enabled(1,dim)) {
if (!complete_recv_fwd[dim]) complete_recv_fwd[dim] = comm_query(mh_recv_p2p_fwd[bufferIndex][dim]);
} else {
if (!complete_recv_fwd[dim]) complete_recv_fwd[dim] = comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]);
}
if (comm_peer2peer_enabled(0,dim)) {
if (!complete_send_back[dim]) complete_send_back[dim] = comm_query(mh_send_p2p_back[bufferIndex][dim]);
} else {
if (!complete_send_back[dim]) complete_send_back[dim] = comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
}
if (complete_recv_fwd[dim] && complete_send_back[dim]) {
complete_recv_fwd[dim] = false;
complete_send_back[dim] = false;
return 1;
}
} else { // dir%2 == 1
if (comm_peer2peer_enabled(0,dim)) {
if (!complete_recv_back[dim]) complete_recv_back[dim] = comm_query(mh_recv_p2p_back[bufferIndex][dim]);
} else {
if (!complete_recv_back[dim]) complete_recv_back[dim] = comm_query(mh_recv_back[bufferIndex][nFace-1][dim]);
}
if (comm_peer2peer_enabled(1,dim)) {
if (!complete_send_fwd[dim]) complete_send_fwd[dim] = comm_query(mh_send_p2p_fwd[bufferIndex][dim]);
} else {
if (!complete_send_fwd[dim]) complete_send_fwd[dim] = comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
if (complete_recv_back[dim] && complete_send_fwd[dim]) {
complete_recv_back[dim] = false;
complete_send_fwd[dim] = false;
return 1;
}
}
return 0;
}
void cudaColorSpinorField::commsWait(int nFace, int dir, int dagger, hipStream_t *stream_p) {
int dim = dir / 2;
if (!commDimPartitioned(dim)) return;
if (dir%2==0) {
if (comm_peer2peer_enabled(1,dim)) {
comm_wait(mh_recv_p2p_fwd[bufferIndex][dim]);
hipEventSynchronize(ipcRemoteCopyEvent[bufferIndex][1][dim]);
} else {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) comm_wait(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
#endif
}
if (comm_peer2peer_enabled(0,dim)) {
comm_wait(mh_send_p2p_back[bufferIndex][dim]);
hipEventSynchronize(ipcCopyEvent[bufferIndex][0][dim]);
} else {
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) comm_wait(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
#endif
}
} else {
if (comm_peer2peer_enabled(0,dim)) {
comm_wait(mh_recv_p2p_back[bufferIndex][dim]);
hipEventSynchronize(ipcRemoteCopyEvent[bufferIndex][0][dim]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) comm_wait(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
#endif
}
if (comm_peer2peer_enabled(1,dim)) {
comm_wait(mh_send_p2p_fwd[bufferIndex][dim]);
hipEventSynchronize(ipcCopyEvent[bufferIndex][1][dim]);
} else {
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) comm_wait(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
#endif
}
}
return;
}
const hipEvent_t& cudaColorSpinorField::getIPCRemoteCopyEvent(int dir, int dim) const {
return ipcRemoteCopyEvent[bufferIndex][dir][dim];
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir, hipStream_t* stream_p)
{
int dim = dir/2;
if (!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
if (comm_peer2peer_enabled(1,dim)) return;
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, stream_p);
} else { // receive from backwards
if (comm_peer2peer_enabled(0,dim)) return;
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, stream_p);
}
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir)
{
int dim = dir/2;
if (!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
if (comm_peer2peer_enabled(1,dim)) return;
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, &stream[2*dim+0]);
} else { // receive from backwards
if (comm_peer2peer_enabled(0,dim)) return;
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, &stream[2*dim+1]);
}
}
void cudaColorSpinorField::scatterExtended(int nFace, int parity, int dagger, int dir)
{
bool zero_copy = false;
int dim = dir/2;
if (!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
unpackGhostExtended(from_fwd_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/], zero_copy);
} else { // receive from backwards
unpackGhostExtended(from_back_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/], zero_copy);
}
}
void cudaColorSpinorField::exchangeGhost(QudaParity parity, int dagger) const {
void **send = static_cast<void**>(safe_malloc(nDimComms * 2 * sizeof(void*)));
// allocate ghost buffer if not yet allocated
allocateGhostBuffer(send, ghost_fixme);
genericPackGhost(send, *this, parity, dagger);
int nFace = (nSpin == 1) ? 3 : 1;
exchange(ghost_fixme, send, nFace);
host_free(send);
}
std::ostream& operator<<(std::ostream &out, const cudaColorSpinorField &a) {
out << (const ColorSpinorField&)a;
out << "v = " << a.v << std::endl;
out << "norm = " << a.norm << std::endl;
out << "alloc = " << a.alloc << std::endl;
out << "init = " << a.init << std::endl;
return out;
}
//! for composite fields:
cudaColorSpinorField& cudaColorSpinorField::Component(const int idx) const {
if (this->IsComposite()) {
if (idx < this->CompositeDim()) { // set up an eigenvector from the set
return *(dynamic_cast<cudaColorSpinorField*>(components[idx]));
}
else{
errorQuda("Incorrect component index...");
}
}
errorQuda("Cannot get requested component");
exit(-1);
}
// copyCuda currently cannot work with sets of spinor fields.
void cudaColorSpinorField::CopySubset(cudaColorSpinorField &dst, const int range, const int first_element) const{
#if 0
if (first_element < 0) errorQuda("\nError: trying to set negative first element.\n");
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (first_element == 0 && range == this->EigvDim())
{
if (range != dst.EigvDim()) errorQuda("\nError: eigenvector range too big.\n");
checkField(dst, *this);
copyCuda(dst, *this);
}
else if ((first_element+range) < this->EigvDim())
{//setup eigenvector subset
cudaColorSpinorField *eigv_subset;
ColorSpinorParam param;
param.nColor = nColor;
param.nSpin = nSpin;
param.twistFlavor = twistFlavor;
param.precision = precision;
param.nDim = nDim;
param.pad = pad;
param.siteSubset = siteSubset;
param.siteOrder = siteOrder;
param.fieldOrder = fieldOrder;
param.gammaBasis = gammaBasis;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.eigv_dim = range;
param.eigv_id = -1;
param.v = (void*)((char*)v + first_element*eigv_bytes);
param.norm = (void*)((char*)norm + first_element*eigv_norm_bytes);
eigv_subset = new cudaColorSpinorField(param);
//Not really needed:
eigv_subset->eigenvectors.reserve(param.eigv_dim);
for (int id = first_element; id < (first_element+range); id++)
{
param.eigv_id = id;
eigv_subset->eigenvectors.push_back(new cudaColorSpinorField(*this, param));
}
checkField(dst, *eigv_subset);
copyCuda(dst, *eigv_subset);
delete eigv_subset;
} else {
errorQuda("Incorrect eigenvector dimension...");
}
} else{
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
#endif
}
void cudaColorSpinorField::getTexObjectInfo() const
{
#ifdef USE_TEXTURE_OBJECTS
printfQuda("\nPrint texture info for the field:\n");
std::cout << *this;
hipResourceDesc resDesc;
//memset(&resDesc, 0, sizeof(resDesc));
hipGetTextureObjectResourceDesc(&resDesc, this->Tex());
printfQuda("\nDevice pointer: %p\n", resDesc.res.linear.devPtr);
printfQuda("\nVolume (in bytes): %lu\n", resDesc.res.linear.sizeInBytes);
if (resDesc.resType == hipResourceTypeLinear) printfQuda("\nResource type: linear \n");
#endif
}
void cudaColorSpinorField::Source(const QudaSourceType sourceType, const int st, const int s, const int c) {
ColorSpinorParam param(*this);
param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
param.location = QUDA_CPU_FIELD_LOCATION;
param.create = QUDA_NULL_FIELD_CREATE;
cpuColorSpinorField tmp(param);
tmp.Source(sourceType, st, s, c);
*this = tmp;
}
} // namespace quda
| 962f0c362c13d81746f3f92ea091dbe85e8a0bea.cu | #include <stdlib.h>
#include <stdio.h>
#include <typeinfo>
#include <color_spinor_field.h>
#include <blas_quda.h>
#include <string.h>
#include <iostream>
#include <misc_helpers.h>
#include <face_quda.h>
#include <dslash_quda.h>
#ifdef DEVICE_PACK
static const QudaFieldLocation reorder_location_ = QUDA_CUDA_FIELD_LOCATION;
#else
static const QudaFieldLocation reorder_location_ = QUDA_CPU_FIELD_LOCATION;
#endif
int zeroCopy = 0;
namespace quda {
int cudaColorSpinorField::bufferIndex = 0;
bool cudaColorSpinorField::initGhostFaceBuffer = false;
void *cudaColorSpinorField::ghost_field[2] = {nullptr, nullptr};
void* cudaColorSpinorField::ghostFaceBuffer[2] = {nullptr, nullptr}; //gpu memory
void* cudaColorSpinorField::fwdGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
void* cudaColorSpinorField::backGhostFaceBuffer[2][QUDA_MAX_DIM]; //pointers to ghostFaceBuffer
size_t cudaColorSpinorField::ghostFaceBytes = 0;
bool cudaColorSpinorField::initIPCComms = false;
int cudaColorSpinorField::buffer_send_p2p_fwd[2][QUDA_MAX_DIM];
int cudaColorSpinorField::buffer_recv_p2p_fwd[2][QUDA_MAX_DIM];
int cudaColorSpinorField::buffer_send_p2p_back[2][QUDA_MAX_DIM];
int cudaColorSpinorField::buffer_recv_p2p_back[2][QUDA_MAX_DIM];
MsgHandle* cudaColorSpinorField::mh_send_p2p_fwd[2][QUDA_MAX_DIM];
MsgHandle* cudaColorSpinorField::mh_send_p2p_back[2][QUDA_MAX_DIM];
MsgHandle* cudaColorSpinorField::mh_recv_p2p_fwd[2][QUDA_MAX_DIM];
MsgHandle* cudaColorSpinorField::mh_recv_p2p_back[2][QUDA_MAX_DIM];
cudaEvent_t cudaColorSpinorField::ipcCopyEvent[2][2][QUDA_MAX_DIM];
cudaEvent_t cudaColorSpinorField::ipcRemoteCopyEvent[2][2][QUDA_MAX_DIM];
void* cudaColorSpinorField::fwdGhostSendDest[2][QUDA_MAX_DIM];
void* cudaColorSpinorField::backGhostSendDest[2][QUDA_MAX_DIM];
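// The ghost fields, packing buffers, IPC handles and events above are static and
// double buffered, so they are shared by all cudaColorSpinorField instances; the
// leading [2] index selects which of the two buffer sets is in use (bufferIndex).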
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorParam ¶m) :
ColorSpinorField(param), alloc(false), init(true), texInit(false),
ghostTexInit(false), ghost_field_tex{nullptr,nullptr}, initComms(false), bufferMessageHandler(0)
{
// this must come before create
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
v = param.v;
norm = param.norm;
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_COPY_FIELD_CREATE) {
errorQuda("not implemented");
}
}
cudaColorSpinorField::cudaColorSpinorField(const cudaColorSpinorField &src) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
ghostTexInit(false), ghost_field_tex{nullptr,nullptr}, initComms(false), bufferMessageHandler(0)
{
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
// creates a copy of src, any differences defined in param
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src,
const ColorSpinorParam ¶m) :
ColorSpinorField(src), alloc(false), init(true), texInit(false),
ghostTexInit(false), ghost_field_tex{nullptr,nullptr}, initComms(false), bufferMessageHandler(0)
{
// can only override if we are not using a reference or parity special case
if (param.create != QUDA_REFERENCE_FIELD_CREATE ||
(param.create == QUDA_REFERENCE_FIELD_CREATE &&
src.SiteSubset() == QUDA_FULL_SITE_SUBSET &&
param.siteSubset == QUDA_PARITY_SITE_SUBSET &&
typeid(src) == typeid(cudaColorSpinorField) ) ||
(param.create == QUDA_REFERENCE_FIELD_CREATE && (param.is_composite || param.is_component))) {
reset(param);
} else {
errorQuda("Undefined behaviour"); // else silent bug possible?
}
// This must be set before create is called
if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
v = (void*)src.V();
norm = (void*)src.Norm();
} else {
errorQuda("Cannot reference a non-cuda field");
}
if (composite_descr.is_component && !(src.SiteSubset() == QUDA_FULL_SITE_SUBSET && this->SiteSubset() == QUDA_PARITY_SITE_SUBSET))
{ // set up an eigenvector from the set
v = (void*)((char*)v + composite_descr.id*bytes);
norm = (void*)((char*)norm + composite_descr.id*norm_bytes);
}
}
create(param.create);
if (param.create == QUDA_NULL_FIELD_CREATE) {
// do nothing
} else if (param.create == QUDA_ZERO_FIELD_CREATE) {
zero();
} else if (param.create == QUDA_COPY_FIELD_CREATE) {
copySpinorField(src);
} else if (param.create == QUDA_REFERENCE_FIELD_CREATE) {
// do nothing
} else {
errorQuda("CreateType %d not implemented", param.create);
}
}
cudaColorSpinorField::cudaColorSpinorField(const ColorSpinorField &src)
: ColorSpinorField(src), alloc(false), init(true), texInit(false),
ghostTexInit(false), ghost_field_tex{nullptr,nullptr}, initComms(false), bufferMessageHandler(0)
{
create(QUDA_COPY_FIELD_CREATE);
copySpinorField(src);
}
ColorSpinorField& cudaColorSpinorField::operator=(const ColorSpinorField &src) {
if (typeid(src) == typeid(cudaColorSpinorField)) {
*this = (dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cpuColorSpinorField)) {
*this = (dynamic_cast<const cpuColorSpinorField&>(src));
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cudaColorSpinorField &src) {
if (&src != this) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
destroyComms(); // not sure if this necessary
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
copySpinorField(src);
}
return *this;
}
cudaColorSpinorField& cudaColorSpinorField::operator=(const cpuColorSpinorField &src) {
// keep current attributes unless unset
if (!ColorSpinorField::init) { // note this will turn a reference field into a regular field
destroy();
ColorSpinorField::operator=(src);
create(QUDA_COPY_FIELD_CREATE);
}
loadSpinorField(src);
return *this;
}
cudaColorSpinorField::~cudaColorSpinorField() {
destroyComms();
destroy();
}
void cudaColorSpinorField::create(const QudaFieldCreate create) {
if (siteSubset == QUDA_FULL_SITE_SUBSET && siteOrder != QUDA_EVEN_ODD_SITE_ORDER) {
errorQuda("Subset not implemented");
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
v = pool_device_malloc(bytes);
if (precision == QUDA_HALF_PRECISION) norm = pool_device_malloc(norm_bytes);
alloc = true;
}
if (siteSubset == QUDA_FULL_SITE_SUBSET) {
if(composite_descr.is_composite && (create != QUDA_REFERENCE_FIELD_CREATE)) {
if(composite_descr.dim <= 0) errorQuda("\nComposite size is not defined\n");
ColorSpinorParam param;
param.siteSubset = QUDA_FULL_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.is_composite = false;
param.composite_dim = 0;
param.is_component = true;
components.reserve(composite_descr.dim);
for(int cid = 0; cid < composite_descr.dim; cid++) {
param.component_id = cid;
components.push_back(new cudaColorSpinorField(*this, param));
}
} else {
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.x[0] /= 2; // set single parity dimensions
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.is_composite = false;
param.composite_dim = 0;
param.is_component = composite_descr.is_component;
param.component_id = composite_descr.id;
even = new cudaColorSpinorField(*this, param);
odd = new cudaColorSpinorField(*this, param);
// need this hackery for the moment (need to locate the odd pointers half way into the full field)
(dynamic_cast<cudaColorSpinorField*>(odd))->v = (void*)((char*)v + bytes/2);
if (precision == QUDA_HALF_PRECISION)
(dynamic_cast<cudaColorSpinorField*>(odd))->norm = (void*)((char*)norm + norm_bytes/2);
#ifdef USE_TEXTURE_OBJECTS
dynamic_cast<cudaColorSpinorField*>(even)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(even)->createTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(odd)->createTexObject();
#endif
}
} else { //siteSubset == QUDA_PARITY_SITE_SUBSET
//! set up an object for the selected eigenvector (the first one by default):
if (composite_descr.is_composite && (create != QUDA_REFERENCE_FIELD_CREATE))
{
if(composite_descr.dim <= 0) errorQuda("\nComposite size is not defined\n");
//if(bytes > 1811939328) warningQuda("\nCUDA API probably won't be able to create texture object for the eigenvector set... Object size is : %u bytes\n", bytes);
if (getVerbosity() == QUDA_DEBUG_VERBOSE) printfQuda("\nEigenvector set constructor...\n");
// create the associated even and odd subsets
ColorSpinorParam param;
param.siteSubset = QUDA_PARITY_SITE_SUBSET;
param.nDim = nDim;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.v = v;
param.norm = norm;
param.is_composite = false;
param.composite_dim = 0;
param.is_component = true;
// reserve the eigenvector set
components.reserve(composite_descr.dim);
// set up volume, [real_]length and stride for a single eigenvector
for(int cid = 0; cid < composite_descr.dim; cid++)
{
param.component_id = cid;
components.push_back(new cudaColorSpinorField(*this, param));
#ifdef USE_TEXTURE_OBJECTS //(a lot of texture objects...)
dynamic_cast<cudaColorSpinorField*>(components[cid])->destroyTexObject();
dynamic_cast<cudaColorSpinorField*>(components[cid])->createTexObject();
#endif
}
}
}
if (create != QUDA_REFERENCE_FIELD_CREATE) {
if (siteSubset != QUDA_FULL_SITE_SUBSET) {
zeroPad();
} else if(!composite_descr.is_composite) {
(dynamic_cast<cudaColorSpinorField*>(even))->zeroPad();
(dynamic_cast<cudaColorSpinorField*>(odd))->zeroPad();
} else { //temporary hack for the full spinor field sets, manual zeroPad for each component:
for(int cid = 0; cid < composite_descr.dim; cid++) {
(dynamic_cast<cudaColorSpinorField&>(components[cid]->Even())).zeroPad();
(dynamic_cast<cudaColorSpinorField&>(components[cid]->Odd())).zeroPad();
}
}
}
#ifdef USE_TEXTURE_OBJECTS
if (!composite_descr.is_composite || composite_descr.is_component)
createTexObject();
#endif
}
#ifdef USE_TEXTURE_OBJECTS
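// createTexObject binds the field data (and, for half precision, the norm array)
// to linear texture objects. The channel format tracks the native ordering: two
// components per texel for staggered/coarse fields in half or single precision,
// four components otherwise, with double2 data spread across int4 texels.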
void cudaColorSpinorField::createTexObject() {
if (isNative()) {
if (texInit) errorQuda("Already bound textures");
// create the texture for the field components
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = cudaChannelFormatKindFloat;
else desc.f = cudaChannelFormatKindSigned; // half is short, double is int2
// staggered and coarse fields in half and single are always two component
if ( (nSpin == 1 || nSpin == 2) && (precision == QUDA_HALF_PRECISION || precision == QUDA_SINGLE_PRECISION)) {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = 0;
desc.w = 0;
} else { // all others are four component (double2 is spread across int4)
desc.x = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.y = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.z = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.w = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
}
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = v;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = bytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = cudaReadModeNormalizedFloat;
else texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
// create the texture for the norm components
if (precision == QUDA_HALF_PRECISION) {
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
desc.f = cudaChannelFormatKindFloat;
desc.x = 8*QUDA_SINGLE_PRECISION; desc.y = 0; desc.z = 0; desc.w = 0;
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = norm;
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = norm_bytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texNorm, &resDesc, &texDesc, NULL);
}
texInit = true;
checkCudaError();
}
}
void cudaColorSpinorField::createGhostTexObject() {
// create the ghost texture object
if (isNative() && ghost_bytes) {
if (ghostTexInit) errorQuda("Already bound ghost texture");
for (int b=0; b<2; b++) {
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
if (precision == QUDA_SINGLE_PRECISION) desc.f = cudaChannelFormatKindFloat;
else desc.f = cudaChannelFormatKindSigned; // half is short, double is int2
// staggered and coarse fields in half and single are always two component
if ( (nSpin == 1 || nSpin == 2) && (precision == QUDA_HALF_PRECISION || precision == QUDA_SINGLE_PRECISION)) {
desc.x = 8*precision;
desc.y = 8*precision;
desc.z = 0;
desc.w = 0;
} else { // all others are four component (double2 is spread across int4)
desc.x = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.y = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.z = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
desc.w = (precision == QUDA_DOUBLE_PRECISION) ? 32 : 8*precision;
}
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = ghost_field[b];
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = ghost_bytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
if (precision == QUDA_HALF_PRECISION) texDesc.readMode = cudaReadModeNormalizedFloat;
else texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&ghostTex[b], &resDesc, &texDesc, NULL);
if (precision == QUDA_HALF_PRECISION) {
cudaChannelFormatDesc desc;
memset(&desc, 0, sizeof(cudaChannelFormatDesc));
desc.f = cudaChannelFormatKindFloat;
desc.x = 8*QUDA_SINGLE_PRECISION; desc.y = 0; desc.z = 0; desc.w = 0;
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = ghost_field[b];
resDesc.res.linear.desc = desc;
resDesc.res.linear.sizeInBytes = ghost_bytes;
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&ghostTexNorm[b], &resDesc, &texDesc, NULL);
}
ghost_field_tex[b] = ghost_field[b];
} // buffer index
ghostTexInit = true;
checkCudaError();
}
}
void cudaColorSpinorField::destroyTexObject() {
if (isNative() && texInit) {
cudaDestroyTextureObject(tex);
if (ghost_bytes) {
cudaDestroyTextureObject(ghostTex[0]);
cudaDestroyTextureObject(ghostTex[1]);
}
if (precision == QUDA_HALF_PRECISION) {
cudaDestroyTextureObject(texNorm);
if (ghost_bytes) {
cudaDestroyTextureObject(ghostTexNorm[0]);
cudaDestroyTextureObject(ghostTexNorm[1]);
}
}
texInit = false;
}
}
void cudaColorSpinorField::destroyGhostTexObject() {
if (isNative() && ghostTexInit) {
cudaDestroyTextureObject(ghostTex[0]);
cudaDestroyTextureObject(ghostTex[1]);
if (precision == QUDA_HALF_PRECISION) {
cudaDestroyTextureObject(ghostTexNorm[0]);
cudaDestroyTextureObject(ghostTexNorm[1]);
}
ghostTexInit = false;
}
}
#endif
void cudaColorSpinorField::destroy() {
if (alloc) {
pool_device_free(v);
if (precision == QUDA_HALF_PRECISION) pool_device_free(norm);
alloc = false;
}
if (composite_descr.is_composite)
{
CompositeColorSpinorField::iterator vec;
for(vec = components.begin(); vec != components.end(); vec++) delete *vec;
}
if ((siteSubset == QUDA_FULL_SITE_SUBSET && !composite_descr.is_composite) || (siteSubset == QUDA_FULL_SITE_SUBSET && composite_descr.is_component)) {
delete even;
delete odd;
}
#ifdef USE_TEXTURE_OBJECTS
if (!composite_descr.is_composite || composite_descr.is_component)
destroyTexObject();
#endif
}
// cuda's floating point format, IEEE-754, represents the floating point
// zero as 4 zero bytes
void cudaColorSpinorField::zero() {
cudaMemsetAsync(v, 0, bytes, streams[Nstream-1]);
if (precision == QUDA_HALF_PRECISION) cudaMemsetAsync(norm, 0, norm_bytes, streams[Nstream-1]);
}
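// zeroPad clears the padding region between the active volume and the field
// stride: a single cudaMemset2D over Npad rows, one per block of fieldOrder real
// components, each row starting where the data for that block ends.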
void cudaColorSpinorField::zeroPad() {
size_t pad_bytes = (stride - volume) * precision * fieldOrder;
int Npad = nColor * nSpin * 2 / fieldOrder;
if (composite_descr.is_composite && !composite_descr.is_component){//we consider the whole eigenvector set:
Npad *= composite_descr.dim;
pad_bytes /= composite_descr.dim;
}
size_t pitch = ((!composite_descr.is_composite || composite_descr.is_component) ? stride : composite_descr.stride)*fieldOrder*precision;
char *dst = (char*)v + ((!composite_descr.is_composite || composite_descr.is_component) ? volume : composite_descr.volume)*fieldOrder*precision;
if (pad_bytes) cudaMemset2D(dst, pitch, 0, pad_bytes, Npad);
//for (int i=0; i<Npad; i++) {
// if (pad_bytes) cudaMemset((char*)v + (volume + i*stride)*fieldOrder*precision, 0, pad_bytes);
//}
}
void cudaColorSpinorField::copy(const cudaColorSpinorField &src) {
checkField(*this, src);
if (this->GammaBasis() != src.GammaBasis()) errorQuda("cannot call this copy with different basis");
blas::copy(*this, src);
}
void cudaColorSpinorField::copySpinorField(const ColorSpinorField &src) {
// src is on the device and is native
if (typeid(src) == typeid(cudaColorSpinorField) &&
isNative() && dynamic_cast<const cudaColorSpinorField &>(src).isNative() &&
this->GammaBasis() == src.GammaBasis()) {
copy(dynamic_cast<const cudaColorSpinorField&>(src));
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else if (typeid(src) == typeid(cpuColorSpinorField)) { // src is on the host
loadSpinorField(src);
} else {
errorQuda("Unknown input ColorSpinorField %s", typeid(src).name());
}
}
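// loadSpinorField imports a host field: if reordering is done on the CPU, the
// data is reordered into a pinned staging buffer and then copied to the device;
// otherwise the host data is either staged through bufferDevice or mapped
// zero-copy, and the reordering is performed on the GPU.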
void cudaColorSpinorField::loadSpinorField(const ColorSpinorField &src) {
if (reorder_location_ == QUDA_CPU_FIELD_LOCATION &&typeid(src) == typeid(cpuColorSpinorField)) {
void *buffer = pool_pinned_malloc(bytes + norm_bytes);
memset(buffer, 0, bytes+norm_bytes); // FIXME (temporary?) bug fix for padding
copyGenericColorSpinor(*this, src, QUDA_CPU_FIELD_LOCATION, buffer, 0, static_cast<char*>(buffer)+bytes, 0);
qudaMemcpy(v, buffer, bytes, cudaMemcpyHostToDevice);
qudaMemcpy(norm, static_cast<char*>(buffer)+bytes, norm_bytes, cudaMemcpyHostToDevice);
pool_pinned_free(buffer);
} else if (typeid(src) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION);
} else {
void *Src=nullptr, *srcNorm=nullptr, *buffer=nullptr;
if (!zeroCopy) {
resizeBufferDevice(src.Bytes()+src.NormBytes());
Src = bufferDevice;
srcNorm = (char*)bufferDevice + src.Bytes();
qudaMemcpy(Src, src.V(), src.Bytes(), cudaMemcpyHostToDevice);
qudaMemcpy(srcNorm, src.Norm(), src.NormBytes(), cudaMemcpyHostToDevice);
} else {
buffer = pool_pinned_malloc(src.Bytes()+src.NormBytes());
memcpy(buffer, src.V(), src.Bytes());
memcpy(static_cast<char*>(buffer)+src.Bytes(), src.Norm(), src.NormBytes());
cudaHostGetDevicePointer(&Src, buffer, 0);
srcNorm = (void*)((char*)Src + src.Bytes());
}
cudaMemset(v, 0, bytes); // FIXME (temporary?) bug fix for padding
copyGenericColorSpinor(*this, src, QUDA_CUDA_FIELD_LOCATION, 0, Src, 0, srcNorm);
if (zeroCopy) pool_pinned_free(buffer);
}
return;
}
void cudaColorSpinorField::saveSpinorField(ColorSpinorField &dest) const {
if (reorder_location_ == QUDA_CPU_FIELD_LOCATION && typeid(dest) == typeid(cpuColorSpinorField)) {
void *buffer = pool_pinned_malloc(bytes+norm_bytes);
qudaMemcpy(buffer, v, bytes, cudaMemcpyDeviceToHost);
qudaMemcpy(static_cast<char*>(buffer)+bytes, norm, norm_bytes, cudaMemcpyDeviceToHost);
copyGenericColorSpinor(dest, *this, QUDA_CPU_FIELD_LOCATION, 0, buffer, 0, static_cast<char*>(buffer)+bytes);
pool_pinned_free(buffer);
} else if (typeid(dest) == typeid(cudaColorSpinorField)) {
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION);
} else {
void *dst=nullptr, *dstNorm=nullptr, *buffer=nullptr;
if (!zeroCopy) {
resizeBufferDevice(dest.Bytes()+dest.NormBytes());
dst = bufferDevice;
dstNorm = (char*)bufferDevice+dest.Bytes();
} else {
buffer = pool_pinned_malloc(dest.Bytes()+dest.NormBytes());
cudaHostGetDevicePointer(&dst, buffer, 0);
dstNorm = (char*)dst+dest.Bytes();
}
copyGenericColorSpinor(dest, *this, QUDA_CUDA_FIELD_LOCATION, dst, v, dstNorm, 0);
if (!zeroCopy) {
qudaMemcpy(dest.V(), dst, dest.Bytes(), cudaMemcpyDeviceToHost);
qudaMemcpy(dest.Norm(), dstNorm, dest.NormBytes(), cudaMemcpyDeviceToHost);
} else {
memcpy(dest.V(), buffer, dest.Bytes());
memcpy(dest.Norm(), static_cast<char*>(buffer) + dest.Bytes(), dest.NormBytes());
}
if (zeroCopy) pool_pinned_free(buffer);
}
return;
}
static bool ghost_field_reset = false;
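// Layout of the ghost buffer produced below: for each partitioned dimension the
// backwards face block precedes the forwards face block, each holding
// nFace*ghostFace[i]*Nint*precision bytes (plus a float norm block in half
// precision), with the per-dimension blocks packed back to back in ghostFaceBuffer.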
void cudaColorSpinorField::allocateGhostBuffer(int nFace) {
if (!comm_partitioned()) return;
createGhostZone(nFace);
// only allocate if not already allocated or buffer required is bigger than previously
if ( !initGhostFaceBuffer || ghost_bytes > ghostFaceBytes ) {
if (initGhostFaceBuffer) {
#ifdef USE_TEXTURE_OBJECTS
destroyGhostTexObject();
#endif
if (initGhostFaceBuffer && ghost_bytes) {
for (int b=0; b<2; b++) device_pinned_free(ghost_field[b]);
}
for (int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (ghost_bytes > 0) {
// GPU pinned allocator to avoid this being redirected, e.g., by QDPJIT
if (ghost_bytes) {
for (int b=0; b<2; b++) ghost_field[b] = device_pinned_malloc(ghost_bytes);
}
ghost_field_reset = true;
for (int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(ghost_bytes);
initGhostFaceBuffer = true;
ghostFaceBytes = ghost_bytes;
}
}
#ifdef USE_TEXTURE_OBJECTS
// ghost texture is per object
if (ghost_field_tex[0] != ghost_field[0] || ghost_field_tex[1] != ghost_field[1]) destroyGhostTexObject();
if (!ghostTexInit) createGhostTexObject();
#endif
// always initialize the ghost receive pointers
if (siteSubset == QUDA_PARITY_SITE_SUBSET) {
for (int i=0; i<nDim; ++i) {
if (commDimPartitioned(i)) {
for (int b=0; b<2; b++) {
ghost[b][i] = (char*)ghost_field[b] + ghostOffset[i][0]*precision;
if (precision == QUDA_HALF_PRECISION)
ghostNorm[b][i] = (char*)ghost_field[b] + ghostNormOffset[i][0]*QUDA_SINGLE_PRECISION;
}
}
}
}
// always initialize the ghost send pointers
int Nint = nColor * nSpin * 2 / (nSpin == 4 ? 2 : 1); // number of internal degrees of freedom
size_t offset = 0;
for (int i=0; i<4; i++) {
if (!commDimPartitioned(i)) continue;
// compute size of buffer required
ghost_face_bytes[i] = nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) {
ghost_face_bytes[i] += nFace*ghostFace[i]*sizeof(float);
}
for (int b=0; b<2; ++b) {
backGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
}
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
for (int b=0; b<2; ++b) {
fwdGhostFaceBuffer[b][i] = (void*)(((char*)ghostFaceBuffer[b]) + offset);
}
offset += nFace*ghostFace[i]*Nint*precision;
if (precision == QUDA_HALF_PRECISION) offset += nFace*ghostFace[i]*sizeof(float);
}
}
void cudaColorSpinorField::allocateGhostBuffer(void *send_buf[], void *recv_buf[]) const
{
int num_faces = 1;
if (nSpin == 1) num_faces = 3; // staggered
int spinor_size = 2*nSpin*nColor*precision;
// resize face only if requested size is larger than previously allocated one
size_t faceBytes = 0;
for (int i=0; i<nDimComms; i++) {
if (comm_dim_partitioned(i)) faceBytes += 2*siteSubset*num_faces*surfaceCB[i]*spinor_size;
}
if (!initGhostFaceBuffer || faceBytes > ghostFaceBytes) {
if (initGhostFaceBuffer) {
for (int b=0; b<2; ++b) device_free(ghostFaceBuffer[b]);
}
if (faceBytes > 0) {
for (int b=0; b<2; ++b) ghostFaceBuffer[b] = device_malloc(faceBytes);
initGhostFaceBuffer = true;
ghostFaceBytes = faceBytes;
}
}
size_t offset = 0;
for (int i=0; i<nDimComms; i++) {
if (comm_dim_partitioned(i)) {
// use first buffer for recv and second for send
recv_buf[2*i+0] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[0]) + offset));
send_buf[2*i+0] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[1]) + offset));
offset += siteSubset*num_faces*surfaceCB[i]*spinor_size;
recv_buf[2*i+1] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[0]) + offset));
send_buf[2*i+1] = static_cast<void*>((static_cast<char*>(ghostFaceBuffer[1]) + offset));
offset += siteSubset*num_faces*surfaceCB[i]*spinor_size;
} else {
recv_buf[2*i+0] = nullptr;
recv_buf[2*i+1] = nullptr;
send_buf[2*i+0] = nullptr;
send_buf[2*i+1] = nullptr;
}
}
}
void cudaColorSpinorField::freeGhostBuffer(void)
{
destroyIPCComms();
if (!initGhostFaceBuffer) return;
for (int b=0; b<2; b++) {
if (ghost_field[b]) device_pinned_free(ghost_field[b]);
if (ghostFaceBuffer[b]) device_free(ghostFaceBuffer[b]);
for (int i=0;i < 4; i++) {
if (!commDimPartitioned(i)) continue;
backGhostFaceBuffer[b][i] = NULL;
fwdGhostFaceBuffer[b][i] = NULL;
}
}
initGhostFaceBuffer = false;
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhost(const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t *stream,
void *buffer, bool zero_copy, double a, double b)
{
#ifdef MULTI_GPU
int face_num;
if (dir == QUDA_BACKWARDS) {
face_num = 0;
}else if (dir == QUDA_FORWARDS) {
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFace(packBuffer, *this, zero_copy, nFace, dagger, parity, dim, face_num, *stream, a, b);
#else
errorQuda("packGhost not built on single-GPU build");
#endif
}
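// When the T face cannot be packed by a kernel, sendGhost instead strides the
// face directly out of the field: Npad rows of Nvec elements are copied with
// cudaMemcpy2DAsync (source pitch = stride), with a separate copy per
// fifth-dimension slice and, in half precision, for the norm field.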
// send the ghost zone to the host
void cudaColorSpinorField::sendGhost(void *ghost_spinor, const int nFace, const int dim,
const QudaDirection dir, const int dagger,
cudaStream_t *stream) {
#ifdef MULTI_GPU
int Nvec = (nSpin == 1 || precision == QUDA_DOUBLE_PRECISION) ? 2 : 4;
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int Npad = Nint / Nvec; // number of Nvec buffers we have
if (dim !=3 || getKernelPackT() || getTwistPack()) { // use kernels to pack into contiguous buffers then a single cudaMemcpy
size_t bytes = nFace*Nint*ghostFace[dim]*precision;
if (precision == QUDA_HALF_PRECISION) bytes += nFace*ghostFace[dim]*sizeof(float);
void* gpu_buf =
(dir == QUDA_BACKWARDS) ? this->backGhostFaceBuffer[bufferIndex][dim] : this->fwdGhostFaceBuffer[bufferIndex][dim];
cudaMemcpyAsync(ghost_spinor, gpu_buf, bytes, cudaMemcpyDeviceToHost, *stream);
} else if (this->TwistFlavor() != QUDA_TWIST_NONDEG_DOUBLET) { // do multiple cudaMemcpys
const int x4 = nDim==5 ? x[4] : 1;
const int Nt_minus1_offset = (volumeCB - nFace*ghostFace[3])/x4; // N_t -1 = Vh-Vsh
int offset = 0;
if (nSpin == 1) {
offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset;
} else if (nSpin == 4) {
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;
if (upper) offset = (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
else offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : Nt_minus1_offset);
}
size_t len = nFace*(ghostFace[3]/x4)*Nvec*precision;
size_t dpitch = x4*len;
size_t spitch = stride*Nvec*precision;
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
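// cudaMemcpy2DAsync below copies Npad rows of len bytes each: row r is read from src + r*spitch and written to dst + r*dpitch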
for (int s=0; s<x4; s++) { // loop over multiple 4-d volumes (if they exist)
void *dst = (char*)ghost_spinor + s*len;
void *src = (char*)v + (offset + s*(volumeCB/x4))*Nvec*precision;
cudaMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
size_t len = nFace*(ghostFace[3]/x4)*sizeof(float);
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + nFace*Nint*ghostFace[3]*precision + s*len;
void *src = (char*)norm + norm_offset + s*(volumeCB/x4)*sizeof(float);
cudaMemcpyAsync(dst, src, len, cudaMemcpyDeviceToHost, *stream);
}
}
}else{
int flavorVolume = volume / 2;
int flavorTFace = ghostFace[3] / 2;
int flavor1_Nt_minus1_offset = (flavorVolume - flavorTFace);
int flavor2_Nt_minus1_offset = (volume - flavorTFace);
int flavor1_offset = 0;
int flavor2_offset = 0;
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == QUDA_FORWARDS) upper = !upper;
int lower_spin_offset = Npad*stride;//ndeg tm: stride=2*flavor_volume+pad
if (upper) {
flavor1_offset = (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}else{
flavor1_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = lower_spin_offset + (dir == QUDA_BACKWARDS ? flavorVolume : flavor2_Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *dst = (char*)ghost_spinor;
void *src = (char*)v + flavor1_offset*Nvec*precision;
size_t len = flavorTFace*Nvec*precision;
size_t spitch = stride*Nvec*precision;//ndeg tm: stride=2*flavor_volume+pad
size_t dpitch = 2*len;
cudaMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToHost, *stream);
dst = (char*)ghost_spinor+len;
src = (char*)v + flavor2_offset*Nvec*precision;
cudaMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToHost, *stream);
if (precision == QUDA_HALF_PRECISION) {
int Nt_minus1_offset = (flavorVolume - flavorTFace);
int norm_offset = (dir == QUDA_BACKWARDS) ? 0 : Nt_minus1_offset*sizeof(float);
void *dst = (char*)ghost_spinor + Nint*ghostFace[3]*precision;
void *src = (char*)norm + norm_offset;
size_t dpitch = flavorTFace*sizeof(float);
size_t spitch = flavorVolume*sizeof(float);
cudaMemcpy2DAsync(dst, dpitch, src, spitch, flavorTFace*sizeof(float), 2, cudaMemcpyDeviceToHost, *stream);
}
}
#else
errorQuda("sendGhost not built on single-GPU build");
#endif
}
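// copy a host-side ghost buffer into this field's device ghost zone for the given dimension and direction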
void cudaColorSpinorField::unpackGhost(const void* ghost_spinor, const int nFace,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t* stream)
{
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint*precision;
const void *src = ghost_spinor;
int ghost_offset = (dir == QUDA_BACKWARDS) ? ghostOffset[dim][0] : ghostOffset[dim][1];
void *ghost_dst = (char*)ghost_field[bufferIndex] + precision*ghost_offset;
if (precision == QUDA_HALF_PRECISION) len += nFace*ghostFace[dim]*sizeof(float);
cudaMemcpyAsync(ghost_dst, src, len, cudaMemcpyHostToDevice, *stream);
}
// pack the ghost zone into a contiguous buffer for communications
void cudaColorSpinorField::packGhostExtended(const int nFace, const int R[], const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t *stream,
void *buffer, bool zero_copy)
{
#ifdef MULTI_GPU
int face_num;
if (dir == QUDA_BACKWARDS) {
face_num = 0;
}else if (dir == QUDA_FORWARDS) {
face_num = 1;
}else{
face_num = 2;
}
void *packBuffer = buffer ? buffer : ghostFaceBuffer[bufferIndex];
packFaceExtended(packBuffer, *this, zero_copy, nFace, R, dagger, parity, dim, face_num, *stream);
#else
errorQuda("packGhostExtended not built on single-GPU build");
#endif
}
// copy data from host buffer into boundary region of device field
void cudaColorSpinorField::unpackGhostExtended(const void* ghost_spinor, const int nFace, const QudaParity parity,
const int dim, const QudaDirection dir,
const int dagger, cudaStream_t* stream, bool zero_copy)
{
// First call the regular unpackGhost routine to copy data into the `usual' ghost-zone region
// of the data array
unpackGhost(ghost_spinor, nFace, dim, dir, dagger, stream);
// Next step is to copy data from the ghost zone back to the interior region
int Nint = (nColor * nSpin * 2) / (nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int len = nFace*ghostFace[dim]*Nint;
int offset = length + ghostOffset[dim][0];
offset += (dir == QUDA_BACKWARDS) ? 0 : len;
#ifdef MULTI_GPU
const int face_num = 2;
const bool unpack = true;
const int R[4] = {0,0,0,0};
packFaceExtended(ghostFaceBuffer[bufferIndex], *this, zero_copy, nFace, R, dagger, parity, dim, face_num, *stream, unpack);
#else
errorQuda("unpackGhostExtended not built on single-GPU build");
#endif
}
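// stream array used by the pack/gather/scatter methods below; set via streamInit()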
cudaStream_t *stream;
void cudaColorSpinorField::createComms(int nFace) {
allocateGhostBuffer(nFace); // allocate the ghost buffer if not yet allocated
if (!initComms || nFaceComms != nFace || bufferMessageHandler != bufferPinnedResizeCount) {
// if we are requesting a new number of faces destroy and start over
destroyComms();
if (siteSubset != QUDA_PARITY_SITE_SUBSET)
errorQuda("Only supports single parity fields");
#ifdef GPU_COMMS
bool comms = false;
for (int i=0; i<nDimComms; i++) if (commDimPartitioned(i)) comms = true;
#endif
if (nFace > maxNface)
errorQuda("Requested number of faces %d in communicator is greater than supported %d",
nFace, maxNface);
// faceBytes is the sum of all face sizes
size_t faceBytes = 0;
// nbytes is the size in bytes of each face
size_t nbytes[QUDA_MAX_DIM];
// The number of degrees of freedom per site for the given
// field. Currently assumes spin projection of a Wilson-like
// field (so half the number of degrees of freedom).
int Ndof = (2 * nSpin * nColor) / (nSpin==4 ? 2 : 1);
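// e.g. a Wilson-like field (nSpin=4, nColor=3) gives Ndof = 2*4*3/2 = 12 spin-projected reals per site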
for (int i=0; i<nDimComms; i++) {
nbytes[i] = maxNface*surfaceCB[i]*Ndof*precision;
if (precision == QUDA_HALF_PRECISION) nbytes[i] += maxNface*surfaceCB[i]*sizeof(float);
if (!commDimPartitioned(i)) continue;
faceBytes += 2*nbytes[i];
}
#ifndef GPU_COMMS
// use static pinned memory for face buffers
for (int b=0; b<2; ++b) {
if (faceBytes > 0) {
resizeBufferPinned(2*faceBytes, b); // oversizes for GPU_COMMS case
my_face[b] = bufferPinned[b];
cudaHostGetDevicePointer(&my_face_d[b], my_face[b], 0); // set the matching device pointer
from_face[b] = static_cast<char*>(my_face[b]) + faceBytes;
from_face_d[b] = static_cast<char*>(my_face_d[b]) + faceBytes;
} else {
from_face[b] = nullptr;
from_face_d[b] = nullptr;
my_face[b] = nullptr;
my_face_d[b] = nullptr;
}
}
checkCudaError();
// assign pointers for each face - it's ok to alias for different Nface parameters
size_t offset = 0;
#endif
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
for (int b=0; b<2; ++b) {
my_back_face[b][i] = backGhostFaceBuffer[b][i];
from_back_face[b][i] = ghost[b][i];
if (precision == QUDA_HALF_PRECISION) {
my_back_norm_face[b][i] = static_cast<char*>(backGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
from_back_norm_face[b][i] = ghostNorm[b][i];
}
} // loop over b
#else
for (int b=0; b<2; ++b) {
my_back_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_back_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
#ifdef GPU_COMMS
for (int b=0; b<2; ++b) {
my_fwd_face[b][i] = fwdGhostFaceBuffer[b][i];
//from_fwd_face[b][i] = ghost[i] + nFace*ghostFace[i]*Ndof*precision;
from_fwd_face[b][i] = ghost_field[b] + ghostOffset[i][1]*precision;
if (precision == QUDA_HALF_PRECISION) {
my_fwd_norm_face[b][i] = static_cast<char*>(fwdGhostFaceBuffer[b][i]) + nFace*ghostFace[i]*Ndof*precision;
// from_fwd_norm_face[b][i] = static_cast<char*>(ghostNorm[i]) + nFace*ghostFace[i]*sizeof(float);
from_fwd_norm_face[b][i] = static_cast<char*>(ghost_field[b]) + ghostNormOffset[i][1]*sizeof(float);
}
} // loop over b
#else
for (int b=0; b<2; ++b) {
my_fwd_face[b][i] = static_cast<char*>(my_face[b]) + offset;
from_fwd_face[b][i] = static_cast<char*>(from_face[b]) + offset;
}
offset += nbytes[i];
#endif
}
checkCudaError();
// create a different message handler for each direction and Nface
for (int b=0; b<2; ++b) {
mh_send_fwd[b] = new MsgHandle**[maxNface];
mh_send_back[b] = new MsgHandle**[maxNface];
mh_recv_fwd[b] = new MsgHandle**[maxNface];
mh_recv_back[b] = new MsgHandle**[maxNface];
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
mh_send_norm_fwd[b] = new MsgHandle**[maxNface];
mh_send_norm_back[b] = new MsgHandle**[maxNface];
mh_recv_norm_fwd[b] = new MsgHandle**[maxNface];
mh_recv_norm_back[b] = new MsgHandle**[maxNface];
}
#endif
} // loop over b
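// handler arrays are indexed [buffer b][j = Nface-1][2*dim (+1 selects the dagger variant)] for sends and [b][j][dim] for receives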
for (int j=0; j<maxNface; j++) {
for (int b=0; b<2; ++b) {
mh_send_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_back[b][j] = new MsgHandle*[nDimComms];
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
mh_send_norm_fwd[b][j] = new MsgHandle*[2*nDimComms];
mh_send_norm_back[b][j] = new MsgHandle*[2*nDimComms];
mh_recv_norm_fwd[b][j] = new MsgHandle*[nDimComms];
mh_recv_norm_back[b][j] = new MsgHandle*[nDimComms];
}
#endif
} // loop over b
checkCudaError();
for (int i=0; i<nDimComms; i++) {
if (!commDimPartitioned(i)) continue;
#ifdef GPU_COMMS
size_t nbytes_Nface = surfaceCB[i]*Ndof*precision*(j+1);
size_t nbytes_Nface_norm = surfaceCB[i]*(j+1)*sizeof(float);
if (i != 3 || getKernelPackT() || getTwistPack()) {
#else
size_t nbytes_Nface = (nbytes[i] / maxNface) * (j+1);
#endif
for (int b=0; b<2; ++b) {
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_face[b][i], i, -1, nbytes_Nface) : NULL;
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i]; // alias pointers
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i]; // alias pointers
}
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
for (int b=0; b<2; ++b) {
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(my_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
} else if (this->TwistFlavor() == QUDA_TWIST_NONDEG_DOUBLET) {
errorQuda("GPU_COMMS for non-degenerate doublet only supported with time-dimension kernel packing enabled.");
} else {
/*
use a strided communicator; here we can't really use
the previously declared my_fwd_face and my_back_face
pointers since they don't map 1-to-1, so we just
compute the required base pointers and pass these
directly into the communicator construction
*/
int Nblocks = Ndof / Nvec(); // number of Nvec buffers we have
// start of last time slice chunk we are sending forwards
int endOffset = (volume - (j+1)*ghostFace[i]);
size_t offset[4];
void *base[4];
if (nSpin == 1) { // staggered is invariant with dagger
offset[2*0 + 0] = 0;
offset[2*1 + 0] = endOffset;
offset[2*0 + 1] = offset[2*0 + 0];
offset[2*1 + 1] = offset[2*1 + 0];
} else if (nSpin == 4) {
// !dagger: send last components backwards, send first components forwards
offset[2*0 + 0] = Nblocks*stride;
offset[2*1 + 0] = endOffset;
// dagger: send first components backwards, send last components forwards
offset[2*0 + 1] = 0;
offset[2*1 + 1] = Nblocks*stride + endOffset;
} else {
errorQuda("Unsupported number of spin components");
}
for (int k=0; k<4; k++) {
base[k] = static_cast<char*>(v) + offset[k]*Nvec()*precision; // total offset in bytes
}
size_t blksize = (j+1)*ghostFace[i]*Nvec()*precision; // (j+1) is number of faces
size_t Stride = stride*Nvec()*precision;
if (blksize * Nblocks != nbytes_Nface)
errorQuda("Total strided message size does not match expected size");
//printf("%d strided sends with Nface=%d Nblocks=%d blksize=%d Stride=%d\n", i, j+1, Nblocks, blksize, Stride);
for (int b=0; b<2; ++b) {
// only allocate a communicator for the present face (this needs to be cleaned up)
mh_send_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[2], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[0], i, -1, blksize, Nblocks, Stride) : NULL;
if (nSpin ==4) { // dagger communicators
mh_send_fwd[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[3], i, +1, blksize, Nblocks, Stride) : NULL;
mh_send_back[b][j][2*i+1] = (j+1 == nFace) ? comm_declare_strided_send_relative(base[1], i, -1, blksize, Nblocks, Stride) : NULL;
} else {
mh_send_fwd[b][j][2*i+1] = mh_send_fwd[b][j][2*i+0];
mh_send_back[b][j][2*i+1] = mh_send_back[b][j][2*i+0];
}
} // loop over b
if (precision == QUDA_HALF_PRECISION) {
int Nt_minus1_offset = (volume - nFace*ghostFace[3]); // The space-time coordinate of the start of the last time slice
void *norm_fwd = static_cast<float*>(norm) + Nt_minus1_offset;
void *norm_back = norm; // the first time slice has zero offset
for (int b=0; b<2; ++b) {
mh_send_norm_fwd[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_fwd, i, +1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_back[b][j][2*i+0] = (j+1 == nFace) ? comm_declare_send_relative(norm_back, i, -1, surfaceCB[i]*(j+1)*sizeof(float)) : NULL;
mh_send_norm_fwd[b][j][2*i+1] = mh_send_norm_fwd[b][j][2*i];
mh_send_norm_back[b][j][2*i+1] = mh_send_norm_back[b][j][2*i];
}
}
}
if (precision == QUDA_HALF_PRECISION) {
for (int b=0; b<2; ++b) {
mh_recv_norm_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_norm_face[b][i], i, +1, nbytes_Nface_norm) : NULL;
mh_recv_norm_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_norm_face[b][i], i, -1, nbytes_Nface_norm) : NULL;
}
}
#endif // GPU_COMMS
for (int b=0; b<2; ++b) {
mh_recv_fwd[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_fwd_face[b][i], i, +1, nbytes_Nface) : NULL;
mh_recv_back[b][j][i] = (j+1 == nFace) ? comm_declare_receive_relative(from_back_face[b][i], i, -1, nbytes_Nface) : NULL;
}
} // loop over dimension
}
bufferMessageHandler = bufferPinnedResizeCount;
initComms = true;
nFaceComms = nFace;
checkCudaError();
}
if (ghost_field_reset) destroyIPCComms();
createIPCComms();
}
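// exchange CUDA IPC memory and event handles with peer-to-peer capable neighbours so that ghost faces can be written directly into the remote ghost_field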
void cudaColorSpinorField::createIPCComms() {
if ( initIPCComms && !ghost_field_reset ) return;
if (!initComms) errorQuda("Can only be called after create comms");
if ( (!ghost_field[0] || !ghost_field[1]) && comm_size() > 1) errorQuda("ghost_field appears not to be allocated");
// handles for obtained ghost pointers
cudaIpcMemHandle_t ipcRemoteGhostDestHandle[2][2][QUDA_MAX_DIM];
for (int b=0; b<2; b++) {
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
for (int dir=0; dir<2; ++dir) {
MsgHandle* sendHandle = NULL;
MsgHandle* receiveHandle = NULL;
int disp = (dir == 1) ? +1 : -1;
// first set up receive
if (comm_peer2peer_enabled(1-dir,dim)) {
receiveHandle = comm_declare_receive_relative(&ipcRemoteGhostDestHandle[b][1-dir][dim],
dim, -disp,
sizeof(ipcRemoteGhostDestHandle[b][1-dir][dim]));
}
// now send
if (comm_peer2peer_enabled(dir,dim)) {
cudaIpcMemHandle_t ipcLocalGhostDestHandle;
cudaIpcGetMemHandle(&ipcLocalGhostDestHandle, ghost_field[b]);
sendHandle = comm_declare_send_relative(&ipcLocalGhostDestHandle,
dim, disp,
sizeof(ipcLocalGhostDestHandle));
}
if (receiveHandle) comm_start(receiveHandle);
if (sendHandle) comm_start(sendHandle);
if (receiveHandle) comm_wait(receiveHandle);
if (sendHandle) comm_wait(sendHandle);
if (sendHandle) comm_free(sendHandle);
if (receiveHandle) comm_free(receiveHandle);
}
}
checkCudaError();
// open the remote memory handles and set the send ghost pointers
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
const int num_dir = (comm_dim(dim) == 2) ? 1 : 2;
for (int dir=0; dir<num_dir; ++dir) {
if (!comm_peer2peer_enabled(dir,dim)) continue;
void **ghostDest = (dir==0) ? (&backGhostSendDest[b][dim]) : &(fwdGhostSendDest[b][dim]);
cudaIpcOpenMemHandle(ghostDest, ipcRemoteGhostDestHandle[b][dir][dim],
cudaIpcMemLazyEnablePeerAccess);
}
if (num_dir == 1) fwdGhostSendDest[b][dim] = backGhostSendDest[b][dim];
}
} // buffer index
checkCudaError();
// handles for obtained events
cudaIpcEventHandle_t ipcRemoteEventHandle[2][2][QUDA_MAX_DIM];
// Note that no b index is necessary here
// Now communicate the event handles
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
for (int dir=0; dir<2; ++dir) {
for (int b=0; b<2; b++) {
MsgHandle* sendHandle = NULL;
MsgHandle* receiveHandle = NULL;
int disp = (dir == 1) ? +1 : -1;
// first set up receive
if (comm_peer2peer_enabled(1-dir,dim)) {
receiveHandle = comm_declare_receive_relative(&ipcRemoteEventHandle[b][1-dir][dim], dim, -disp,
sizeof(ipcRemoteEventHandle[b][1-dir][dim]));
}
// now send
if (comm_peer2peer_enabled(dir,dim)) {
cudaEventCreate(&ipcCopyEvent[b][dir][dim], cudaEventDisableTiming | cudaEventInterprocess);
cudaIpcEventHandle_t ipcLocalEventHandle;
cudaIpcGetEventHandle(&ipcLocalEventHandle, ipcCopyEvent[b][dir][dim]);
sendHandle = comm_declare_send_relative(&ipcLocalEventHandle, dim, disp,
sizeof(ipcLocalEventHandle));
}
if (receiveHandle) comm_start(receiveHandle);
if (sendHandle) comm_start(sendHandle);
if (receiveHandle) comm_wait(receiveHandle);
if (sendHandle) comm_wait(sendHandle);
if (sendHandle) comm_free(sendHandle);
if (receiveHandle) comm_free(receiveHandle);
} // buffer index
}
}
checkCudaError();
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
for (int dir=0; dir<2; ++dir) {
if (!comm_peer2peer_enabled(dir,dim)) continue;
for (int b=0; b<2; b++) {
cudaIpcOpenEventHandle(&(ipcRemoteCopyEvent[b][dir][dim]), ipcRemoteEventHandle[b][dir][dim]);
}
}
}
// Create message handles for IPC synchronization
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
if (comm_peer2peer_enabled(1,dim)) {
for (int b=0; b<2; b++) {
// send to processor in forward direction
mh_send_p2p_fwd[b][dim] = comm_declare_send_relative(&buffer_send_p2p_fwd[b][dim], dim, +1, sizeof(int));
// receive from processor in forward direction
mh_recv_p2p_fwd[b][dim] = comm_declare_receive_relative(&buffer_recv_p2p_fwd[b][dim], dim, +1, sizeof(int));
}
}
if (comm_peer2peer_enabled(0,dim)) {
for (int b=0; b<2; b++) {
// send to processor in backward direction
mh_send_p2p_back[b][dim] = comm_declare_send_relative(&buffer_recv_p2p_back[b][dim], dim, -1, sizeof(int));
// receive from processor in backward direction
mh_recv_p2p_back[b][dim] = comm_declare_receive_relative(&buffer_recv_p2p_back[b][dim], dim, -1, sizeof(int));
}
}
}
checkCudaError();
initIPCComms = true;
ghost_field_reset = false;
}
void cudaColorSpinorField::destroyIPCComms() {
if (!initIPCComms) return;
checkCudaError();
for (int dim=0; dim<4; ++dim) {
if (comm_dim(dim)==1) continue;
const int num_dir = (comm_dim(dim) == 2) ? 1 : 2;
for (int b=0; b<2; b++) {
if (comm_peer2peer_enabled(1,dim)) {
comm_free(mh_send_p2p_fwd[b][dim]);
comm_free(mh_recv_p2p_fwd[b][dim]);
cudaEventDestroy(ipcCopyEvent[b][1][dim]);
// only close this handle if it doesn't alias the back ghost
if (num_dir == 2) cudaIpcCloseMemHandle(fwdGhostSendDest[b][dim]);
}
if (comm_peer2peer_enabled(0,dim)) {
comm_free(mh_send_p2p_back[b][dim]);
comm_free(mh_recv_p2p_back[b][dim]);
cudaEventDestroy(ipcCopyEvent[b][0][dim]);
cudaIpcCloseMemHandle(backGhostSendDest[b][dim]);
}
} // buffer
} // iterate over dim
checkCudaError();
initIPCComms = false;
}
void cudaColorSpinorField::destroyComms()
{
if (initComms) {
for (int b=0; b<2; ++b) {
for (int j=0; j<maxNface; j++) {
for (int i=0; i<nDimComms; i++) {
if (commDimPartitioned(i)) {
if (mh_recv_fwd[b][j][i]) comm_free(mh_recv_fwd[b][j][i]);
if (mh_recv_back[b][j][i]) comm_free(mh_recv_back[b][j][i]);
if (mh_send_fwd[b][j][2*i]) comm_free(mh_send_fwd[b][j][2*i]);
if (mh_send_back[b][j][2*i]) comm_free(mh_send_back[b][j][2*i]);
// only in a special case are these not aliasing pointers
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
if (mh_recv_norm_fwd[b][j][i]) comm_free(mh_recv_norm_fwd[b][j][i]);
if (mh_recv_norm_back[b][j][i]) comm_free(mh_recv_norm_back[b][j][i]);
if (mh_send_norm_fwd[b][j][2*i]) comm_free(mh_send_norm_fwd[b][j][2*i]);
if (mh_send_norm_back[b][j][2*i]) comm_free(mh_send_norm_back[b][j][2*i]);
}
if (i == 3 && !getKernelPackT() && nSpin == 4) {
if (mh_send_fwd[b][j][2*i+1]) comm_free(mh_send_fwd[b][j][2*i+1]);
if (mh_send_back[b][j][2*i+1]) comm_free(mh_send_back[b][j][2*i+1]);
}
#endif // GPU_COMMS
}
}
delete []mh_recv_fwd[b][j];
delete []mh_recv_back[b][j];
delete []mh_send_fwd[b][j];
delete []mh_send_back[b][j];
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
delete []mh_recv_norm_fwd[b][j];
delete []mh_recv_norm_back[b][j];
delete []mh_send_norm_fwd[b][j];
delete []mh_send_norm_back[b][j];
}
#endif
}
delete []mh_recv_fwd[b];
delete []mh_recv_back[b];
delete []mh_send_fwd[b];
delete []mh_send_back[b];
for (int i=0; i<nDimComms; i++) {
my_fwd_face[b][i] = NULL;
my_back_face[b][i] = NULL;
from_fwd_face[b][i] = NULL;
from_back_face[b][i] = NULL;
}
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) {
delete []mh_recv_norm_fwd[b];
delete []mh_recv_norm_back[b];
delete []mh_send_norm_fwd[b];
delete []mh_send_norm_back[b];
}
for (int i=0; i<nDimComms; i++) {
my_fwd_norm_face[b][i] = NULL;
my_back_norm_face[b][i] = NULL;
from_fwd_norm_face[b][i] = NULL;
from_back_norm_face[b][i] = NULL;
}
#endif
} // loop over b
initComms = false;
checkCudaError();
}
}
void cudaColorSpinorField::streamInit(cudaStream_t *stream_p) {
stream = stream_p;
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, cudaStream_t *stream_p,
bool zero_copy, double a, double b) {
createComms(nFace); // must call this first
stream = stream_p;
const int dim=-1; // pack all partitioned dimensions
if (zero_copy) {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d[bufferIndex], true, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], 0, false, a, b);
}
}
void cudaColorSpinorField::pack(int nFace, int parity, int dagger, int stream_idx,
bool zeroCopyPack, double a, double b) {
createComms(nFace); // must call this first
const int dim=-1; // pack all partitioned dimensions
if (zeroCopyPack) {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], my_face_d[bufferIndex], true, a, b);
} else {
packGhost(nFace, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[stream_idx], 0, false, a, b);
}
}
void cudaColorSpinorField::packExtended(const int nFace, const int R[], const int parity,
const int dagger, const int dim,
cudaStream_t *stream_p, const bool zero_copy) {
createComms(nFace); // must call this first
stream = stream_p;
if (zero_copy) {
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[0], my_face_d[bufferIndex], true);
}else{
packGhostExtended(nFace, R, (QudaParity)parity, dim, QUDA_BOTH_DIRS, dagger, &stream[Nstream-1], 0, false);
}
}
void cudaColorSpinorField::gather(int nFace, int dagger, int dir, cudaStream_t* stream_p)
{
int dim = dir/2;
// If stream_p != 0, use pack_stream, else use the stream array
cudaStream_t *pack_stream = (stream_p) ? stream_p : stream+dir;
if (dir%2 == 0) {
// backwards copy to host
if (comm_peer2peer_enabled(0,dim)) return;
sendGhost(my_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, pack_stream);
} else {
// forwards copy to host
if (comm_peer2peer_enabled(1,dim)) return;
sendGhost(my_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, pack_stream);
}
}
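// post the receive for the ghost zone arriving from the given direction: a host-staged receive, or the handshake message when peer-to-peer is enabled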
void cudaColorSpinorField::recvStart(int nFace, int dir, int dagger, cudaStream_t* stream_p) {
int dim = dir/2;
if (!commDimPartitioned(dim)) return;
if (dir%2 == 0) { // sending backwards
if (comm_peer2peer_enabled(1,dim)) {
// receive from the processor in the +1 direction
comm_start(mh_recv_p2p_fwd[bufferIndex][dim]);
} else {
// Prepost receive
comm_start(mh_recv_fwd[bufferIndex][nFace-1][dim]);
}
} else { // sending forwards
// Prepost receive
if (comm_peer2peer_enabled(0,dim)) {
comm_start(mh_recv_p2p_back[bufferIndex][dim]);
} else {
comm_start(mh_recv_back[bufferIndex][nFace-1][dim]);
}
}
}
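// start the ghost-zone send: host-staged comms just start the pre-declared send handle, while the peer-to-peer path copies the face
// directly into the neighbour's ghost buffer on the copy stream, records an IPC event and sends the handshake message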
void cudaColorSpinorField::sendStart(int nFace, int d, int dagger, cudaStream_t* stream_p) {
int dim = d/2;
int dir = d%2;
if (!commDimPartitioned(dim)) return;
int Nvec = (nSpin == 1 || precision == QUDA_DOUBLE_PRECISION) ? 2 : 4;
int Nint = (nColor * nSpin * 2)/(nSpin == 4 ? 2 : 1); // (spin proj.) degrees of freedom
int Npad = Nint/Nvec;
if (!comm_peer2peer_enabled(dir,dim)) {
if (dir == 0) comm_start(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
else comm_start(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
} else { // doing peer-to-peer
cudaStream_t *copy_stream = (stream_p) ? stream_p : stream + d;
// peer-to-peer path: copy the ghost face directly into the neighbour's ghost buffer
void* ghost_dst = (dir == 0) ? (void*)((char*)(backGhostSendDest[bufferIndex][dim]) + precision*ghostOffset[dim][1]) :
(void*)((char*)(fwdGhostSendDest[bufferIndex][dim]) + precision*ghostOffset[dim][0]);
void *ghost_norm_dst = (dir == 0) ? static_cast<char*>(backGhostSendDest[bufferIndex][dim]) + QUDA_SINGLE_PRECISION*ghostNormOffset[dim][1] :
static_cast<char*>(fwdGhostSendDest[bufferIndex][dim]) + QUDA_SINGLE_PRECISION*ghostNormOffset[dim][0];
if (dim != 3 || getKernelPackT()) {
cudaMemcpyAsync(ghost_dst,
dir == 0 ? backGhostFaceBuffer[bufferIndex][dim] : fwdGhostFaceBuffer[bufferIndex][dim],
ghost_face_bytes[dim],
cudaMemcpyDeviceToDevice,
*copy_stream); // copy to forward processor
} else if (this->TwistFlavor() != QUDA_TWIST_NONDEG_DOUBLET) {
const int x4 = nDim==5 ? x[4] : 1;
const int Nt_minus_offset = (volumeCB - nFace*ghostFace[3])/x4;
int offset = 0;
if (nSpin == 1) {
offset = (dir == 0) ? 0 : Nt_minus_offset;
} else if (nSpin == 4) {
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false;
if (dir == 1) upper = !upper;
int lower_spin_offset = Npad*stride;
if (dir == 0) {
offset = upper ? 0 : lower_spin_offset;
} else {
offset = (upper) ? Nt_minus_offset : lower_spin_offset + Nt_minus_offset;
}
}
size_t len = nFace*(ghostFace[3]/x4)*Nvec*precision;
size_t dpitch = x4*len;
size_t spitch = stride*Nvec*precision;
for (int s=0; s<x4; s++) {
void *dst = (char*)ghost_dst + s*len;
void *src = (char*)v + (offset + s*(volumeCB/x4))*Nvec*precision;
// start the copy
cudaMemcpy2DAsync(dst, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToDevice, *copy_stream);
if (precision == QUDA_HALF_PRECISION) {
size_t len = nFace*(ghostFace[3]/x4)*sizeof(float);
int norm_offset = (dir == 0) ? 0 : Nt_minus_offset*sizeof(float);
void *dst = (char*)ghost_norm_dst + s*len;
void *src = static_cast<char*>(norm) + norm_offset + s*(volumeCB/x4)*sizeof(float);
cudaMemcpyAsync(dst, src, len, cudaMemcpyDeviceToDevice, *copy_stream);
}
}
} else { // twisted doublet
int flavorVolume = volume / 2;
int flavorTFace = ghostFace[3] / 2;
int flavor1_Nt_minus1_offset = (flavorVolume - flavorTFace);
int flavor2_Nt_minus1_offset = (volume - flavorTFace);
int flavor1_offset = 0;
int flavor2_offset = 0;
// !dagger: send lower components backwards, send upper components forwards
// dagger: send upper components backwards, send lower components forwards
bool upper = dagger ? true : false; // Fwd is !Back
if (dir == 1) upper = !upper;
int lower_spin_offset = Npad*stride;//ndeg tm: stride=2*flavor_volume+pad
if (upper) {
flavor1_offset = (dir == 0 ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = (dir == 0 ? flavorVolume : flavor2_Nt_minus1_offset);
}else{
flavor1_offset = lower_spin_offset + (dir == 0 ? 0 : flavor1_Nt_minus1_offset);
flavor2_offset = lower_spin_offset + (dir == 0 ? flavorVolume : flavor2_Nt_minus1_offset);
}
// QUDA Memcpy NPad's worth.
// -- Dest will point to the right beginning PAD.
// -- Each Pad has size Nvec*Vsh Floats.
// -- There is Nvec*Stride Floats from the start of one PAD to the start of the next
void *src = static_cast<char*>(v) + flavor1_offset*Nvec*precision;
size_t len = flavorTFace*Nvec*precision;
size_t spitch = stride*Nvec*precision;//ndeg tm: stride=2*flavor_volume+pad
size_t dpitch = 2*len;
cudaMemcpy2DAsync(ghost_dst, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToDevice, *copy_stream);
src = static_cast<char*>(v) + flavor2_offset*Nvec*precision;
cudaMemcpy2DAsync(static_cast<char*>(ghost_dst)+len, dpitch, src, spitch, len, Npad, cudaMemcpyDeviceToDevice, *copy_stream);
if (precision == QUDA_HALF_PRECISION) {
int norm_offset = (dir == 0) ? 0 : flavor1_Nt_minus1_offset*sizeof(float);
void *src = static_cast<char*>(norm) + norm_offset;
size_t dpitch = flavorTFace*sizeof(float);
size_t spitch = flavorVolume*sizeof(float);
cudaMemcpy2DAsync(ghost_norm_dst, dpitch, src, spitch, flavorTFace*sizeof(float), 2, cudaMemcpyDeviceToDevice, *copy_stream);
}
}
if (dir == 0) {
// record the event
cudaEventRecord(ipcCopyEvent[bufferIndex][0][dim], *copy_stream);
// send to the processor in the -1 direction
comm_start(mh_send_p2p_back[bufferIndex][dim]);
} else {
cudaEventRecord(ipcCopyEvent[bufferIndex][1][dim], *copy_stream);
// send to the processor in the +1 direction
comm_start(mh_send_p2p_fwd[bufferIndex][dim]);
}
}
}
void cudaColorSpinorField::commsStart(int nFace, int dir, int dagger, cudaStream_t* stream_p) {
recvStart(nFace, dir, dagger, stream_p);
sendStart(nFace, dir, dagger, stream_p);
}
bool cudaColorSpinorField::ipcCopyComplete(int dir, int dim)
{
return (cudaSuccess == cudaEventQuery(ipcCopyEvent[bufferIndex][dir][dim]) ? true : false);
}
bool cudaColorSpinorField::ipcRemoteCopyComplete(int dir, int dim)
{
return (cudaSuccess == cudaEventQuery(ipcRemoteCopyEvent[bufferIndex][dir][dim]) ? true : false);
}
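// per-dimension completion flags that let commsQuery() remember which half of a send/receive pair has already finished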
static bool complete_recv_fwd[QUDA_MAX_DIM] = { };
static bool complete_recv_back[QUDA_MAX_DIM] = { };
static bool complete_send_fwd[QUDA_MAX_DIM] = { };
static bool complete_send_back[QUDA_MAX_DIM] = { };
int cudaColorSpinorField::commsQuery(int nFace, int dir, int dagger, cudaStream_t *stream_p) {
int dim = dir/2;
if (!commDimPartitioned(dim)) return 0;
if (dir%2==0) {
if (comm_peer2peer_enabled(1,dim)) {
if (!complete_recv_fwd[dim]) complete_recv_fwd[dim] = comm_query(mh_recv_p2p_fwd[bufferIndex][dim]);
} else {
if (!complete_recv_fwd[dim]) complete_recv_fwd[dim] = comm_query(mh_recv_fwd[bufferIndex][nFace-1][dim]);
}
if (comm_peer2peer_enabled(0,dim)) {
if (!complete_send_back[dim]) complete_send_back[dim] = comm_query(mh_send_p2p_back[bufferIndex][dim]);
} else {
if (!complete_send_back[dim]) complete_send_back[dim] = comm_query(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
}
if (complete_recv_fwd[dim] && complete_send_back[dim]) {
complete_recv_fwd[dim] = false;
complete_send_back[dim] = false;
return 1;
}
} else { // dir%2 == 1
if (comm_peer2peer_enabled(0,dim)) {
if (!complete_recv_back[dim]) complete_recv_back[dim] = comm_query(mh_recv_p2p_back[bufferIndex][dim]);
} else {
if (!complete_recv_back[dim]) complete_recv_back[dim] = comm_query(mh_recv_back[bufferIndex][nFace-1][dim]);
}
if (comm_peer2peer_enabled(1,dim)) {
if (!complete_send_fwd[dim]) complete_send_fwd[dim] = comm_query(mh_send_p2p_fwd[bufferIndex][dim]);
} else {
if (!complete_send_fwd[dim]) complete_send_fwd[dim] = comm_query(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
}
if (complete_recv_back[dim] && complete_send_fwd[dim]) {
complete_recv_back[dim] = false;
complete_send_fwd[dim] = false;
return 1;
}
}
return 0;
}
void cudaColorSpinorField::commsWait(int nFace, int dir, int dagger, cudaStream_t *stream_p) {
int dim = dir / 2;
if (!commDimPartitioned(dim)) return;
if (dir%2==0) {
if (comm_peer2peer_enabled(1,dim)) {
comm_wait(mh_recv_p2p_fwd[bufferIndex][dim]);
cudaEventSynchronize(ipcRemoteCopyEvent[bufferIndex][1][dim]);
} else {
comm_wait(mh_recv_fwd[bufferIndex][nFace-1][dim]);
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) comm_wait(mh_recv_norm_fwd[bufferIndex][nFace-1][dim]);
#endif
}
if (comm_peer2peer_enabled(0,dim)) {
comm_wait(mh_send_p2p_back[bufferIndex][dim]);
cudaEventSynchronize(ipcCopyEvent[bufferIndex][0][dim]);
} else {
comm_wait(mh_send_back[bufferIndex][nFace-1][2*dim+dagger]);
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) comm_wait(mh_send_norm_back[bufferIndex][nFace-1][2*dim+dagger]);
#endif
}
} else {
if (comm_peer2peer_enabled(0,dim)) {
comm_wait(mh_recv_p2p_back[bufferIndex][dim]);
cudaEventSynchronize(ipcRemoteCopyEvent[bufferIndex][0][dim]);
} else {
comm_wait(mh_recv_back[bufferIndex][nFace-1][dim]);
#ifdef GPU_COMMS
comm_wait(mh_recv_norm_back[bufferIndex][nFace-1][dim]);
#endif
}
if (comm_peer2peer_enabled(1,dim)) {
comm_wait(mh_send_p2p_fwd[bufferIndex][dim]);
cudaEventSynchronize(ipcCopyEvent[bufferIndex][1][dim]);
} else {
comm_wait(mh_send_fwd[bufferIndex][nFace-1][2*dim+dagger]);
#ifdef GPU_COMMS
if (precision == QUDA_HALF_PRECISION) comm_wait(mh_send_norm_fwd[bufferIndex][nFace-1][2*dim+dagger]);
#endif
}
}
return;
}
const cudaEvent_t& cudaColorSpinorField::getIPCRemoteCopyEvent(int dir, int dim) const {
return ipcRemoteCopyEvent[bufferIndex][dir][dim];
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir, cudaStream_t* stream_p)
{
int dim = dir/2;
if (!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
if (comm_peer2peer_enabled(1,dim)) return;
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, stream_p);
} else { // receive from backwards
if (comm_peer2peer_enabled(0,dim)) return;
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, stream_p);
}
}
void cudaColorSpinorField::scatter(int nFace, int dagger, int dir)
{
int dim = dir/2;
if (!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
if (comm_peer2peer_enabled(1,dim)) return;
unpackGhost(from_fwd_face[bufferIndex][dim], nFace, dim, QUDA_FORWARDS, dagger, &stream[2*dim+0]);
} else { // receive from backwards
if (comm_peer2peer_enabled(0,dim)) return;
unpackGhost(from_back_face[bufferIndex][dim], nFace, dim, QUDA_BACKWARDS, dagger, &stream[2*dim+1]);
}
}
void cudaColorSpinorField::scatterExtended(int nFace, int parity, int dagger, int dir)
{
bool zero_copy = false;
int dim = dir/2;
if (!commDimPartitioned(dim)) return;
if (dir%2==0) {// receive from forwards
unpackGhostExtended(from_fwd_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_FORWARDS, dagger, &stream[2*dim/*+0*/], zero_copy);
} else { // receive from backwards
unpackGhostExtended(from_back_face[bufferIndex][dim], nFace, static_cast<QudaParity>(parity), dim, QUDA_BACKWARDS, dagger, &stream[2*dim/*+1*/], zero_copy);
}
}
void cudaColorSpinorField::exchangeGhost(QudaParity parity, int dagger) const {
void **send = static_cast<void**>(safe_malloc(nDimComms * 2 * sizeof(void*)));
// allocate ghost buffer if not yet allocated
allocateGhostBuffer(send, ghost_fixme);
genericPackGhost(send, *this, parity, dagger);
int nFace = (nSpin == 1) ? 3 : 1;
exchange(ghost_fixme, send, nFace);
host_free(send);
}
std::ostream& operator<<(std::ostream &out, const cudaColorSpinorField &a) {
out << (const ColorSpinorField&)a;
out << "v = " << a.v << std::endl;
out << "norm = " << a.norm << std::endl;
out << "alloc = " << a.alloc << std::endl;
out << "init = " << a.init << std::endl;
return out;
}
//! for composite fields:
cudaColorSpinorField& cudaColorSpinorField::Component(const int idx) const {
if (this->IsComposite()) {
if (idx < this->CompositeDim()) { // set up eigenvector from the set
return *(dynamic_cast<cudaColorSpinorField*>(components[idx]));
}
else{
errorQuda("Incorrect component index...");
}
}
errorQuda("Cannot get requested component");
exit(-1);
}
// copyCuda currently cannot work with a set of spinor fields
void cudaColorSpinorField::CopySubset(cudaColorSpinorField &dst, const int range, const int first_element) const{
#if 0
if (first_element < 0) errorQuda("\nError: trying to set negative first element.\n");
if (siteSubset == QUDA_PARITY_SITE_SUBSET && this->EigvId() == -1) {
if (first_element == 0 && range == this->EigvDim())
{
if (range != dst.EigvDim()) errorQuda("\nError: eigenvector range too big.\n");
checkField(dst, *this);
copyCuda(dst, *this);
}
else if ((first_element+range) < this->EigvDim())
{//setup eigenvector subset
cudaColorSpinorField *eigv_subset;
ColorSpinorParam param;
param.nColor = nColor;
param.nSpin = nSpin;
param.twistFlavor = twistFlavor;
param.precision = precision;
param.nDim = nDim;
param.pad = pad;
param.siteSubset = siteSubset;
param.siteOrder = siteOrder;
param.fieldOrder = fieldOrder;
param.gammaBasis = gammaBasis;
memcpy(param.x, x, nDim*sizeof(int));
param.create = QUDA_REFERENCE_FIELD_CREATE;
param.eigv_dim = range;
param.eigv_id = -1;
param.v = (void*)((char*)v + first_element*eigv_bytes);
param.norm = (void*)((char*)norm + first_element*eigv_norm_bytes);
eigv_subset = new cudaColorSpinorField(param);
//Not really needed:
eigv_subset->eigenvectors.reserve(param.eigv_dim);
for (int id = first_element; id < (first_element+range); id++)
{
param.eigv_id = id;
eigv_subset->eigenvectors.push_back(new cudaColorSpinorField(*this, param));
}
checkField(dst, *eigv_subset);
copyCuda(dst, *eigv_subset);
delete eigv_subset;
} else {
errorQuda("Incorrect eigenvector dimension...");
}
} else{
errorQuda("Eigenvector must be a parity spinor");
exit(-1);
}
#endif
}
void cudaColorSpinorField::getTexObjectInfo() const
{
#ifdef USE_TEXTURE_OBJECTS
printfQuda("\nPrint texture info for the field:\n");
std::cout << *this;
cudaResourceDesc resDesc;
//memset(&resDesc, 0, sizeof(resDesc));
cudaGetTextureObjectResourceDesc(&resDesc, this->Tex());
printfQuda("\nDevice pointer: %p\n", resDesc.res.linear.devPtr);
printfQuda("\nVolume (in bytes): %lu\n", resDesc.res.linear.sizeInBytes);
if (resDesc.resType == cudaResourceTypeLinear) printfQuda("\nResource type: linear \n");
#endif
}
void cudaColorSpinorField::Source(const QudaSourceType sourceType, const int st, const int s, const int c) {
ColorSpinorParam param(*this);
param.fieldOrder = QUDA_SPACE_SPIN_COLOR_FIELD_ORDER;
param.location = QUDA_CPU_FIELD_LOCATION;
param.create = QUDA_NULL_FIELD_CREATE;
cpuColorSpinorField tmp(param);
tmp.Source(sourceType, st, s, c);
*this = tmp;
}
} // namespace quda
|
dd7035192db258825187065da474f0a699e77735.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define STEPS 2000027648
#define BLOCKS 512
#define THREADS 128
int threadidx;
double pi = 0;
// Kernel
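// Midpoint-rule estimate of pi = integral_0^1 4/(1+x^2) dx: each thread
// accumulates 4/(1+x_i^2) at x_i = (i+0.5)*base over a grid-stride loop;
// the host later sums the per-thread partials and multiplies by base.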
__global__ void pi_calculation(double* sum, int nsteps, double base, int nthreads, int nblocks)
{
int i;
double x;
int idx = blockIdx.x * blockDim.x + threadIdx.x; // Calculate index for each thread
for (i = idx; i < nsteps; i += nthreads * nblocks)
{
x = (i + 0.5) * base;
sum[idx] += 4.0 / (1.0 + x * x); //Save result to device memory
}
}
int main(void)
{
dim3 dimGrid(BLOCKS, 1, 1); // Grid dimensions
dim3 dimBlock(THREADS, 1, 1); // Block dimensions
double *sum; // Pointer to host & device arrays
double base = 1.0 / STEPS; // base size
size_t size = BLOCKS * THREADS * sizeof(double); //Array memory size
// Memory allocation
hipMallocManaged(&sum, size); // Allocate array on device
// Initialize array in device to 0
hipMemset(sum, 0, size);
clock_t start, end;
start = clock();
// Launch Kernel
pi_calculation << <dimGrid, dimBlock >> > (sum, STEPS, base, THREADS, BLOCKS);
// Sync
hipDeviceSynchronize();
// Do the final reduction.
for (threadidx = 0; threadidx < THREADS * BLOCKS; threadidx++)
pi += sum[threadidx];
// Multiply by base
pi *= base;
end = clock();
// Output Results
printf("Result = %20.18lf (%ld)\n", pi, end - start);
// Cleanup
hipFree(sum);
return 0;
}
| dd7035192db258825187065da474f0a699e77735.cu | #include <stdio.h>
#define STEPS 2000027648
#define BLOCKS 512
#define THREADS 128
int threadidx;
double pi = 0;
// Kernel
__global__ void pi_calculation(double* sum, int nsteps, double base, int nthreads, int nblocks)
{
int i;
double x;
int idx = blockIdx.x * blockDim.x + threadIdx.x; // Calculate index for each thread
for (i = idx; i < nsteps; i += nthreads * nblocks)
{
x = (i + 0.5) * base;
sum[idx] += 4.0 / (1.0 + x * x); //Save result to device memory
}
}
int main(void)
{
dim3 dimGrid(BLOCKS, 1, 1); // Grid dimensions
dim3 dimBlock(THREADS, 1, 1); // Block dimensions
double *sum; // Pointer to host & device arrays
double base = 1.0 / STEPS; // base size
size_t size = BLOCKS * THREADS * sizeof(double); //Array memory size
// Memory allocation
cudaMallocManaged(&sum, size); // Allocate array on device
// Initialize array in device to 0
cudaMemset(sum, 0, size);
clock_t start, end;
start = clock();
// Launch Kernel
pi_calculation << <dimGrid, dimBlock >> > (sum, STEPS, base, THREADS, BLOCKS);
// Sync
cudaDeviceSynchronize();
// Do the final reduction.
for (threadidx = 0; threadidx < THREADS * BLOCKS; threadidx++)
pi += sum[threadidx];
// Multiply by base
pi *= base;
end = clock();
// Output Results
printf("Result = %20.18lf (%ld)\n", pi, end - start);
// Cleanup
cudaFree(sum);
return 0;
}
|
54d485c21135bd097dc4abf630e4fc144473c1a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file GeoTrackView.test.cu
//---------------------------------------------------------------------------//
#include "GeoTrackView.test.hh"
#include <thrust/device_vector.h>
#include "base/KernelParamCalculator.cuda.hh"
#include "geometry/GeoTrackView.hh"
using thrust::raw_pointer_cast;
namespace celeritas_test
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__global__ void vgg_test_kernel(const GeoParamsPointers shared,
const GeoStatePointers state,
const int size,
const VGGTestInit* start,
const int max_segments,
VolumeId* ids,
double* distances)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= size)
return;
GeoTrackView geo(shared, state, tid);
geo = start[tid.get()];
for (int seg = 0; seg < max_segments; ++seg)
{
if (geo.is_outside())
break;
geo.find_next_step();
// Save current ID and distance to travel
ids[tid.get() * max_segments + seg] = geo.volume_id();
distances[tid.get() * max_segments + seg] = geo.next_step();
// Move next step
geo.move_next_step();
}
}
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
VGGTestOutput vgg_test(VGGTestInput input)
{
CELER_EXPECT(input.shared);
CELER_EXPECT(input.state);
CELER_EXPECT(input.init.size() == input.state.size);
CELER_EXPECT(input.max_segments > 0);
// Temporary device data for kernel
thrust::device_vector<VGGTestInit> init(input.init.begin(),
input.init.end());
thrust::device_vector<VolumeId> ids(input.init.size() * input.max_segments);
thrust::device_vector<double> distances(ids.size(), -1.0);
// Run kernel
celeritas::KernelParamCalculator calc_launch_params;
auto params = calc_launch_params(init.size());
hipLaunchKernelGGL(( vgg_test_kernel), dim3(params.grid_size), dim3(params.block_size), 0, 0,
input.shared,
input.state,
init.size(),
raw_pointer_cast(init.data()),
input.max_segments,
raw_pointer_cast(ids.data()),
raw_pointer_cast(distances.data()));
CELER_CUDA_CALL(hipDeviceSynchronize());
// Copy result back to CPU
VGGTestOutput result;
for (auto id : thrust::host_vector<VolumeId>(ids))
{
result.ids.push_back(id ? static_cast<int>(id.get()) : -1);
}
result.distances.resize(distances.size());
thrust::copy(distances.begin(), distances.end(), result.distances.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace celeritas_test
| 54d485c21135bd097dc4abf630e4fc144473c1a0.cu | //---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file GeoTrackView.test.cu
//---------------------------------------------------------------------------//
#include "GeoTrackView.test.hh"
#include <thrust/device_vector.h>
#include "base/KernelParamCalculator.cuda.hh"
#include "geometry/GeoTrackView.hh"
using thrust::raw_pointer_cast;
namespace celeritas_test
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__global__ void vgg_test_kernel(const GeoParamsPointers shared,
const GeoStatePointers state,
const int size,
const VGGTestInit* start,
const int max_segments,
VolumeId* ids,
double* distances)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= size)
return;
GeoTrackView geo(shared, state, tid);
geo = start[tid.get()];
for (int seg = 0; seg < max_segments; ++seg)
{
if (geo.is_outside())
break;
geo.find_next_step();
// Save current ID and distance to travel
ids[tid.get() * max_segments + seg] = geo.volume_id();
distances[tid.get() * max_segments + seg] = geo.next_step();
// Move next step
geo.move_next_step();
}
}
//---------------------------------------------------------------------------//
// TESTING INTERFACE
//---------------------------------------------------------------------------//
//! Run on device and return results
VGGTestOutput vgg_test(VGGTestInput input)
{
CELER_EXPECT(input.shared);
CELER_EXPECT(input.state);
CELER_EXPECT(input.init.size() == input.state.size);
CELER_EXPECT(input.max_segments > 0);
// Temporary device data for kernel
thrust::device_vector<VGGTestInit> init(input.init.begin(),
input.init.end());
thrust::device_vector<VolumeId> ids(input.init.size() * input.max_segments);
thrust::device_vector<double> distances(ids.size(), -1.0);
// Run kernel
celeritas::KernelParamCalculator calc_launch_params;
auto params = calc_launch_params(init.size());
vgg_test_kernel<<<params.grid_size, params.block_size>>>(
input.shared,
input.state,
init.size(),
raw_pointer_cast(init.data()),
input.max_segments,
raw_pointer_cast(ids.data()),
raw_pointer_cast(distances.data()));
CELER_CUDA_CALL(cudaDeviceSynchronize());
// Copy result back to CPU
VGGTestOutput result;
for (auto id : thrust::host_vector<VolumeId>(ids))
{
result.ids.push_back(id ? static_cast<int>(id.get()) : -1);
}
result.distances.resize(distances.size());
thrust::copy(distances.begin(), distances.end(), result.distances.begin());
return result;
}
//---------------------------------------------------------------------------//
} // namespace celeritas_test
|
9b49b9e15cccf8b132dee8a2958a67ab92b59290.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
int divUp(int a, int b);
__device__ float max_val(float x, float y)
{
return x > y ? x : y;
}
__global__ void cube2equi(const cv::cuda::PtrStep<uchar3> posy, const cv::cuda::PtrStep<uchar3> negx, const cv::cuda::PtrStep<uchar3> posx,
const cv::cuda::PtrStep<uchar3> negz, const cv::cuda::PtrStep<uchar3> negy, const cv::cuda::PtrStep<uchar3> posz,
cv::cuda::PtrStep<uchar3> dst, int rows, int cols, int dims)
{
float PI = 3.14159265358979323846;
float xPixel = 0;
float yPixel = 0;
float yTemp = 0;
float imageSelect = 0;
float increment = (dims * 2) / 100;
float counter = 0;
float percentCounter = 0;
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
float v;
float phi;
float u;
float theta;
float x;
float y;
float z;
float a;
float xx;
float yy;
float zz;
if (dst_x < cols && dst_y < rows)
{
if (counter <= dst_y)
{
percentCounter += 1;
counter += increment;
}
v = 1.0 - ((float(dst_y)) / (dims * 2));
phi = v * PI;
u = (float(dst_x)) / (dims * 4);
theta = u * 2 * PI;
// all of these range between 0 and 1
x = cos(theta) * sin(phi);
y = sin(theta) * sin(phi);
z = cos(phi);
a = max_val(max_val(abs(x), abs(y)), abs(z));
// one of these will equal either - 1 or +1
xx = x / a;
yy = y / a;
zz = z / a;
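// the axis whose normalised component is +/-1 (the dominant axis) selects which cube face this direction hits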
// format is left, front, right, back, bottom, top;
// therefore negx, posz, posx, negz, negy, posy
// square 1 left
if (yy == -1)
{
xPixel = (((-1.0 * tan(atan(x / y)) + 1.0) / 2.0) * dims);
yTemp = (((-1.0 * tan(atan(z / y)) + 1.0) / 2.0) * (dims - 1.0));
imageSelect = 1;
}
// square 2; front
else if (xx == 1)
{
xPixel = (((tan(atan(y / x)) + 1.0) / 2.0) * dims);
yTemp = (((tan(atan(z / x)) + 1.0) / 2.0) * dims);
imageSelect = 2;
}
// square 3; right
else if (yy == 1)
{
xPixel = (((-1 * tan(atan(x / y)) + 1.0) / 2.0) * dims);
yTemp = (((tan(atan(z / y)) + 1.0) / 2.0) * (dims - 1));
imageSelect = 3;
}
// square 4; back
else if (xx == -1) {
xPixel = (((tan(atan(y / x)) + 1.0) / 2.0) * dims);
yTemp = (((-1 * tan(atan(z / x)) + 1.0) / 2.0) * (dims - 1));
imageSelect = 4;
}
// square 5; bottom
else if (zz == 1)
{
xPixel = (((tan(atan(y / z)) + 1.0) / 2.0) * dims);
yTemp = (((-1 * tan(atan(x / z)) + 1.0) / 2.0) * (dims - 1));
imageSelect = 5;
}
// square 6; top
else if (zz == -1)
{
xPixel = (((-1 * tan(atan(y / z)) + 1.0) / 2.0) * dims);
yTemp = (((-1 * tan(atan(x / z)) + 1.0) / 2.0) * (dims - 1));
imageSelect = 6;
}
yPixel = yTemp > dims - 1 ? (dims - 1) : yTemp;
if (yPixel > dims - 1)
yPixel = dims - 1;
if (xPixel > dims - 1)
xPixel = dims - 1;
if (imageSelect == 1)
{
dst(dst_y, dst_x).x = posy(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = posy(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = posy(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 2)
{
dst(dst_y, dst_x).x = posx(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = posx(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = posx(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 3)
{
dst(dst_y, dst_x).x = negy(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = negy(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = negy(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 4)
{
dst(dst_y, dst_x).x = negx(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = negx(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = negx(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 5)
{
dst(dst_y, dst_x).x = negz(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = negz(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = negz(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 6)
{
dst(dst_y, dst_x).x = posz(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = posz(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = posz(int(yPixel), int(xPixel)).z;
}
}
}
void cube2equiCUDA(cv::cuda::GpuMat& posy, cv::cuda::GpuMat& negx, cv::cuda::GpuMat& posx, cv::cuda::GpuMat& negz, cv::cuda::GpuMat& negy, cv::cuda::GpuMat& posz, cv::cuda::GpuMat& dst, int dims)
{
const dim3 block(32, 32);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
cube2equi << <grid, block >> > (posy, negx, posx, negz, negy, posz, dst, dst.rows, dst.cols, dims);
} | 9b49b9e15cccf8b132dee8a2958a67ab92b59290.cu | #include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
int divUp(int a, int b);
__device__ float max_val(float x, float y)
{
return x > y ? x : y;
}
__global__ void cube2equi(const cv::cuda::PtrStep<uchar3> posy, const cv::cuda::PtrStep<uchar3> negx, const cv::cuda::PtrStep<uchar3> posx,
const cv::cuda::PtrStep<uchar3> negz, const cv::cuda::PtrStep<uchar3> negy, const cv::cuda::PtrStep<uchar3> posz,
cv::cuda::PtrStep<uchar3> dst, int rows, int cols, int dims)
{
float PI = 3.14159265358979323846;
float xPixel = 0;
float yPixel = 0;
float yTemp = 0;
float imageSelect = 0;
float increment = (dims * 2) / 100;
float counter = 0;
float percentCounter = 0;
const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
float v;
float phi;
float u;
float theta;
float x;
float y;
float z;
float a;
float xx;
float yy;
float zz;
if (dst_x < cols && dst_y < rows)
{
if (counter <= dst_y)
{
percentCounter += 1;
counter += increment;
}
v = 1.0 - ((float(dst_y)) / (dims * 2));
phi = v * PI;
u = (float(dst_x)) / (dims * 4);
theta = u * 2 * PI;
// all of these range between 0 and 1
x = cos(theta) * sin(phi);
y = sin(theta) * sin(phi);
z = cos(phi);
a = max_val(max_val(abs(x), abs(y)), abs(z));
// one of these will equal either - 1 or +1
xx = x / a;
yy = y / a;
zz = z / a;
// format is left, front, right, back, bottom, top;
// therefore negx, posz, posx, negz, negy, posy
// square 1 left
if (yy == -1)
{
xPixel = (((-1.0 * tan(atan(x / y)) + 1.0) / 2.0) * dims);
yTemp = (((-1.0 * tan(atan(z / y)) + 1.0) / 2.0) * (dims - 1.0));
imageSelect = 1;
}
// square 2; front
else if (xx == 1)
{
xPixel = (((tan(atan(y / x)) + 1.0) / 2.0) * dims);
yTemp = (((tan(atan(z / x)) + 1.0) / 2.0) * dims);
imageSelect = 2;
}
// square 3; right
else if (yy == 1)
{
xPixel = (((-1 * tan(atan(x / y)) + 1.0) / 2.0) * dims);
yTemp = (((tan(atan(z / y)) + 1.0) / 2.0) * (dims - 1));
imageSelect = 3;
}
// square 4; back
else if (xx == -1) {
xPixel = (((tan(atan(y / x)) + 1.0) / 2.0) * dims);
yTemp = (((-1 * tan(atan(z / x)) + 1.0) / 2.0) * (dims - 1));
imageSelect = 4;
}
// square 5; bottom
else if (zz == 1)
{
xPixel = (((tan(atan(y / z)) + 1.0) / 2.0) * dims);
yTemp = (((-1 * tan(atan(x / z)) + 1.0) / 2.0) * (dims - 1));
imageSelect = 5;
}
// square 6; top
else if (zz == -1)
{
xPixel = (((-1 * tan(atan(y / z)) + 1.0) / 2.0) * dims);
yTemp = (((-1 * tan(atan(x / z)) + 1.0) / 2.0) * (dims - 1));
imageSelect = 6;
}
yPixel = yTemp > dims - 1 ? (dims - 1) : yTemp;
if (yPixel > dims - 1)
yPixel = dims - 1;
if (xPixel > dims - 1)
xPixel = dims - 1;
if (imageSelect == 1)
{
dst(dst_y, dst_x).x = posy(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = posy(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = posy(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 2)
{
dst(dst_y, dst_x).x = posx(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = posx(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = posx(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 3)
{
dst(dst_y, dst_x).x = negy(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = negy(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = negy(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 4)
{
dst(dst_y, dst_x).x = negx(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = negx(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = negx(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 5)
{
dst(dst_y, dst_x).x = negz(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = negz(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = negz(int(yPixel), int(xPixel)).z;
}
else if (imageSelect == 6)
{
dst(dst_y, dst_x).x = posz(int(yPixel), int(xPixel)).x;
dst(dst_y, dst_x).y = posz(int(yPixel), int(xPixel)).y;
dst(dst_y, dst_x).z = posz(int(yPixel), int(xPixel)).z;
}
}
}
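// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the kernel above
// picks a cube face by dividing the direction vector (x, y, z) by its largest
// absolute component, so exactly one of xx, yy, zz becomes +/-1. The host
// helper below repeats only that face-selection step for a single output
// pixel and returns the same 1..6 codes as imageSelect; the function name is
// an assumption of this sketch, and it relies on the math functions already
// pulled in by the headers included above.
static inline int cube_face_for_pixel_sketch(int px, int py, int dims)
{
	const float PI = 3.14159265358979323846f;
	const float v = 1.0f - (float(py) / (dims * 2));
	const float phi = v * PI;
	const float u = float(px) / (dims * 4);
	const float theta = u * 2.0f * PI;
	const float x = cosf(theta) * sinf(phi);
	const float y = sinf(theta) * sinf(phi);
	const float z = cosf(phi);
	const float a = fmaxf(fmaxf(fabsf(x), fabsf(y)), fabsf(z));
	const float xx = x / a, yy = y / a, zz = z / a;
	if (yy == -1.0f) return 1; // left   -> sampled from posy in the kernel above
	if (xx ==  1.0f) return 2; // front  -> posx
	if (yy ==  1.0f) return 3; // right  -> negy
	if (xx == -1.0f) return 4; // back   -> negx
	if (zz ==  1.0f) return 5; // bottom -> negz
	if (zz == -1.0f) return 6; // top    -> posz
	return 0;                  // unreachable: one component always normalizes to +/-1
}
// --------------------------------------------------------------------------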
void cube2equiCUDA(cv::cuda::GpuMat& posy, cv::cuda::GpuMat& negx, cv::cuda::GpuMat& posx, cv::cuda::GpuMat& negz, cv::cuda::GpuMat& negy, cv::cuda::GpuMat& posz, cv::cuda::GpuMat& dst, int dims)
{
const dim3 block(32, 32);
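	// Ceiling-division launch grid: divUp is only declared in this file; its usual
	// definition, assumed here, is (a + b - 1) / b, so the grid covers every pixel of dst.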
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
cube2equi << <grid, block >> > (posy, negx, posx, negz, negy, posz, dst, dst.rows, dst.cols, dims);
} |
707f063492e45bc97e7332e2297a597a21c1380f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu"
#else
#define RUN(TYPE, DIMS, REAL) \
hipLaunchKernelGGL(( THCudaTensor_gatherKernel<TYPE, REAL, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(gather)(THCState* state, THCTensor *tensor,
THCTensor *src, int dim, THCudaLongTensor *index) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 4,
"Index tensor must have same dimensions as input tensor");
THArgCheck(tensor->sizes().equals(index->sizes()), 4,
"Index tensor must have the same size as output tensor.");
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3,
"Index dimension is out of bounds");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Input tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) == THCTensor_(sizeLegacyNoScalars)(state, src, d), 2,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
THCudaCheck(hipGetLastError());
break;
case 2:
RUN(unsigned int, 2, scalar_t);
THCudaCheck(hipGetLastError());
break;
case 3:
RUN(unsigned int, 3, scalar_t);
THCudaCheck(hipGetLastError());
break;
default:
RUN(unsigned int, -1, scalar_t);
THCudaCheck(hipGetLastError());
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t);
THCudaCheck(hipGetLastError());
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
THCudaCheck(hipGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
hipLaunchKernelGGL(( THCudaTensor_scatterKernel<TYPE, REAL, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d);
if (d != dim) {
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3,
"Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s",
dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str);
}
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3,
"Index tensor must not have larger size than input tensor, but got index %s input %s",
THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str);
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t)
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
THCudaCheck(hipGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
hipLaunchKernelGGL(( THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d);
if (d != dim) {
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3,
"Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s",
dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str);
}
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3,
"Index tensor must not have larger size than input tensor, but got index %s input %s",
THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str);
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t)
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
THCudaCheck(hipGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
hipLaunchKernelGGL(( THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStreamOnDevice(state, curDevice), \
tensorInfo, indexInfo, value, dim, (TYPE)totalElements);
void
THCTensor_(scatterFill)(THCState* state, THCTensor *tensor,
int dim, THCudaLongTensor *index, scalar_t value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) ==
THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3,
"Index tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) ==
THCudaLongTensor_sizeLegacyNoScalars(state, index, d), 4,
"Index tensor must have same size as output tensor apart from the specified dimension");
}
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
hipGetDevice(&curDevice);
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t);
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
THCudaCheck(hipGetLastError());
}
#undef RUN
#endif
| 707f063492e45bc97e7332e2297a597a21c1380f.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorScatterGather.cu"
#else
#define RUN(TYPE, DIMS, REAL) \
THCudaTensor_gatherKernel<TYPE, REAL, DIMS> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(gather)(THCState* state, THCTensor *tensor,
THCTensor *src, int dim, THCudaLongTensor *index) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 4,
"Index tensor must have same dimensions as input tensor");
THArgCheck(tensor->sizes().equals(index->sizes()), 4,
"Index tensor must have the same size as output tensor.");
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3,
"Index dimension is out of bounds");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Input tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) == THCTensor_(sizeLegacyNoScalars)(state, src, d), 2,
"Input tensor must have same size as output tensor apart from the specified dimension");
}
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
THCudaCheck(cudaGetLastError());
break;
case 2:
RUN(unsigned int, 2, scalar_t);
THCudaCheck(cudaGetLastError());
break;
case 3:
RUN(unsigned int, 3, scalar_t);
THCudaCheck(cudaGetLastError());
break;
default:
RUN(unsigned int, -1, scalar_t);
THCudaCheck(cudaGetLastError());
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t);
THCudaCheck(cudaGetLastError());
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
THCudaCheck(cudaGetLastError());
}
#undef RUN
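// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): gather above, and
// scatter/scatterAdd/scatterFill below, all share the same dispatch recipe:
// use 32-bit offsets when every tensor passes THCTensor_canUse32BitIndexMath
// (falling back to uint64_t otherwise), then instantiate the kernel for index
// dims 1, 2 and 3 with -1 as the generic fallback so common shapes get fully
// specialized offset math. Stripped of the THC plumbing, the dispatch looks
// roughly like this (kernelSketch is a made-up name for this comment only;
// the real launches go through the RUN macro redefined before each function):
//
//   template <typename IndexType, int Dims>
//   __global__ void kernelSketch(/* TensorInfo<...> arguments */ IndexType n);
//
//   switch (indexInfo.dims) {        // specialize for small dimensionalities
//     case 1:  kernelSketch<IndexType, 1><<<grid, block, 0, stream>>>(/*...*/ n); break;
//     case 2:  kernelSketch<IndexType, 2><<<grid, block, 0, stream>>>(/*...*/ n); break;
//     case 3:  kernelSketch<IndexType, 3><<<grid, block, 0, stream>>>(/*...*/ n); break;
//     default: kernelSketch<IndexType, -1><<<grid, block, 0, stream>>>(/*...*/ n); break;
//   }
// --------------------------------------------------------------------------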
#define RUN(TYPE, DIMS, REAL) \
THCudaTensor_scatterKernel<TYPE, REAL, DIMS> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(scatter)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d);
if (d != dim) {
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3,
"Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s",
dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str);
}
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3,
"Index tensor must not have larger size than input tensor, but got index %s input %s",
THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str);
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t)
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
THCudaCheck(cudaGetLastError());
}
#undef RUN
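// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): every entry point in
// this file also wraps its launch in the same overlap guard. When the
// destination may have overlapping indices, the work is done on a contiguous
// clone and copied back afterwards:
//
//   if (THCTensor_maybeOverlappingIndices(state, tensor)) {
//       oldTensor = tensor;                                 // remember the original
//       tensor = THCTensor_(newContiguous)(state, tensor);  // safe scratch copy
//   }
//   /* ... launch on `tensor` ... */
//   if (oldTensor) {
//       THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
//       THCTensor_(free)(state, tensor);                    // drop the scratch copy
//       tensor = oldTensor;
//   }
// --------------------------------------------------------------------------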
#define RUN(TYPE, DIMS, REAL) \
THCudaTensor_scatterAddKernel<TYPE, REAL, DIMS> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \
tensorInfo, srcInfo, indexInfo, dim, (TYPE)totalElements);
void THCTensor_(scatterAdd)(THCState* state, THCTensor *tensor, int dim, THCudaLongTensor *index, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == THCTensor_(nDimensionLegacyNoScalars)(state, src), 3,
"Index tensor must have same dimensions as input tensor");
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) == THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 4,
"Input tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
int64_t indexSizeD = THCudaLongTensor_sizeLegacyNoScalars(state, index, d);
if (d != dim) {
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, tensor, d), 3,
"Index tensor must not have larger size than output tensor apart from the specified dimension %d, but got index %s output %s",
dim, THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, tensor).str);
}
THArgCheck(indexSizeD <= THCTensor_(sizeLegacyNoScalars)(state, src, d), 3,
"Index tensor must not have larger size than input tensor, but got index %s input %s",
THCudaLongTensor_sizeDesc(state, index).str, THCTensor_(sizeDesc)(state, src).str);
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (totalElements > 0) {
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<scalar_t, unsigned int> srcInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<scalar_t, uint64_t> srcInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t)
}
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
THCudaCheck(cudaGetLastError());
}
#undef RUN
#define RUN(TYPE, DIMS, REAL) \
THCudaTensor_scatterFillKernel<TYPE, REAL, DIMS> \
<<<grid, block, 0, THCState_getCurrentStreamOnDevice(state, curDevice)>>>( \
tensorInfo, indexInfo, value, dim, (TYPE)totalElements);
void
THCTensor_(scatterFill)(THCState* state, THCTensor *tensor,
int dim, THCudaLongTensor *index, scalar_t value) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(dim >= 0 && dim < THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 2,
"Index dimension is out of bounds");
THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) ==
THCTensor_(nDimensionLegacyNoScalars)(state, tensor), 3,
"Index tensor must have same dimensions as output tensor");
for (int d = 0; d < THCTensor_(nDimensionLegacyNoScalars)(state, tensor); d++) {
if (d != dim) {
THArgCheck(THCTensor_(sizeLegacyNoScalars)(state, tensor, d) ==
THCudaLongTensor_sizeLegacyNoScalars(state, index, d), 4,
"Index tensor must have same size as output tensor apart from the specified dimension");
}
}
THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, tensor) <= MAX_CUTORCH_DIMS,
1, CUTORCH_DIM_WARNING);
const ptrdiff_t totalElements = THCudaLongTensor_nElement(state, index);
const dim3 block = getApplyBlock();
dim3 grid;
int curDevice = -1;
cudaGetDevice(&curDevice);
THArgCheck(getApplyGrid(state, totalElements, grid, curDevice), 1, CUTORCH_DIM_WARNING);
THCTensor* oldTensor = NULL;
if (THCTensor_maybeOverlappingIndices(state, tensor)) {
oldTensor = tensor;
tensor = THCTensor_(newContiguous)(state, tensor);
}
if (THCTensor_canUse32BitIndexMath(state, tensor) &&
THCTensor_canUse32BitIndexMath(state, index)) {
TensorInfo<scalar_t, unsigned int> tensorInfo =
getTensorInfo<scalar_t, THCTensor, unsigned int>(state, tensor);
TensorInfo<int64_t, unsigned int> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, index);
// Specialize for a small number of dimensions.
switch (indexInfo.dims) {
case 1:
RUN(unsigned int, 1, scalar_t);
break;
case 2:
RUN(unsigned int, 2, scalar_t);
break;
case 3:
RUN(unsigned int, 3, scalar_t);
break;
default:
RUN(unsigned int, -1, scalar_t);
break;
}
} else {
TensorInfo<scalar_t, uint64_t> tensorInfo =
getTensorInfo<scalar_t, THCTensor, uint64_t>(state, tensor);
TensorInfo<int64_t, uint64_t> indexInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, index);
RUN(uint64_t, -1, scalar_t);
}
if (oldTensor) {
THCTensor_copyIgnoringOverlaps<scalar_t>(state, oldTensor, tensor);
THCTensor_(free)(state, tensor);
tensor = oldTensor;
}
THCudaCheck(cudaGetLastError());
}
#undef RUN
#endif
|
2aba5a1ecda873671722a0cef0e7beadf0e2d85d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S2_18.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
	// the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
            real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
            for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
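// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the gate expressions
// above all have the exponential (Rush-Larsen-style) form
//     g_new = g_inf - (g_inf - g) * exp(-dt / tau_g),
// which integrates the linear gate ODE dg/dt = (g_inf - g) / tau_g exactly
// over one step and keeps the stiff gating variables stable. The helper below
// is that update in isolation; its name is an assumption of this sketch and
// the model does not call it.
inline __device__ real gate_update_sketch(real g, real g_inf, real tau_g, real dt)
{
    return g_inf - (g_inf - g) * exp(-dt / tau_g);
}
// --------------------------------------------------------------------------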
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| 2aba5a1ecda873671722a0cef0e7beadf0e2d85d.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S2_18.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
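// --------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the state vector is
// allocated with cudaMallocPitch above, so each of the NEQ variables occupies
// one padded row and each cell one column, with the row pitch copied to the
// device-side symbol `pitch` declared in the included model header. The
// helpers below isolate the row/column arithmetic used throughout this file;
// their names are assumptions of this sketch and nothing in the model calls them.
__device__ inline real *state_ptr_sketch(real *sv, size_t pitch_bytes, int var, int cell)
{
    // advance `var` padded rows (in bytes), then index the cell inside that row
    return (real *)((char *)sv + pitch_bytes * var) + cell;
}
__device__ inline real read_state_sketch(const real *sv, size_t pitch_bytes, int var, int cell)
{
    return *((const real *)((const char *)sv + pitch_bytes * var) + cell);
}
// --------------------------------------------------------------------------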
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
	// the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5952182591768,0.00128266400523176,0.780370393090429,0.780208222766858,0.000174041905078485,0.485370727173588,0.00293466121399432,0.999998357055344,1.92482840573537e-08,1.88428105751378e-05,0.999770837182767,1.00699532179645,0.999993733315635,4.75139548173797e-05,0.266377866651071,10.2975786179389,139.536672800382};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
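// Gates are advanced with the exponential (Rush-Larsen) formula y_inf - (y_inf - y_old)*exp(-dt/tau)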
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={14.5369194152843,0.000421161732329444,0.000123555730992675,0.000438546024943873,0.268273630830681,0.123585165023946,0.171035514336793,5.02847725301225,0.0110176202871206,1.84752137000130,1095.52052508604,0.000393152126659795,0.528629865494676,0.00975540076461500,0.00491948125354052,8.11442676720905e-05};
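// This parameter set overrides the default conductances assigned above, in the order: GNa, GbNa, GCaL, GbCa, Gto, Gkr, Gks, GK1, GpK, knak, knaca, Vmaxup, GpCa, arel, crel, Vleak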
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
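// Gates are advanced with the exponential (Rush-Larsen) formula y_inf - (y_inf - y_old)*exp(-dt/tau)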
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
b8a8fd222ffdbb5e5fc8192a18f465d07a6365ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases where the inverse-mapped sampling point falls outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
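// Bilinear weights of the four neighbours: w1=hy*hx (low,low), w2=hy*lx (low,high), w3=ly*hx (high,low), w4=ly*lx (high,high); they sum to 1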
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignRotatedForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data,
bool continuous_coordinate) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
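// Each rotated RoI has 6 values: batch index, center x, center y, width, height, angle in degrees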
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_offset = continuous_coordinate ? T(0.5) : 0;
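// With continuous coordinates, the half-pixel shift keeps sampling aligned with pixel centers; the legacy path applies no offset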
T roi_center_w = offset_bottom_rois[1] * spatial_scale - roi_offset;
T roi_center_h = offset_bottom_rois[2] * spatial_scale - roi_offset;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
if (!continuous_coordinate) { // backward compatibility
// Force malformed ROIs to be 1x1
roi_width = c10::hip::compat::max(roi_width, (T)1.);
roi_height = c10::hip::compat::max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
} // namespace
template <>
C10_EXPORT bool RoIAlignRotatedOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
CAFFE_ENFORCE_EQ(order_, StorageOrder::NCHW, "RoIAlign CUDA impl needs NCHW");
if (R.numel() == 0) {
// Handle empty rois
Output(
0,
{0, X.dim32(1), pooled_height_, pooled_width_},
at::dtype<float>()); // RoI pooled data
return true;
}
CAFFE_ENFORCE_EQ(R.dim(), 2);
CAFFE_ENFORCE_EQ(R.dim32(1), 6);
assert(sampling_ratio_ >= 0);
auto* Y = Output(
0,
{R.dim32(0), X.dim32(1), pooled_height_, pooled_width_},
at::dtype<float>()); // RoI pooled data
int output_size = Y->numel();
hipLaunchKernelGGL(( RoIAlignRotatedForward<float>)
, dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
R.data<float>(),
Y->mutable_data<float>(),
aligned_);
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(RoIAlignRotated, RoIAlignRotatedOp<float, CUDAContext>);
} // namespace caffe2
using RoIAlignRotatedOpFloatCUDA =
caffe2::RoIAlignRotatedOp<float, caffe2::CUDAContext>;
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(RoIAlignRotated, RoIAlignRotatedOpFloatCUDA);
| b8a8fd222ffdbb5e5fc8192a18f465d07a6365ab.cu | #ifdef _MSC_VER
#define _USE_MATH_DEFINES // For M_PI
#endif // _MSC_VER
#include <cmath>
#include "caffe2/operators/roi_align_rotated_op.h"
#include <stdio.h>
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* bottom_data,
const int height,
const int width,
T y,
T x,
const int index /* index for debug only*/) {
// deal with cases where the inverse-mapped sampling point falls outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignRotatedForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
T* top_data,
bool continuous_coordinate) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 6;
int roi_batch_ind = offset_bottom_rois[0];
// Do not round
T roi_offset = continuous_coordinate ? T(0.5) : 0;
T roi_center_w = offset_bottom_rois[1] * spatial_scale - roi_offset;
T roi_center_h = offset_bottom_rois[2] * spatial_scale - roi_offset;
T roi_width = offset_bottom_rois[3] * spatial_scale;
T roi_height = offset_bottom_rois[4] * spatial_scale;
T theta = offset_bottom_rois[5] * M_PI / 180.0;
if (!continuous_coordinate) { // backward compatibility
// Force malformed ROIs to be 1x1
roi_width = c10::cuda::compat::max(roi_width, (T)1.);
roi_height = c10::cuda::compat::max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
// Appropriate translation needs to be applied after.
T roi_start_h = -roi_height / 2.0;
T roi_start_w = -roi_width / 2.0;
T cosTheta = cos(theta);
T sinTheta = sin(theta);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
// Rotate by theta around the center and translate
T x = xx * cosTheta + yy * sinTheta + roi_center_w;
T y = yy * cosTheta - xx * sinTheta + roi_center_h;
T val = bilinear_interpolate(
offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
} // namespace
template <>
C10_EXPORT bool RoIAlignRotatedOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
CAFFE_ENFORCE_EQ(order_, StorageOrder::NCHW, "RoIAlign CUDA impl needs NCHW");
if (R.numel() == 0) {
// Handle empty rois
Output(
0,
{0, X.dim32(1), pooled_height_, pooled_width_},
at::dtype<float>()); // RoI pooled data
return true;
}
CAFFE_ENFORCE_EQ(R.dim(), 2);
CAFFE_ENFORCE_EQ(R.dim32(1), 6);
assert(sampling_ratio_ >= 0);
auto* Y = Output(
0,
{R.dim32(0), X.dim32(1), pooled_height_, pooled_width_},
at::dtype<float>()); // RoI pooled data
int output_size = Y->numel();
RoIAlignRotatedForward<float>
<<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
X.data<float>(),
spatial_scale_,
X.dim32(1),
X.dim32(2),
X.dim32(3),
pooled_height_,
pooled_width_,
sampling_ratio_,
R.data<float>(),
Y->mutable_data<float>(),
aligned_);
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(RoIAlignRotated, RoIAlignRotatedOp<float, CUDAContext>);
} // namespace caffe2
using RoIAlignRotatedOpFloatCUDA =
caffe2::RoIAlignRotatedOp<float, caffe2::CUDAContext>;
C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(RoIAlignRotated, RoIAlignRotatedOpFloatCUDA);
|
f31bc83ac31156c5ed64c75e702dd10e73ca4704.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(const T* bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases where the inverse-mapped sampling point falls outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
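// Bilinear weights of the four neighbours: w1=hy*hx (low,low), w2=hy*lx (low,high), w3=ly*hx (high,low), w4=ly*lx (high,high); they sum to 1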
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
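// Each RoI has 5 values: batch index, x1, y1, x2, y2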
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases where the inverse-mapped sampling point falls outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
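// Scatter the bin's upstream gradient to the four neighbouring input cells in proportion to their bilinear weights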
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
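// The grid is capped at 4096 blocks; CUDA_1D_KERNEL_LOOP grid-strides over any remaining elements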
if (output.numel() == 0) {
THCudaCheck(hipGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
hipLaunchKernelGGL(( RoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(THCCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(hipGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
hipLaunchKernelGGL(( RoIAlignBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(hipGetLastError());
return grad_input;
}
| f31bc83ac31156c5ed64c75e702dd10e73ca4704.cu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
// TODO make it in a common file
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
template <typename T>
__device__ T bilinear_interpolate(const T* bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases where the inverse-mapped sampling point falls outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void RoIAlignForward(const int nthreads, const T* bottom_data,
const T spatial_scale, const int channels,
const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
const T* bottom_rois, T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
top_data[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases where the inverse-mapped sampling point falls outside the feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = bottom_data[y_low * width + x_low];
// T v2 = bottom_data[y_low * width + x_high];
// T v3 = bottom_data[y_high * width + x_low];
// T v4 = bottom_data[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff,
const int num_rois, const T spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
const int sampling_ratio,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, (T)1.);
T roi_height = max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_top_diff = top_diff + top_offset;
const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++)
{
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = top_diff_this_bin * w1 / count;
T g2 = top_diff_this_bin * w2 / count;
T g3 = top_diff_this_bin * w3 / count;
T g4 = top_diff_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0)
{
atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
} // RoIAlignBackward
at::Tensor ROIAlign_forward_cuda(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv(output_size, 512L), 4096L));
dim3 block(512);
if (output.numel() == 0) {
THCudaCheck(cudaGetLastError());
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input.contiguous().data<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.contiguous().data<scalar_t>(),
output.data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return output;
}
// TODO remove the dependency on input and use instead its sizes -> save memory
at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int batch_size,
const int channels,
const int height,
const int width,
const int sampling_ratio) {
AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor");
AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor");
auto num_rois = rois.size(0);
auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(THCCeilDiv(grad.numel(), 512L), 4096L));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
THCudaCheck(cudaGetLastError());
return grad_input;
}
AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "ROIAlign_backward", [&] {
RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.contiguous().data<scalar_t>(),
num_rois,
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
grad_input.data<scalar_t>(),
rois.contiguous().data<scalar_t>());
});
THCudaCheck(cudaGetLastError());
return grad_input;
}
|
1a5b268d535d09d00e3a64cc52ea44ae6bd97989.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 1 << 27 )
#define THREADS_PER_BLOCK 128
#define FLOATTYPE_T float
__global__ void sumReduction(int n, FLOATTYPE_T *in, FLOATTYPE_T *out)
{
__shared__ FLOATTYPE_T sArray[THREADS_PER_BLOCK];
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* zero out the smem array */
sArray[threadIdx.x] = 0.0;
/* The grid-stride loop handles the case where the array is larger than the
* number of threads launched: each thread strides through the array, adds up
* its share of the elements, and stores the partial sum in SMEM.
*/
for( int i = globalIndex; i < n; i += blockDim.x * gridDim.x )
{
sArray[threadIdx.x] += in[i]; /* accumulate this thread's share of the input */
} /* end for */
/* do the final reduction in SMEM */
for( int i = 1; i < blockDim.x; i = i * 2 )
{
__syncthreads(); /* make the partial sums from the previous step visible */
if( threadIdx.x % (2 * i) == 0 )
{
sArray[threadIdx.x] += sArray[threadIdx.x + i];
} /* end if */
} /* end for */
/* thread0 writes the thread block reduced value back to global memory */
if( threadIdx.x == 0 ) out[blockIdx.x] = sArray[0];
return;
}
int main()
{
FLOATTYPE_T *h_in, h_sum, cpu_sum;
FLOATTYPE_T *d_in, *d_sum, *d_tempArray;
int size = N;
int memBytes = size * sizeof( FLOATTYPE_T );
int tempArraySize = 32768;
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( hipMalloc( &d_in, memBytes ) );
checkCUDA( hipMalloc( &d_sum, sizeof(FLOATTYPE_T) ) );
checkCUDA( hipMalloc( &d_tempArray, tempArraySize * sizeof(FLOATTYPE_T) ) );
/* allocate space for host copies of in, out and setup input values */
h_in = (FLOATTYPE_T *)malloc( memBytes );
for( int i = 0; i < size; i++ )
{
h_in[i] = FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 );
if( i % 2 == 0 ) h_in[i] = -h_in[i];
}
h_sum = 0.0;
cpu_sum = 0.0;
/* copy inputs to device */
checkCUDA( hipMemcpy( d_in, h_in, memBytes, hipMemcpyHostToDevice ) );
checkCUDA( hipMemset( d_sum, 0, sizeof(FLOATTYPE_T) ) );
checkCUDA( hipMemset( d_tempArray, 0,
tempArraySize * sizeof(FLOATTYPE_T) ) );
/* calculate block and grid sizes */
dim3 threads1( THREADS_PER_BLOCK, 1, 1 );
int blk = min( (size / threads1.x), tempArraySize );
dim3 blocks( blk, 1, 1);
dim3 threads2( min(blocks.x,threads1.x), 1, 1 );
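/* Two-pass reduction: the first launch reduces the N inputs to blocks.x
* partial sums in d_tempArray, the second launch reduces those partial
* sums to a single value in d_sum.
*/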
/* start the timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( sumReduction), dim3(blocks), dim3(threads1) , 0, 0, size, d_in, d_tempArray );
checkKERNEL()
hipLaunchKernelGGL(( sumReduction), dim3(1), dim3(threads2) , 0, 0, blocks.x, d_tempArray, d_sum );
checkKERNEL()
/* stop the timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)*
(double)size * 1.e-9 );
printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* copy result back to host */
checkCUDA( hipMemcpy( &h_sum, d_sum, sizeof(FLOATTYPE_T),
hipMemcpyDeviceToHost ) );
checkCUDA( hipEventRecord( start, 0 ) );
for( int i = 0; i < size; i++ )
{
cpu_sum += h_in[i];
} /* end for */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
FLOATTYPE_T diff = abs( cpu_sum - h_sum );
if( diff / abs(h_sum) < 0.001 ) printf("PASS\n");
else
{
printf("FAIL\n");
printf("Error is %f\n", diff / h_sum );
printf("GPU result is %f, CPU result is %f\n",h_sum, cpu_sum );
} /* end else */
/* clean up */
free(h_in);
checkCUDA( hipFree( d_in ) );
checkCUDA( hipFree( d_sum ) );
checkCUDA( hipFree( d_tempArray ) );
checkCUDA( hipDeviceReset() );
return 0;
} /* end main */
| 1a5b268d535d09d00e3a64cc52ea44ae6bd97989.cu | /*
* Copyright 2016 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 1 << 27 )
#define THREADS_PER_BLOCK 128
#define FLOATTYPE_T float
__global__ void sumReduction(int n, FLOATTYPE_T *in, FLOATTYPE_T *out)
{
__shared__ FLOATTYPE_T sArray[THREADS_PER_BLOCK];
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
/* zero out the smem array */
sArray[threadIdx.x] = 0.0;
/* Grid-stride loop: handles the case where the array is larger than the
* number of threads launched. Each thread accumulates its assigned
* elements of the array into the shared-memory array.
*/
for( int i = globalIndex; i < n; i += blockDim.x * gridDim.x )
{
sArray[threadIdx.x] += FIXME;
} /* end for */
/* do the final reduction in SMEM */
for( int i = 1; i < blockDim.x; i = FIXME )
{
if( threadIdx.x % (FIXME) == 0 )
{
sArray[FIXME] += sArray[FIXME];
} /* end if */
} /* end for */
/* thread0 writes the thread block reduced value back to global memory */
if( threadIdx.x == 0 ) out[blockIdx.x] = sArray[0];
return;
}
int main()
{
FLOATTYPE_T *h_in, h_sum, cpu_sum;
FLOATTYPE_T *d_in, *d_sum, *d_tempArray;
int size = N;
int memBytes = size * sizeof( FLOATTYPE_T );
int tempArraySize = 32768;
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( cudaMalloc( &d_in, memBytes ) );
checkCUDA( cudaMalloc( &d_sum, sizeof(FLOATTYPE_T) ) );
checkCUDA( cudaMalloc( &d_tempArray, tempArraySize * sizeof(FLOATTYPE_T) ) );
/* allocate space for host copies of in, out and setup input values */
h_in = (FLOATTYPE_T *)malloc( memBytes );
for( int i = 0; i < size; i++ )
{
h_in[i] = FLOATTYPE_T( rand() ) / ( FLOATTYPE_T (RAND_MAX) + 1.0 );
if( i % 2 == 0 ) h_in[i] = -h_in[i];
}
h_sum = 0.0;
cpu_sum = 0.0;
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_in, h_in, memBytes, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemset( d_sum, 0, sizeof(FLOATTYPE_T) ) );
checkCUDA( cudaMemset( d_tempArray, 0,
tempArraySize * sizeof(FLOATTYPE_T) ) );
/* calculate block and grid sizes */
dim3 threads1( THREADS_PER_BLOCK, 1, 1 );
int blk = min( (size / threads1.x), tempArraySize );
dim3 blocks( blk, 1, 1);
dim3 threads2( min(blocks.x,threads1.x), 1, 1 );
/* start the timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
sumReduction<<< blocks, FIXME >>>( size, d_in, d_tempArray );
checkKERNEL()
sumReduction<<< 1, FIXME >>>( blocks.x, d_tempArray, d_sum );
checkKERNEL()
/* stop the timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total elements is %d, %f GB\n", size, sizeof(FLOATTYPE_T)*
(double)size * 1.e-9 );
printf("GPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
/* copy result back to host */
checkCUDA( cudaMemcpy( &h_sum, d_sum, sizeof(FLOATTYPE_T),
cudaMemcpyDeviceToHost ) );
checkCUDA( cudaEventRecord( start, 0 ) );
for( int i = 0; i < size; i++ )
{
cpu_sum += h_in[i];
} /* end for */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("CPU total time is %f ms, bandwidth %f GB/s\n", elapsedTime,
sizeof(FLOATTYPE_T) * (double) size /
( (double) elapsedTime / 1000.0 ) * 1.e-9);
FLOATTYPE_T diff = abs( cpu_sum - h_sum );
if( diff / abs(h_sum) < 0.001 ) printf("PASS\n");
else
{
printf("FAIL\n");
printf("Error is %f\n", diff / h_sum );
printf("GPU result is %f, CPU result is %f\n",h_sum, cpu_sum );
} /* end else */
/* clean up */
free(h_in);
checkCUDA( cudaFree( d_in ) );
checkCUDA( cudaFree( d_sum ) );
checkCUDA( cudaFree( d_tempArray ) );
checkCUDA( cudaDeviceReset() );
return 0;
} /* end main */
|
f92b8b8d5555f4d52f40208f6a900f930489997c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/adagrad.hpp>
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
template <typename T>
__global__ void kernel_adagrad_update(const int num, T *data, const T *grad,
T *g, const float lr, const float eps) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
g[idx] += grad[idx] * grad[idx];
data[idx] -= lr * grad[idx] / (sqrt(g[idx]) + eps);
}
}
template <typename T>
void AdagradCuda<T>::update_impl(const string &key, VariablePtr param) {
Size_t size = param->size();
auto &state = this->states_.at(key);
VariablePtr g_ = state.pstate["v"];
T *g = g_->cast_data_and_get_pointer<T>(this->ctx_);
auto &t = state.t;
const T *grad = param->get_grad_pointer<T>(this->ctx_);
T *data = param->cast_data_and_get_pointer<T>(this->ctx_);
t = ::min(t + 1, std::numeric_limits<uint32_t>::max() - 1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_adagrad_update, size, data, grad, g,
this->lr_, this->eps_);
}
NBLA_DEF_WEIGHT_DECAY(AdagradCuda, weight_decay_cuda);
NBLA_DEF_CHECK_INF_GRAD(AdagradCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(AdagradCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(AdagradCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(AdagradCuda, scale_grad_impl_cuda);
}
| f92b8b8d5555f4d52f40208f6a900f930489997c.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/adagrad.hpp>
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
template <typename T>
__global__ void kernel_adagrad_update(const int num, T *data, const T *grad,
T *g, const float lr, const float eps) {
NBLA_CUDA_KERNEL_LOOP(idx, num) {
g[idx] += grad[idx] * grad[idx];
data[idx] -= lr * grad[idx] / (sqrt(g[idx]) + eps);
}
}
template <typename T>
void AdagradCuda<T>::update_impl(const string &key, VariablePtr param) {
Size_t size = param->size();
auto &state = this->states_.at(key);
VariablePtr g_ = state.pstate["v"];
T *g = g_->cast_data_and_get_pointer<T>(this->ctx_);
auto &t = state.t;
const T *grad = param->get_grad_pointer<T>(this->ctx_);
T *data = param->cast_data_and_get_pointer<T>(this->ctx_);
t = std::min(t + 1, std::numeric_limits<uint32_t>::max() - 1);
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_adagrad_update, size, data, grad, g,
this->lr_, this->eps_);
}
NBLA_DEF_WEIGHT_DECAY(AdagradCuda, weight_decay_cuda);
NBLA_DEF_CHECK_INF_GRAD(AdagradCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(AdagradCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(AdagradCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(AdagradCuda, scale_grad_impl_cuda);
}
|
27e2b0efae118ecc8526c78c24b6ce57f2c50744.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "commonblas_z.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
__global__
void magma_zlarfx_kernel( int m, magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, int ldc, double *xnorm,
magmaDoubleComplex *T, int it )
{
if ( !MAGMA_Z_EQUAL(*tau, MAGMA_Z_ZERO) ) {
const int tx = threadIdx.x;
//magmaDoubleComplex *dc = c + (blockIdx.x-it-1) * ldc;
magmaDoubleComplex *dc = c + (blockIdx.x) * ldc;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x<it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x>it it performs w := v**H * C */
lsum = MAGMA_Z_ZERO;
for( int j = tx; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_Z_MUL( MAGMA_Z_ONE, dc[j] );
v[j] = MAGMA_Z_ONE;
}
else
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
magmaDoubleComplex z__1 = - MAGMA_Z_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-tx-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx==0){
double temp = MAGMA_Z_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_Z_CNJG(z__1);
}
}
else if (blockIdx.x<=it)// in case tau is zero put the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_Z_ZERO;
}
}
//==============================================================================
extern "C"
__global__
void magma_ztrmv_kernel(const magmaDoubleComplex *T, int ldt, magmaDoubleComplex *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ magmaDoubleComplex tlocal[ BLOCK_SIZE ];
magmaDoubleComplex res = MAGMA_Z_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
extern "C"
__global__
void magma_ztrmv_kernel2(const magmaDoubleComplex *T, int ldt, magmaDoubleComplex *t,
magmaDoubleComplex *y, magmaDoubleComplex *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ magmaDoubleComplex sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
extern "C"
__global__
void magma_ztrmv_tkernel(magmaDoubleComplex *T, int ldt, magmaDoubleComplex *t, magmaDoubleComplex *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ magmaDoubleComplex sum[ 128 ];
sum[tx] = MAGMA_Z_CNJG(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This differs from
LAPACK's zlarf routine.
*/
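/*
Restating what the kernels launched below compute (v(0) is implicitly 1):
each block handling a column j > iter performs, in exact arithmetic,
w = v**H * C(:,j)
C(:,j) = C(:,j) - conjg(tau) * w * v
while blocks with index <= iter store the quantities used to assemble
column `iter` of the triangular factor T (see the branch on blockIdx.x in
magma_zlarfx_kernel above).
*/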
extern "C" void
magma_zlarfx_gpu(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr tau,
magmaDoubleComplex_ptr C, magma_int_t ldc,
magmaDouble_ptr xnorm,
magmaDoubleComplex_ptr dT, magma_int_t iter,
magmaDoubleComplex_ptr work )
{
magma_int_t N = n + iter + 1;
if (iter==0)
hipLaunchKernelGGL(( magma_zlarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, C, ldc, xnorm, dT+iter*N, iter);
else
hipLaunchKernelGGL(( magma_zlarfx_kernel), dim3(N), dim3(BLOCK_SIZE), 0, magma_stream , m, v, tau, C, ldc, xnorm, work, iter);
if (iter > 0){
//magma_ztrmv_kernel<<< 1, iter, 0, magma_stream >>>( dT, N, dT+iter*N);
hipLaunchKernelGGL(( magma_ztrmv_kernel2), dim3(iter), dim3(iter), 0, magma_stream , dT, N, work, dT+iter*N, tau);
}
}
//==============================================================================
| 27e2b0efae118ecc8526c78c24b6ce57f2c50744.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
#include "commonblas_z.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define BLOCK_SIZEx 32
#define BLOCK_SIZEy 16
//==============================================================================
__global__
void magma_zlarfx_kernel( int m, magmaDoubleComplex *v, magmaDoubleComplex *tau,
magmaDoubleComplex *c, int ldc, double *xnorm,
magmaDoubleComplex *T, int it )
{
if ( !MAGMA_Z_EQUAL(*tau, MAGMA_Z_ZERO) ) {
const int tx = threadIdx.x;
//magmaDoubleComplex *dc = c + (blockIdx.x-it-1) * ldc;
magmaDoubleComplex *dc = c + (blockIdx.x) * ldc;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
/* NOTE HERE C is the C at position C(i, 0)
* if blockIdx.x<it it performs the V(i:n,i)' * V(i:n,1:i-1)' used for computing T
* if blockIdx.x>it it performs w := v**H * C */
lsum = MAGMA_Z_ZERO;
for( int j = tx; j < m; j += BLOCK_SIZE ){
if (j==0){
lsum += MAGMA_Z_MUL( MAGMA_Z_ONE, dc[j] );
v[j] = MAGMA_Z_ONE;
}
else
lsum += MAGMA_Z_MUL( MAGMA_Z_CNJG( v[j] ), dc[j] );
}
sum[tx] = lsum;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
/* C := C - v * w */
__syncthreads();
magmaDoubleComplex z__1 = - MAGMA_Z_CNJG(*tau) * sum[0];
if (blockIdx.x>it){
for( int j = m-tx-1; j>=0 ; j -= BLOCK_SIZE )
dc[j] += z__1 * v[j];
__syncthreads();
/* Adjust the rest of the column norms */
/*
if (tx==0){
double temp = MAGMA_Z_ABS( dc[0] ) / xnorm[blockIdx.x-it-1];
temp = (temp + 1.) * (1. - temp);
xnorm[blockIdx.x-it-1] = xnorm[blockIdx.x-it-1] * sqrt(temp);
}
*/
}
else
{
if (blockIdx.x==it)
*(T+it) = *tau;
else
*(T+blockIdx.x) = MAGMA_Z_CNJG(z__1);
}
}
else if (blockIdx.x<=it)// in case tau is zero put the corresponding column of T to zero
{
*(T+blockIdx.x) = MAGMA_Z_ZERO;
}
}
//==============================================================================
extern "C"
__global__
void magma_ztrmv_kernel(const magmaDoubleComplex *T, int ldt, magmaDoubleComplex *t)
{
const int tx = threadIdx.x;
T += tx;
__shared__ magmaDoubleComplex tlocal[ BLOCK_SIZE ];
magmaDoubleComplex res = MAGMA_Z_MAKE(0., 0.);
tlocal[tx] = t[tx];
__syncthreads();
#pragma unroll
for(int j=0; j<blockDim.x; j++)
res += T[j*ldt]*tlocal[j];
t[tx] = res;
}
extern "C"
__global__
void magma_ztrmv_kernel2(const magmaDoubleComplex *T, int ldt, magmaDoubleComplex *t,
magmaDoubleComplex *y, magmaDoubleComplex *tau)
{
const int tx = threadIdx.x;
T += blockIdx.x;
__shared__ magmaDoubleComplex sum[ 128 ];
sum[tx] = T[tx*ldt]*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx==0){
y[blockIdx.x] = sum[0];
if (blockIdx.x==0)
y[gridDim.x] = tau[0];
}
}
//==============================================================================
extern "C"
__global__
void magma_ztrmv_tkernel(magmaDoubleComplex *T, int ldt, magmaDoubleComplex *t, magmaDoubleComplex *y)
{
const int tx = threadIdx.x;
T += blockIdx.x*ldt;
__shared__ magmaDoubleComplex sum[ 128 ];
sum[tx] = MAGMA_Z_CNJG(T[tx])*t[tx];
magma_sum_reduce_n(blockDim.x, tx, sum);
__syncthreads();
if (tx==0)
y[blockIdx.x] = sum[0];
}
//==============================================================================
/*
Apply a complex elementary reflector H to a complex M-by-N
matrix C from the left. H is represented in the form
H = I - tau * v * v**H
where tau is a complex scalar and v is a complex vector.
If tau = 0, then H is taken to be the unit matrix.
To apply H**H (the conjugate transpose of H), supply conjg(tau)
instead of tau.
The norms of v(:, 1:n) are given as input in xnorm(1:n). On exit, the norms
are adjusted to hold the norms of v(2:m,2:n). This differs from
LAPACK's zlarf routine.
*/
extern "C" void
magma_zlarfx_gpu(
magma_int_t m, magma_int_t n,
magmaDoubleComplex_ptr v,
magmaDoubleComplex_ptr tau,
magmaDoubleComplex_ptr C, magma_int_t ldc,
magmaDouble_ptr xnorm,
magmaDoubleComplex_ptr dT, magma_int_t iter,
magmaDoubleComplex_ptr work )
{
magma_int_t N = n + iter + 1;
if (iter==0)
magma_zlarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, C, ldc, xnorm, dT+iter*N, iter);
else
magma_zlarfx_kernel<<< N, BLOCK_SIZE, 0, magma_stream >>>( m, v, tau, C, ldc, xnorm, work, iter);
if (iter > 0){
//magma_ztrmv_kernel<<< 1, iter, 0, magma_stream >>>( dT, N, dT+iter*N);
magma_ztrmv_kernel2<<< iter, iter, 0, magma_stream >>>( dT, N, work, dT+iter*N, tau);
}
}
//==============================================================================
|
b30ac36d234397612f7a6152a15e05bddb3f53c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/flip_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/utils/array.h"
namespace phi {
template <typename T, size_t Rank>
__global__ void flip_cuda_kernel(const int64_t N,
const T* in_data,
T* out_data,
phi::Array<int64_t, Rank> shape,
phi::Array<int64_t, Rank> stride,
phi::Array<int, Rank> flip_dims,
int flip_dims_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
int cur_indices = idx, rem = 0, dst_offset = 0;
for (int i = 0; i < Rank; ++i) {
int64_t temp = cur_indices;
cur_indices = cur_indices / stride[i];
rem = temp - cur_indices * stride[i];
// flip the indices if it is in flip_dims
for (int j = 0; j < flip_dims_size; ++j) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * stride[i];
cur_indices = rem;
}
out_data[idx] = in_data[dst_offset];
}
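// Worked example for the index math above: for a row-major 2x3 tensor with
// shape = {2, 3}, stride = {3, 1} and flip_dims = {1}, thread idx = 0
// decomposes to indices (0, 0); flipping dim 1 gives (0, 3 - 1 - 0) = (0, 2),
// so dst_offset = 2 and out_data[0] = in_data[2], i.e. each row is reversed.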
template <typename T, typename Context, size_t N>
void LaunchFlipCudaKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<int>& axis,
DenseTensor* out) {
auto* in_data = x.data<T>();
auto* out_data = dev_ctx.template Alloc<T>(out);
auto x_dims = x.dims();
const int total_dims = x_dims.size();
const int64_t numel = x.numel();
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel);
auto x_stride = phi::stride(x_dims);
phi::Array<int64_t, N> stride_a;
phi::Array<int64_t, N> shape_a;
phi::Array<int, N> flip_dims_a;
size_t flip_dims_size = axis.size();
for (size_t idx = 0; idx < N; ++idx) {
stride_a[idx] = x_stride[idx];
shape_a[idx] = x_dims[idx];
flip_dims_a[idx] = idx < flip_dims_size ? axis[idx] : 0;
}
for (size_t i = 0; i < flip_dims_a.size(); ++i) {
if (flip_dims_a[i] < 0) {
flip_dims_a[i] += total_dims;
}
}
hipLaunchKernelGGL(( flip_cuda_kernel<T, N>)
, dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(),
numel,
in_data,
out_data,
shape_a,
stride_a,
flip_dims_a,
flip_dims_size);
}
template <typename T, typename Context>
void FlipKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<int>& axis,
DenseTensor* out) {
const size_t total_dims = x.dims().size();
switch (total_dims) {
case 0:
LaunchFlipCudaKernel<T, Context, 0>(dev_ctx, x, axis, out);
break;
case 1:
LaunchFlipCudaKernel<T, Context, 1>(dev_ctx, x, axis, out);
break;
case 2:
LaunchFlipCudaKernel<T, Context, 2>(dev_ctx, x, axis, out);
break;
case 3:
LaunchFlipCudaKernel<T, Context, 3>(dev_ctx, x, axis, out);
break;
case 4:
LaunchFlipCudaKernel<T, Context, 4>(dev_ctx, x, axis, out);
break;
case 5:
LaunchFlipCudaKernel<T, Context, 5>(dev_ctx, x, axis, out);
break;
case 6:
LaunchFlipCudaKernel<T, Context, 6>(dev_ctx, x, axis, out);
break;
case 7:
LaunchFlipCudaKernel<T, Context, 7>(dev_ctx, x, axis, out);
break;
case 8:
LaunchFlipCudaKernel<T, Context, 8>(dev_ctx, x, axis, out);
break;
case 9:
LaunchFlipCudaKernel<T, Context, 9>(dev_ctx, x, axis, out);
break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"dims of input tensor should be less than 10, But received"
"%d",
x.dims().size()));
}
}
} // namespace phi
PD_REGISTER_KERNEL(flip,
GPU,
ALL_LAYOUT,
phi::FlipKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16,
int,
int64_t,
bool,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
| b30ac36d234397612f7a6152a15e05bddb3f53c4.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/flip_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/utils/array.h"
namespace phi {
template <typename T, size_t Rank>
__global__ void flip_cuda_kernel(const int64_t N,
const T* in_data,
T* out_data,
phi::Array<int64_t, Rank> shape,
phi::Array<int64_t, Rank> stride,
phi::Array<int, Rank> flip_dims,
int flip_dims_size) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) {
return;
}
int cur_indices = idx, rem = 0, dst_offset = 0;
for (int i = 0; i < Rank; ++i) {
int64_t temp = cur_indices;
cur_indices = cur_indices / stride[i];
rem = temp - cur_indices * stride[i];
// flip the indices if it is in flip_dims
for (int j = 0; j < flip_dims_size; ++j) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * stride[i];
cur_indices = rem;
}
out_data[idx] = in_data[dst_offset];
}
template <typename T, typename Context, size_t N>
void LaunchFlipCudaKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<int>& axis,
DenseTensor* out) {
auto* in_data = x.data<T>();
auto* out_data = dev_ctx.template Alloc<T>(out);
auto x_dims = x.dims();
const int total_dims = x_dims.size();
const int64_t numel = x.numel();
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel);
auto x_stride = phi::stride(x_dims);
phi::Array<int64_t, N> stride_a;
phi::Array<int64_t, N> shape_a;
phi::Array<int, N> flip_dims_a;
size_t flip_dims_size = axis.size();
for (size_t idx = 0; idx < N; ++idx) {
stride_a[idx] = x_stride[idx];
shape_a[idx] = x_dims[idx];
flip_dims_a[idx] = idx < flip_dims_size ? axis[idx] : 0;
}
for (size_t i = 0; i < flip_dims_a.size(); ++i) {
if (flip_dims_a[i] < 0) {
flip_dims_a[i] += total_dims;
}
}
flip_cuda_kernel<T, N>
<<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(
numel,
in_data,
out_data,
shape_a,
stride_a,
flip_dims_a,
flip_dims_size);
}
template <typename T, typename Context>
void FlipKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<int>& axis,
DenseTensor* out) {
const size_t total_dims = x.dims().size();
switch (total_dims) {
case 0:
LaunchFlipCudaKernel<T, Context, 0>(dev_ctx, x, axis, out);
break;
case 1:
LaunchFlipCudaKernel<T, Context, 1>(dev_ctx, x, axis, out);
break;
case 2:
LaunchFlipCudaKernel<T, Context, 2>(dev_ctx, x, axis, out);
break;
case 3:
LaunchFlipCudaKernel<T, Context, 3>(dev_ctx, x, axis, out);
break;
case 4:
LaunchFlipCudaKernel<T, Context, 4>(dev_ctx, x, axis, out);
break;
case 5:
LaunchFlipCudaKernel<T, Context, 5>(dev_ctx, x, axis, out);
break;
case 6:
LaunchFlipCudaKernel<T, Context, 6>(dev_ctx, x, axis, out);
break;
case 7:
LaunchFlipCudaKernel<T, Context, 7>(dev_ctx, x, axis, out);
break;
case 8:
LaunchFlipCudaKernel<T, Context, 8>(dev_ctx, x, axis, out);
break;
case 9:
LaunchFlipCudaKernel<T, Context, 9>(dev_ctx, x, axis, out);
break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"dims of input tensor should be less than 10, But received"
"%d",
x.dims().size()));
}
}
} // namespace phi
PD_REGISTER_KERNEL(flip,
GPU,
ALL_LAYOUT,
phi::FlipKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16,
int,
int64_t,
bool,
phi::dtype::complex<float>,
phi::dtype::complex<double>) {}
|
cf216ce39226a31c88efc6b8915f891459f60976.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/adam.hpp>
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
template <typename T>
__global__ void kernel_adam_update(const int num, T *theta, T *m, T *v,
const T *g, const float alpha_t,
const float beta1, const float beta2,
const float eps) {
NBLA_CUDA_KERNEL_LOOP(s, num) {
// Updating running mean and var.
m[s] = beta1 * m[s] + (1 - beta1) * g[s];
v[s] = beta2 * v[s] + (1 - beta2) * g[s] * g[s];
// Update parameters.
theta[s] = theta[s] - alpha_t * m[s] / (std::sqrt(v[s]) + eps);
}
}
template <typename T>
void AdamCuda<T>::update_impl(const string &key, VariablePtr param) {
cuda_set_device(std::stoi(this->ctx_.device_id));
Size_t size = param->size();
auto &state = this->states_.at(key);
uint32_t &t = state.t;
const T *g = param->get_grad_pointer<T>(this->ctx_);
shared_ptr<Variable> mean_ =
state.pstate["mean"]; // To prevent compile error.
shared_ptr<Variable> var_ = state.pstate["var"]; // To prevent compile error.
T *m = mean_->cast_data_and_get_pointer<T>(this->ctx_);
T *v = var_->cast_data_and_get_pointer<T>(this->ctx_);
T *theta = param->cast_data_and_get_pointer<T>(this->ctx_);
t = ::min(t + 1, std::numeric_limits<uint32_t>::max() - 1);
const T bias_correction = std::sqrt(1 - ::pow(this->beta2_, t)) /
(1 - ::pow(this->beta1_, t));
const T alpha_t = this->alpha_ * bias_correction;
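// alpha_t folds the bias corrections into the step size, as in the efficient
// form from the Adam paper: it matches dividing m by (1 - beta1^t) and v by
// (1 - beta2^t) before the update, apart from how eps is scaled.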
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_adam_update, size, theta, m, v, g,
alpha_t, this->beta1_, this->beta2_,
this->eps_);
}
NBLA_DEF_WEIGHT_DECAY(AdamCuda, weight_decay_cuda);
NBLA_DEF_CHECK_INF_GRAD(AdamCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(AdamCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(AdamCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(AdamCuda, scale_grad_impl_cuda);
}
| cf216ce39226a31c88efc6b8915f891459f60976.cu | // Copyright (c) 2017 Sony Corporation. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/solver/adam.hpp>
#include "./mixed_precision_training.cuh"
#include "./weight_decay.cuh"
namespace nbla {
template <typename T>
__global__ void kernel_adam_update(const int num, T *theta, T *m, T *v,
const T *g, const float alpha_t,
const float beta1, const float beta2,
const float eps) {
NBLA_CUDA_KERNEL_LOOP(s, num) {
// Updating running mean and var.
m[s] = beta1 * m[s] + (1 - beta1) * g[s];
v[s] = beta2 * v[s] + (1 - beta2) * g[s] * g[s];
// Update parameters.
theta[s] = theta[s] - alpha_t * m[s] / (std::sqrt(v[s]) + eps);
}
}
template <typename T>
void AdamCuda<T>::update_impl(const string &key, VariablePtr param) {
cuda_set_device(std::stoi(this->ctx_.device_id));
Size_t size = param->size();
auto &state = this->states_.at(key);
uint32_t &t = state.t;
const T *g = param->get_grad_pointer<T>(this->ctx_);
shared_ptr<Variable> mean_ =
state.pstate["mean"]; // To prevent compile error.
shared_ptr<Variable> var_ = state.pstate["var"]; // To prevent compile error.
T *m = mean_->cast_data_and_get_pointer<T>(this->ctx_);
T *v = var_->cast_data_and_get_pointer<T>(this->ctx_);
T *theta = param->cast_data_and_get_pointer<T>(this->ctx_);
t = std::min(t + 1, std::numeric_limits<uint32_t>::max() - 1);
const T bias_correction = std::sqrt(1 - std::pow(this->beta2_, t)) /
(1 - std::pow(this->beta1_, t));
const T alpha_t = this->alpha_ * bias_correction;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_adam_update, size, theta, m, v, g,
alpha_t, this->beta1_, this->beta2_,
this->eps_);
}
NBLA_DEF_WEIGHT_DECAY(AdamCuda, weight_decay_cuda);
NBLA_DEF_CHECK_INF_GRAD(AdamCuda, check_inf_grad_cuda);
NBLA_DEF_CHECK_NAN_GRAD(AdamCuda, check_nan_grad_cuda);
NBLA_DEF_CHECK_INF_OR_NAN_GRAD(AdamCuda, check_inf_or_nan_grad_cuda);
NBLA_DEF_SCALE_GRAD(AdamCuda, scale_grad_impl_cuda);
}
|
d7e8fee95344077607a8f655b77a80a813601fa8.hip | // !!! This is a file automatically generated by hipify!!!
/* This code will multiply two vectors and
check the result.
*/
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <iostream>
#include <stdio.h>
#define CUDA_CHECK {hipDeviceSynchronize(); \
hipError_t err = hipGetLastError();\
if(err){\
std::cout << "Error: " << hipGetErrorString(err) << " line " << __LINE__ << std::endl; \
exit(1);\
}}
//#define CUDA_CHECK
/* Fill in your dotProduct kernel here...
*/
// Naive (and wrong) way
__global__ void calcDotProductKern1(float *x, float *y, float *res, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < N){
(*res) += x[i] * y[i];
}
}
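/* Why kernel 1 is wrong: every thread does an unsynchronised
* read-modify-write on the single accumulator *res, so most contributions
* are lost; calcDotProductKern2 below serialises the updates with atomicAdd
* instead.
*/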
// Naive, correct, but slow way
__global__ void calcDotProductKern2(float *x, float *y, float *res, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < N){
atomicAdd(res, x[i] * y[i]);
}
}
// Better reduction first pass
// res needs to point to at least 'blocks' floats.
__global__ void calcDotProductKern3(float *x, float *y, float *res, int N)
{
__shared__ float product[512];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int locI = threadIdx.x;
if(i < N)
{
product[locI] = x[i] * y[i];
}
else{
product[locI] = 0;
}
__syncthreads();
int blockSize = blockDim.x;
if( blockSize >= 1024 && locI < 512){
product[locI] = product[locI] + product[locI+512];
__syncthreads();
}
if( blockSize >= 512 && locI < 256){
product[locI] = product[locI] + product[locI+256];
__syncthreads();
}
if( blockSize >= 256 && locI < 128){
product[locI] = product[locI] + product[locI+128];
__syncthreads();
}
if( blockSize >= 128 && locI < 64){
product[locI] = product[locI] + product[locI+64];
__syncthreads();
}
if( blockSize >= 64 && locI < 32){
product[locI] = product[locI] + product[locI+32];
__syncthreads();
}
if( blockSize >= 32 && locI < 16){
product[locI] = product[locI] + product[locI+16];
__syncthreads();
}
/* the remaining strides fit inside one warp, so the code relies on implicit
warp-synchronous execution instead of __syncthreads() from here on
(not guaranteed on newer architectures) */
if( blockSize >= 16 && locI < 8){
product[locI] = product[locI] + product[locI+8];
}
if( blockSize >= 8 && locI < 4){
product[locI] = product[locI] + product[locI+4];
}
if( blockSize >= 4 && locI < 2){
product[locI] = product[locI] + product[locI+2];
}
if( blockSize >= 2 && locI < 1){
product[locI] = product[locI] + product[locI+1];
}
if( locI == 0){
res[blockIdx.x] = product[0];
}
}
// Generic reduction
// x[] is of size N, and y[] is of size N/blockDim.x
__global__ void reduce(float *x, float *y, int N)
{
__shared__ float result[512];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int locI = threadIdx.x;
int blockSize = blockDim.x;
if(i < N){
result[locI] = x[i];
}else{
result[locI] = 0;
}
__syncthreads();
if( blockSize >= 1024 && locI < 512){
result[locI] = result[locI] + result[locI+512];
__syncthreads();
}
if( blockSize >= 512 && locI < 256){
result[locI] = result[locI] + result[locI+256];
__syncthreads();
}
if( blockSize >= 256 && locI < 128){
result[locI] = result[locI] + result[locI+128];
__syncthreads();
}
if( blockSize >= 128 && locI < 64){
result[locI] = result[locI] + result[locI+64];
__syncthreads();
}
if( blockSize >= 64 && locI < 32){
result[locI] = result[locI] + result[locI+32];
__syncthreads();
}
if( blockSize >= 32 && locI < 16){
result[locI] = result[locI] + result[locI+16];
__syncthreads();
}
if( blockSize >= 16 && locI < 8){
result[locI] = result[locI] + result[locI+8];
}
if( blockSize >= 8 && locI < 4){
result[locI] = result[locI] + result[locI+4];
}
if( blockSize >= 4 && locI < 2){
result[locI] = result[locI] + result[locI+2];
}
if( blockSize >= 2 && locI < 1){
result[locI] = result[locI] + result[locI+1];
}
if(locI == 0){
y[blockIdx.x] = result[0];
}
}
float calcDotProduct1(float* x, float* y, int N){
int threads = 512;
int blocks = (N + threads - 1)/ threads;
float* res;
hipMalloc(&res, sizeof(float));
float resHost = 0;
hipMemcpy(res, &resHost, sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipLaunchKernelGGL(( calcDotProductKern1), dim3(blocks), dim3(threads), 0, 0, x, y, res, N);
CUDA_CHECK;
hipMemcpy(&resHost, res, sizeof(float), hipMemcpyDeviceToHost);
hipFree(res);
CUDA_CHECK;
return resHost;
}
float calcDotProduct2(float* x, float* y, int N){
int threads = 512;
int blocks = (N + threads - 1)/ threads;
float* res;
hipMalloc(&res, sizeof(float));
float resHost = 0;
hipMemcpy(res, &resHost, sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipLaunchKernelGGL(( calcDotProductKern2), dim3(blocks), dim3(threads), 0, 0, x, y, res, N);
CUDA_CHECK;
hipMemcpy(&resHost, res, sizeof(float), hipMemcpyDeviceToHost);
hipFree(res);
CUDA_CHECK;
return resHost;
}
float calcDotProduct3(float* x, float* y, int N){
int threads = 512;
int blocks = (N + threads - 1)/ threads;
float* res;
hipMalloc(&res, blocks*sizeof(float));
CUDA_CHECK;
hipLaunchKernelGGL(( calcDotProductKern3), dim3(blocks), dim3(threads), 0, 0, x, y, res, N);
CUDA_CHECK;
float* resHost = new float[blocks];
hipMemcpy(resHost, res, sizeof(float) * blocks, hipMemcpyDeviceToHost);
CUDA_CHECK;
float p=0;
for(int i=0 ; i < blocks ; i++){
p += resHost[i];
}
delete[] resHost;
hipFree(res);
CUDA_CHECK;
return p;
}
float calcDotProduct3Reduce(float* x, float* y, int N){
int threads = 512;
int blocks = (N + threads - 1)/ threads;
float* res;
hipMalloc(&res, blocks*sizeof(float));
CUDA_CHECK;
hipLaunchKernelGGL(( calcDotProductKern3), dim3(blocks), dim3(threads), 0, 0, x, y, res, N);
CUDA_CHECK;
while(blocks > 1){
int blocksOrig = blocks;
blocks = ceil((float)blocks / threads);
float* resOrig = res;
hipMalloc(&res, blocks*sizeof(float));
hipLaunchKernelGGL(( reduce), dim3(blocks), dim3(threads), 0, 0, resOrig, res, blocksOrig);
hipFree(resOrig);
}
CUDA_CHECK;
float resHost;
hipMemcpy(&resHost, res, sizeof(float), hipMemcpyDeviceToHost);
CUDA_CHECK;
hipFree(res);
return resHost;
}
float calcDotProductThrust(float* x, float* y, int N){
thrust::device_ptr<float> xThStart(x);
thrust::device_ptr<float> yThStart(y);
thrust::device_ptr<float> xThEnd(x + N);
thrust::device_ptr<float> yThEnd(y + N);
return thrust::inner_product(xThStart, xThEnd, yThStart, 0.0f);
}
float timeDotProduct(float (*kernel)(float*, float*, int), float *x, float *y, int N, float ans)
{
CUDA_CHECK;
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
CUDA_CHECK;
hipEventRecord(start, 0);
double prod = kernel(x, y, N);
hipEventRecord(end, 0);
CUDA_CHECK;
hipEventSynchronize(end);
CUDA_CHECK;
hipError_t err = hipGetLastError();
if(err){
std::cout << "Error: " << hipGetErrorString(err) << std::endl;
}
if( fabs(prod - ans) / fabs(ans) < 1e-4 )
{
std::cout << "Multiplication correct! " << prod << " = " << ans << std::endl;
}
else
{
std::cout << "Multiplication wrong! " << prod << " != " << ans << std::endl;
}
float timeInMs;
hipEventElapsedTime(&timeInMs, start, end);
std::cout << "Time: " << timeInMs << "ms" << std::endl << std::endl;
CUDA_CHECK;
hipEventDestroy(start);
hipEventDestroy(end);
CUDA_CHECK;
return 0;
}
int main(void)
{
const int N = 20000000;
float *x_host = new float[N];
float *y_host = new float[N];
// Fill matrix and vector on host
for(int i=0 ; i < N ; i++)
{
x_host[i] = sin(i*0.013);
y_host[i] = cos(i*0.019);
}
float *x;
float *y;
hipMalloc(&x, N*sizeof(float));
hipMalloc(&y, N*sizeof(float));
CUDA_CHECK;
// Copy x and y to device
hipMemcpy(x, x_host, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y, y_host, N*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
//hipMemcpy(y_host, y, N*sizeof(float), hipMemcpyDeviceToHost);
// Check result
clock_t st = clock();
float prod = 0;
for(int i=0 ; i < N ; i++)
{
prod += y_host[i] * x_host[i];
}
clock_t end = clock();
std::cout << "CPU time = " << (end - st) / (float)CLOCKS_PER_SEC * 1000 << " ms" << std::endl;
std::cout << "Naive approach - wrong" << std::endl;
timeDotProduct(calcDotProduct1, x, y, N, prod);
std::cout << "Using atomic operations" << std::endl;
timeDotProduct(calcDotProduct2, x, y, N, prod);
std::cout << "Reduction across one thread block only" << std::endl;
timeDotProduct(calcDotProduct3, x, y, N, prod);
std::cout << "Repeated reduction" << std::endl;
timeDotProduct(calcDotProduct3Reduce, x, y, N, prod);
std::cout << "Thrust" << std::endl;
timeDotProduct(calcDotProductThrust, x, y, N, prod);
hipFree(x);
hipFree(y);
delete[] x_host;
delete[] y_host;
}
| d7e8fee95344077607a8f655b77a80a813601fa8.cu | /* This code will multiply two vectors and
check the result.
*/
#include <cuda.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <iostream>
#include <stdio.h>
#define CUDA_CHECK {cudaThreadSynchronize(); \
cudaError_t err = cudaGetLastError();\
if(err){\
std::cout << "Error: " << cudaGetErrorString(err) << " line " << __LINE__ << std::endl; \
exit(1);\
}}
//#define CUDA_CHECK
/* Fill in your dotProduct kernel here...
*/
// Naive (and wrong) way
__global__ void calcDotProductKern1(float *x, float *y, float *res, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < N){
(*res) += x[i] * y[i];
}
}
// Naive, correct, but slow way
__global__ void calcDotProductKern2(float *x, float *y, float *res, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < N){
atomicAdd(res, x[i] * y[i]);
}
}
// Better reduction first pass
// res needs to point to at least 'blocks' floats.
__global__ void calcDotProductKern3(float *x, float *y, float *res, int N)
{
__shared__ float product[512];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int locI = threadIdx.x;
if(i < N)
{
product[locI] = x[i] * y[i];
}
else{
product[locI] = 0;
}
__syncthreads();
int blockSize = blockDim.x;
if( blockSize >= 1024 && locI < 512){
product[locI] = product[locI] + product[locI+512];
__syncthreads();
}
if( blockSize >= 512 && locI < 256){
product[locI] = product[locI] + product[locI+256];
__syncthreads();
}
if( blockSize >= 256 && locI < 128){
product[locI] = product[locI] + product[locI+128];
__syncthreads();
}
if( blockSize >= 128 && locI < 64){
product[locI] = product[locI] + product[locI+64];
__syncthreads();
}
if( blockSize >= 64 && locI < 32){
product[locI] = product[locI] + product[locI+32];
__syncthreads();
}
if( blockSize >= 32 && locI < 16){
product[locI] = product[locI] + product[locI+16];
__syncthreads();
}
/* the remaining strides fit inside one warp, so the code relies on implicit
warp-synchronous execution instead of __syncthreads() from here on
(not guaranteed on newer architectures) */
if( blockSize >= 16 && locI < 8){
product[locI] = product[locI] + product[locI+8];
}
if( blockSize >= 8 && locI < 4){
product[locI] = product[locI] + product[locI+4];
}
if( blockSize >= 4 && locI < 2){
product[locI] = product[locI] + product[locI+2];
}
if( blockSize >= 2 && locI < 1){
product[locI] = product[locI] + product[locI+1];
}
if( locI == 0){
res[blockIdx.x] = product[0];
}
}
// Generic reduction
// x[] is of size N, and y[] is of size N/blockDim.x
__global__ void reduce(float *x, float *y, int N)
{
__shared__ float result[512];
int i = blockIdx.x * blockDim.x + threadIdx.x;
int locI = threadIdx.x;
int blockSize = blockDim.x;
if(i < N){
result[locI] = x[i];
}else{
result[locI] = 0;
}
__syncthreads();
if( blockSize >= 1024 && locI < 512){
result[locI] = result[locI] + result[locI+512];
__syncthreads();
}
if( blockSize >= 512 && locI < 256){
result[locI] = result[locI] + result[locI+256];
__syncthreads();
}
if( blockSize >= 256 && locI < 128){
result[locI] = result[locI] + result[locI+128];
__syncthreads();
}
if( blockSize >= 128 && locI < 64){
result[locI] = result[locI] + result[locI+64];
__syncthreads();
}
if( blockSize >= 64 && locI < 32){
result[locI] = result[locI] + result[locI+32];
__syncthreads();
}
if( blockSize >= 32 && locI < 16){
result[locI] = result[locI] + result[locI+16];
__syncthreads();
}
if( blockSize >= 16 && locI < 8){
result[locI] = result[locI] + result[locI+8];
}
if( blockSize >= 8 && locI < 4){
result[locI] = result[locI] + result[locI+4];
}
if( blockSize >= 4 && locI < 2){
result[locI] = result[locI] + result[locI+2];
}
if( blockSize >= 2 && locI < 1){
result[locI] = result[locI] + result[locI+1];
}
if(locI == 0){
y[blockIdx.x] = result[0];
}
}
float calcDotProduct1(float* x, float* y, int N){
int threads = 512;
int blocks = (N + threads - 1)/ threads;
float* res;
cudaMalloc(&res, sizeof(float));
float resHost = 0;
cudaMemcpy(res, &resHost, sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
calcDotProductKern1<<<blocks, threads>>>(x, y, res, N);
CUDA_CHECK;
cudaMemcpy(&resHost, res, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(res);
CUDA_CHECK;
return resHost;
}
float calcDotProduct2(float* x, float* y, int N){
int threads = 512;
int blocks = (N + threads - 1)/ threads;
float* res;
cudaMalloc(&res, sizeof(float));
float resHost = 0;
cudaMemcpy(res, &resHost, sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
calcDotProductKern2<<<blocks, threads>>>(x, y, res, N);
CUDA_CHECK;
cudaMemcpy(&resHost, res, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(res);
CUDA_CHECK;
return resHost;
}
float calcDotProduct3(float* x, float* y, int N){
int threads = 512;
int blocks = (N + threads - 1)/ threads;
float* res;
cudaMalloc(&res, blocks*sizeof(float));
CUDA_CHECK;
calcDotProductKern3<<<blocks, threads>>>(x, y, res, N);
CUDA_CHECK;
float* resHost = new float[blocks];
cudaMemcpy(resHost, res, sizeof(float) * blocks, cudaMemcpyDeviceToHost);
CUDA_CHECK;
float p=0;
for(int i=0 ; i < blocks ; i++){
p += resHost[i];
}
delete[] resHost;
cudaFree(res);
CUDA_CHECK;
return p;
}
float calcDotProduct3Reduce(float* x, float* y, int N){
int threads = 512;
int blocks = (N + threads - 1)/ threads;
float* res;
cudaMalloc(&res, blocks*sizeof(float));
CUDA_CHECK;
calcDotProductKern3<<<blocks, threads>>>(x, y, res, N);
CUDA_CHECK;
while(blocks > 1){
int blocksOrig = blocks;
blocks = ceil((float)blocks / threads);
float* resOrig = res;
cudaMalloc(&res, blocks*sizeof(float));
reduce<<<blocks, threads>>>(resOrig, res, blocksOrig);
cudaFree(resOrig);
}
CUDA_CHECK;
float resHost;
cudaMemcpy(&resHost, res, sizeof(float), cudaMemcpyDeviceToHost);
CUDA_CHECK;
cudaFree(res);
return resHost;
}
float calcDotProductThrust(float* x, float* y, int N){
thrust::device_ptr<float> xThStart(x);
thrust::device_ptr<float> yThStart(y);
thrust::device_ptr<float> xThEnd(x + N);
thrust::device_ptr<float> yThEnd(y + N);
return thrust::inner_product(xThStart, xThEnd, yThStart, 0.0f);
}
float timeDotProduct(float (*kernel)(float*, float*, int), float *x, float *y, int N, float ans)
{
CUDA_CHECK;
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
CUDA_CHECK;
cudaEventRecord(start, 0);
double prod = kernel(x, y, N);
cudaEventRecord(end, 0);
CUDA_CHECK;
cudaEventSynchronize(end);
CUDA_CHECK;
cudaError_t err = cudaGetLastError();
if(err){
std::cout << "Error: " << cudaGetErrorString(err) << std::endl;
}
if( fabs(prod - ans) / fabs(ans) < 1e-4 )
{
std::cout << "Multiplication correct! " << prod << " = " << ans << std::endl;
}
else
{
std::cout << "Multiplication wrong! " << prod << " != " << ans << std::endl;
}
float timeInMs;
cudaEventElapsedTime(&timeInMs, start, end);
std::cout << "Time: " << timeInMs << "ms" << std::endl << std::endl;
CUDA_CHECK;
cudaEventDestroy(start);
cudaEventDestroy(end);
CUDA_CHECK;
return 0;
}
int main(void)
{
const int N = 20000000;
float *x_host = new float[N];
float *y_host = new float[N];
// Fill matrix and vector on host
for(int i=0 ; i < N ; i++)
{
x_host[i] = sin(i*0.013);
y_host[i] = cos(i*0.019);
}
float *x;
float *y;
cudaMalloc(&x, N*sizeof(float));
cudaMalloc(&y, N*sizeof(float));
CUDA_CHECK;
// Copy x and y to device
cudaMemcpy(x, x_host, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y, y_host, N*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
//cudaMemcpy(y_host, y, N*sizeof(float), cudaMemcpyDeviceToHost);
// Check result
clock_t st = clock();
float prod = 0;
for(int i=0 ; i < N ; i++)
{
prod += y_host[i] * x_host[i];
}
clock_t end = clock();
std::cout << "CPU time = " << (end - st) / (float)CLOCKS_PER_SEC * 1000 << " ms" << std::endl;
std::cout << "Naive approach - wrong" << std::endl;
timeDotProduct(calcDotProduct1, x, y, N, prod);
std::cout << "Using atomic operations" << std::endl;
timeDotProduct(calcDotProduct2, x, y, N, prod);
std::cout << "Reduction across one thread block only" << std::endl;
timeDotProduct(calcDotProduct3, x, y, N, prod);
std::cout << "Repeated reduction" << std::endl;
timeDotProduct(calcDotProduct3Reduce, x, y, N, prod);
std::cout << "Thrust" << std::endl;
timeDotProduct(calcDotProductThrust, x, y, N, prod);
cudaFree(x);
cudaFree(y);
delete[] x_host;
delete[] y_host;
}
|
b018d4f25d48395c0cac85f259ced45084fd516c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
int M,N; //M rows, N cols
int *h_row, *h_col, *d_row, *d_col, nnz;
double *h_A, *h_value, *d_A, *d_value;
//CPU Compute
void compute_nnz_row(double *A, int *row){
for(int rows=0; rows<M; rows++){
for(int cols=0; cols<N; cols++){
if(A[rows * N + cols] != 0){
row[rows+1] += 1;
}
}
}
}
__global__ void compute_nnz_row_gpu(double *A, int *row, int M, int N){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx < M){
for(int cols = 0; cols<N; cols++){
if(A[idx * N + cols] != 0) row[idx+1] += 1;
}
}
}
int compute_nnz(int *row){
for(int idx = 0; idx < M; idx++){
row[idx+1] += row[idx];
}
return row[M];
}
__global__ void compute_nnz_gpu(int *row, int M){
//int idx = blockDim.x * blockIdx.x + threadIdx.x;
for(int idx = 0; idx<M; idx++){
row[idx+1] += row[idx];
__syncthreads();
}
}
void compute_nnz_col(double *A, int *col, double *value){
int id = 0;
for(int rows = 0; rows < M; rows++){
for(int cols = 0; cols < N; cols++){
if(A[rows * N + cols] != 0){
value[id] = A[rows * N + cols];
col[id] = cols;
id += 1;
}
}
}
}
// row holds the prefix-summed CSR row offsets, so each row writes into its own segment
__global__ void compute_nnz_col_gpu(double *A, const int *row, int *col, double *value, int M, int N){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx < M){
int id = row[idx];
for(int cols = 0; cols < N; cols++){
if(A[idx * N + cols] != 0){
value[id] = A[idx * N + cols];
col[id] = cols;
id += 1;
}
}
}
}
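/* Tiny worked example: for A = [[1,0],[0,1]] (M = N = 2) the CSR arrays are
row = {0, 1, 2}, col = {0, 1}, value = {1, 1}, and nnz = row[M] = 2. */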
int main(int argc, char *argv[]){
M = atoi(argv[1]);
N = atoi(argv[1]);
// M = 30000;
// N = 30000;
h_A = (double *)malloc( M*N*sizeof(double));
h_row = (int *)malloc( (M+1)*sizeof(int));
hipMalloc((void**) &d_A, M*N*sizeof(double));
hipMalloc((void**) &d_row, (M+1)*sizeof(int));
//Initial Matrix
for(int i=0;i<M;i++){
for(int j=0;j<N;j++){
if( (i+j)%2 == 0 ){
h_A[ i * N + j] = 1;
}
else{
h_A[ i * N + j] = 0;
}
}
}
for(int i=0; i<M+1; i++) h_row[i] = 0;
hipMemcpy(d_A, h_A, M*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_row, h_row, (M+1)*sizeof(int), hipMemcpyHostToDevice);
/* double start, end;
start = clock();
//First, we compute nonzero element in every row.
compute_nnz_row(h_A, h_row);
//Report the total nonzero element by sum of each row.
nnz = compute_nnz(h_row);
free(h_row);
//After we know the nnz, we can allocate the nnz_value, nnz_col.
h_value = (double *)malloc( nnz * sizeof(double));
h_col = (int *)malloc( nnz * sizeof(int));
//Compute every nonzero element column index.
compute_nnz_col(h_A, h_col, h_value);
end = clock(); */
//////////////
// GPU Compute Start
hipEvent_t start1, stop1, start2, stop2;
hipEventCreate(&start1);
hipEventCreate(&stop1);
hipEventRecord(start1);
hipLaunchKernelGGL(( compute_nnz_row_gpu), dim3((M+255)/256), dim3(256), 0, 0, d_A, d_row, M, N);
// compute_nnz_gpu<<<(M+255)/256, 256>>>(d_row, M);
hipEventRecord(stop1);
hipEventSynchronize(stop1);
////////////
hipMemcpy(h_row, d_row, (M+1)*sizeof(int), hipMemcpyDeviceToHost);
double start3, end3;
start3 = clock();
compute_nnz(h_row);
end3 = clock();
nnz = h_row[M];
/* send the prefix-summed row offsets back to the device and allocate the
host and device CSR arrays before filling them */
hipMemcpy(d_row, h_row, (M+1)*sizeof(int), hipMemcpyHostToDevice);
h_value = (double *)malloc( nnz*sizeof(double));
h_col = (int *)malloc( nnz*sizeof(int));
hipMalloc((void**) &d_value, nnz*sizeof(double));
hipMalloc((void**) &d_col, nnz*sizeof(int));
hipEventCreate(&start2);
hipEventCreate(&stop2);
hipEventRecord(start2);
hipLaunchKernelGGL(( compute_nnz_col_gpu), dim3((M+255)/256), dim3(256), 0, 0, d_A, d_row, d_col, d_value, M, N);
hipEventRecord(stop2);
hipEventSynchronize(stop2);
hipMemcpy(h_value, d_value, nnz*sizeof(double), hipMemcpyDeviceToHost);
hipMemcpy(h_col, d_col, nnz*sizeof(int), hipMemcpyDeviceToHost);
float milliseconds1 = 0;
float milliseconds2 = 0;
hipEventElapsedTime(&milliseconds1, start1, stop1);
hipEventElapsedTime(&milliseconds2, start2, stop2);
printf("runtime : %f (s)\n", (milliseconds1 + milliseconds2)*1e-3 + (end3 - start3)/CLOCKS_PER_SEC);
//GPU Part End
//Print all of nonzero index
printf("we have %i nonzero values. \n",nnz);
//printf("Dense to CSR elapsed time = %f\n", (end - start)/CLOCKS_PER_SEC);
/*
for(int vecs = 0; vecs < M; vecs++){
for(int i = h_row[vecs];i<h_row[vecs+1];i++){
printf("(%i,%i) = %f\n", vecs, h_col[i], h_value[i]);
}
}
*/
//Free all memory we use.
hipFree(d_A);
hipFree(d_value);
hipFree(d_row);
hipFree(d_col);
free(h_A);
free(h_value);
free(h_row);
free(h_col);
} | b018d4f25d48395c0cac85f259ced45084fd516c.cu | #include <stdio.h>
#include <stdlib.h>
int M,N; //M rows, N cols
int *h_row, *h_col, *d_row, *d_col, nnz;
double *h_A, *h_value, *d_A, *d_value;
//CPU Compute
void compute_nnz_row(double *A, int *row){
for(int rows=0; rows<M; rows++){
for(int cols=0; cols<N; cols++){
if(A[rows * N + cols] != 0){
row[rows+1] += 1;
}
}
}
}
__global__ void compute_nnz_row_gpu(double *A, int *row, int M, int N){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx < M){
for(int cols = 0; cols<N; cols++){
if(A[idx * N + cols] != 0) row[idx+1] += 1;
}
}
}
int compute_nnz(int *row){
for(int idx = 0; idx < M; idx++){
row[idx+1] += row[idx];
}
return row[M];
}
__global__ void compute_nnz_gpu(int *row, int M){
//int idx = blockDim.x * blockIdx.x + threadIdx.x;
for(int idx = 0; idx<M; idx++){
row[idx+1] += row[idx];
__syncthreads();
}
}
void compute_nnz_col(double *A, int *col, double *value){
int id = 0;
for(int rows = 0; rows < M; rows++){
for(int cols = 0; cols < N; cols++){
if(A[rows * N + cols] != 0){
value[id] = A[rows * N + cols];
col[id] = cols;
id += 1;
}
}
}
}
// row holds the prefix-summed CSR row offsets, so each row writes into its own segment
__global__ void compute_nnz_col_gpu(double *A, const int *row, int *col, double *value, int M, int N){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx < M){
int id = row[idx];
for(int cols = 0; cols < N; cols++){
if(A[idx * N + cols] != 0){
value[id] = A[idx * N + cols];
col[id] = cols;
id += 1;
}
}
}
}
int main(int argc, char *argv[]){
M = atoi(argv[1]);
N = atoi(argv[1]);
// M = 30000;
// N = 30000;
h_A = (double *)malloc( M*N*sizeof(double));
h_row = (int *)malloc( (M+1)*sizeof(int));
cudaMalloc((void**) &d_A, M*N*sizeof(double));
cudaMalloc((void**) &d_row, (M+1)*sizeof(int));
//Initial Matrix
for(int i=0;i<M;i++){
for(int j=0;j<N;j++){
if( (i+j)%2 == 0 ){
h_A[ i * N + j] = 1;
}
else{
h_A[ i * N + j] = 0;
}
}
}
for(int i=0; i<M+1; i++) h_row[i] = 0;
cudaMemcpy(d_A, h_A, M*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_row, h_row, (M+1)*sizeof(int), cudaMemcpyHostToDevice);
/* double start, end;
start = clock();
//First, we compute nonzero element in every row.
compute_nnz_row(h_A, h_row);
//Report the total nonzero element by sum of each row.
nnz = compute_nnz(h_row);
free(h_row);
//After we know the nnz, we can allocate the nnz_value, nnz_col.
h_value = (double *)malloc( nnz * sizeof(double));
h_col = (int *)malloc( nnz * sizeof(int));
//Compute every nonzero element column index.
compute_nnz_col(h_A, h_col, h_value);
end = clock(); */
//////////////
// GPU Compute Start
cudaEvent_t start1, stop1, start2, stop2;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventRecord(start1);
compute_nnz_row_gpu<<< (M+255)/256, 256>>>(d_A, d_row, M, N);
// compute_nnz_gpu<<<(M+255)/256, 256>>>(d_row, M);
cudaEventRecord(stop1);
cudaEventSynchronize(stop1);
////////////
cudaMemcpy(h_row, d_row, (M+1)*sizeof(int), cudaMemcpyDeviceToHost);
double start3, end3;
start3 = clock();
compute_nnz(h_row);
end3 = clock();
nnz = h_row[M];
/* send the prefix-summed row offsets back to the device and allocate the
host and device CSR arrays before filling them */
cudaMemcpy(d_row, h_row, (M+1)*sizeof(int), cudaMemcpyHostToDevice);
h_value = (double *)malloc( nnz*sizeof(double));
h_col = (int *)malloc( nnz*sizeof(int));
cudaMalloc((void**) &d_value, nnz*sizeof(double));
cudaMalloc((void**) &d_col, nnz*sizeof(int));
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
cudaEventRecord(start2);
compute_nnz_col_gpu<<< (M+255)/256, 256>>>(d_A, d_row, d_col, d_value, M, N);
cudaEventRecord(stop2);
cudaEventSynchronize(stop2);
cudaMemcpy(h_value, d_value, nnz*sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(h_col, d_col, nnz*sizeof(int), cudaMemcpyDeviceToHost);
float milliseconds1 = 0;
float milliseconds2 = 0;
cudaEventElapsedTime(&milliseconds1, start1, stop1);
cudaEventElapsedTime(&milliseconds2, start2, stop2);
printf("runtime : %f (s)\n", (milliseconds1 + milliseconds2)*1e-3 + (end3 - start3)/CLOCKS_PER_SEC);
//GPU Part End
//Print all of nonzero index
printf("we have %i nonzero values. \n",nnz);
//printf("Dense to CSR elapsed time = %f\n", (end - start)/CLOCKS_PER_SEC);
/*
for(int vecs = 0; vecs < M; vecs++){
for(int i = h_row[vecs];i<h_row[vecs+1];i++){
printf("(%i,%i) = %f\n", vecs, h_col[i], h_value[i]);
}
}
*/
//Free all memory we use.
cudaFree(d_A);
cudaFree(d_value);
cudaFree(d_row);
cudaFree(d_col);
free(h_A);
free(h_value);
free(h_row);
free(h_col);
} |
8b1d0004926097700222460bbf9688da5cd8ed19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "extrautils.cuh"
void sparse_histogram(float * input, int num, device_vector<float>& histogram_values, device_vector<int>& histogram_counts)
{
typedef float ValueType; // input value type
typedef int IndexType; // histogram index type
// copy input data
thrust::device_ptr<float> d_ptr(input);
thrust::device_vector<ValueType> data(d_ptr, d_ptr + num);
// sort data to bring equal elements together
thrust::sort(data.begin(), data.end());
// number of histogram bins is equal to number of unique values (assumes data.size() > 0)
IndexType num_bins = thrust::inner_product(data.begin(), data.end() - 1,
data.begin() + 1,
IndexType(1),
thrust::plus<IndexType>(),
thrust::not_equal_to<ValueType>());
// resize histogram storage
histogram_values.resize(num_bins);
histogram_counts.resize(num_bins);
// compact find the end of each bin of values
thrust::reduce_by_key(data.begin(), data.end(), thrust::constant_iterator<IndexType>(1), histogram_values.begin(), histogram_counts.begin());
}
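/*
 * Worked example (for illustration): if the input is [5, 1, 1, 3, 5, 5], the sort yields
 * [1, 1, 3, 5, 5, 5]; the inner_product above counts the adjacent pairs that differ
 * (1->3 and 3->5) plus the initial 1, giving num_bins = 3; and reduce_by_key then produces
 * histogram_values = [1, 3, 5] with histogram_counts = [2, 1, 3].
 */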
// target is of size channels x numLabels
// counts and values is of size 1 x numLabels
// acts is of size channels*imagePixels x numCases
// labels is of size 1 x numCases
// B_Y and B_X is the dimension of the block.
// numBlocksX = DIVUP(numCases, B_X)
// numBlocksY = channels * DIVUP(imagePixels, B_Y*PixelsPerThread)
// temporary (numBlocksY, numBlocksX*numLabels)
// Pass the num of Labels as the init size of the hist shared memory
template <int B_Y, int B_X, int PixelsPerThread>
__global__ void CalculateSqrtSumSquare(float * labels, float * acts, float * values, int * counts, float * temp, int channels, int imagePixels, int numCases, int numLabels)
{
extern __shared__ float hist[];
__shared__ float shLabel[B_X];
__shared__ float partial[B_Y][B_X];
partial[threadIdx.y][threadIdx.x] = 0;
if (threadIdx.y == 0 && threadIdx.x+blockIdx.x*B_X < numCases)
shLabel[threadIdx.x] = labels[threadIdx.x + blockIdx.x * B_X];
__syncthreads();
int numBlocksPerChannel = DIVUP(imagePixels, B_Y * PixelsPerThread);
int blkIdxInChannel = blockIdx.y % numBlocksPerChannel;
int channelIndex = blockIdx.y / numBlocksPerChannel;
int numBlocksX = DIVUP(numCases, B_X);
acts += threadIdx.x + blockIdx.x * B_X + // offset by cases
threadIdx.y * numCases + // offset by thread in one block
channelIndex * imagePixels * numCases + // offset by channel
blkIdxInChannel * B_Y * PixelsPerThread * numCases; //offset by block inside of channel
#pragma unroll
for (int i=0; i<PixelsPerThread*B_Y; i+=B_Y) {
if (blkIdxInChannel * B_Y * PixelsPerThread + threadIdx.y + i < imagePixels && threadIdx.x + blockIdx.x * B_X < numCases) {
partial[threadIdx.y][threadIdx.x] += acts[i*numCases] * acts[i*numCases];
}
}
__syncthreads();
// Since now the data are all in the shared memory, we don't need to consider the coalesced operation on the memory.
int tidx = threadIdx.y * B_X + threadIdx.x;
for (int i=0; i<numLabels; i+=B_X*B_Y)
if (i+tidx<numLabels)
hist[tidx+i] = 0;
__syncthreads();
for (int i=0; i<numLabels; i+=B_X*B_Y) {
if (i + tidx < numLabels) {
#pragma unroll
for (int j=0; j<B_X; j++) {
if (shLabel[j] == values[i+tidx]) {
#pragma unroll
for (int k=0; k<B_Y; k++) {
hist[i+tidx] += partial[k][j];
}
}
}
hist[i+tidx] *= counts[i+tidx];
}
}
__syncthreads();
float * tmp = temp + (channelIndex * numBlocksPerChannel * numBlocksX + blkIdxInChannel * numBlocksX + blockIdx.x) * numLabels;
for (int i=0; i<numLabels; i+=B_X*B_Y) {
if (tidx+i<numLabels) {
tmp[tidx+i] = hist[tidx+i];
}
}
	// It took me a whole day to realize that a race condition can occur when there is no
	// synchronization between CUDA blocks writing to global memory. Painful debugging.
	// Terminate the current kernel to sync the blocks and leave the rest of the work to a new kernel!
}
// We need to assume the target passed to this function is already set to zero.
// Call target.apply(Zero) to initialize the target.
// B_Y is 1, only one dimension of thread is needed.
// The grid size should be:
// grid size y should be the same as the channel number
// grid size x should be DIVUP(numLabels, B_X)
template <int B_X>
__global__ void MergeTemp(float * temp, int RowsPerChannel, int numLabels, float * target) {
int labelIdx = threadIdx.x + B_X * blockIdx.x;
if (labelIdx < numLabels) {
for (int i=0; i<RowsPerChannel; i++)
target[blockIdx.y*numLabels+labelIdx] += temp[(blockIdx.y*RowsPerChannel+i)*numLabels+labelIdx];
target[blockIdx.y*numLabels+labelIdx] = sqrt(target[blockIdx.y*numLabels+labelIdx]);
}
}
float CalculateSqrtSumSquareMatrix(NVMatrix& labels, NVMatrix& acts, thrust::device_vector<float>& values, thrust::device_vector<int>& counts, NVMatrix& target, int channels, int imagePixels)
{
assert(acts.getNumRows() == channels*imagePixels);
int B_X = 32;
int B_Y = 8;
int numCases = acts.getNumCols();
int PixelsPerThread = 32;
int gridydim = channels * DIVUP(imagePixels, B_Y*PixelsPerThread);
int gridxdim = DIVUP(numCases, B_X);
int numBlocksPerChannel = DIVUP(imagePixels, B_Y * PixelsPerThread);
int numBlocksX = DIVUP(numCases, B_X);
NVMatrix temp(numBlocksX*numBlocksPerChannel*channels, counts.size());
dim3 blocks(gridxdim, gridydim); // The dimension of the grid
	dim3 threads(B_X, B_Y); // The dimensions of the block (32 x 8 = 256 threads; 512 threads per block is the limit for compute capability < 2.0).
float * values_ptr = thrust::raw_pointer_cast(values.data());
int * counts_ptr = thrust::raw_pointer_cast(counts.data());
int numLabels = counts.size();
hipFuncSetCacheConfig(CalculateSqrtSumSquare<8, 32, 32>, hipFuncCachePreferShared);
// Calculate temp
hipLaunchKernelGGL(( CalculateSqrtSumSquare<8, 32, 32>), dim3(blocks), dim3(threads), numLabels*4, 0, labels.getDevData(), acts.getDevData(), values_ptr, counts_ptr, temp.getDevData(), channels, imagePixels, numCases, numLabels);
// Calculate target
dim3 blocksm(DIVUP(numLabels, B_X), channels);
dim3 threadsm(B_X);
hipLaunchKernelGGL(( MergeTemp<32>), dim3(blocksm), dim3(threadsm), 0, 0, temp.getDevData(), numBlocksX*numBlocksPerChannel, numLabels, target.getDevData());
return target.sum();
}
// Blocks Y are used for pixels and channels
// numBlocksY = channels * DIVUP(imgPixels, PixelsPerThread * B_Y)
// Blocks X are used for Samples
// numBlocksX = DIVUP(numCases, B_X)
// The shared memory size should be three times the size of numLabels.
template <int B_Y, int B_X, int PixelsPerThread>
__global__ void kCalculateGradient(float * acts, float * labels, float * sqrts, float * values, int * counts, int numLabels, int channels, int imagePixels, int numCases, float * target)
{
int numBlocksPerChannel = DIVUP(imagePixels, B_Y * PixelsPerThread);
int blkIdxInChannel = blockIdx.y % numBlocksPerChannel;
int channelIndex = blockIdx.y / numBlocksPerChannel;
// int numBlocksX = DIVUP(numCases, B_X);
extern __shared__ float shSqrt[];
int tidx = threadIdx.x + threadIdx.y * B_X;
for (int i=0; i<numLabels; i+=B_X*B_Y)
if (i + tidx < numLabels) {
shSqrt[i+tidx] = sqrts[numLabels*channelIndex + i + tidx];
shSqrt[i+tidx+numLabels] = values[i+tidx];
shSqrt[i+tidx+2*numLabels] = counts[i+tidx];
}
__syncthreads();
target += (channelIndex * imagePixels + blkIdxInChannel * B_Y * PixelsPerThread + threadIdx.y) * numCases + threadIdx.x + B_X * blockIdx.x;
acts += (channelIndex * imagePixels + blkIdxInChannel * B_Y * PixelsPerThread + threadIdx.y) * numCases + threadIdx.x + B_X * blockIdx.x;
int caseIndex = threadIdx.x + B_X * blockIdx.x;
if (caseIndex < numCases) {
for (int i=0; i<PixelsPerThread*B_Y; i+=B_Y)
if (blkIdxInChannel * B_Y * PixelsPerThread + threadIdx.y + i < imagePixels)
for (int j=0; j<numLabels; j++){
if (shSqrt[numLabels+j]==labels[caseIndex]) {
target[i*numCases] = ((float)shSqrt[2*numLabels+j])*acts[i*numCases] / (shSqrt[j] + 2.5e-7); // EPS
}
}
}
}
void CalculateGradient(NVMatrix& acts, NVMatrix& labels, NVMatrix& sqrts, thrust::device_vector<float>& values, thrust::device_vector<int>& counts, int channels, int imagePixels, int numCases, NVMatrix& target)
{
int B_X = 32;
int B_Y = 8;
int PixelsPerThread = 16;
int gridydim = channels * DIVUP(imagePixels, B_Y*PixelsPerThread);
int gridxdim = DIVUP(numCases, B_X);
dim3 blocks(gridxdim, gridydim); // The dimension of the grid
	dim3 threads(B_X, B_Y); // The dimensions of the block (32 x 8 = 256 threads; 512 threads per block is the limit for compute capability < 2.0).
hipLaunchKernelGGL(( kCalculateGradient<8,32,16>), dim3(blocks), dim3(threads), counts.size()*12, 0, acts.getDevData(), labels.getDevData(), sqrts.getDevData(), thrust::raw_pointer_cast(values.data()), thrust::raw_pointer_cast(counts.data()), counts.size(), channels, imagePixels, numCases, target.getDevData());
}
| 8b1d0004926097700222460bbf9688da5cd8ed19.cu | #include "extrautils.cuh"
void sparse_histogram(float * input, int num, device_vector<float>& histogram_values, device_vector<int>& histogram_counts)
{
typedef float ValueType; // input value type
typedef int IndexType; // histogram index type
// copy input data
thrust::device_ptr<float> d_ptr(input);
thrust::device_vector<ValueType> data(d_ptr, d_ptr + num);
// sort data to bring equal elements together
thrust::sort(data.begin(), data.end());
// number of histogram bins is equal to number of unique values (assumes data.size() > 0)
IndexType num_bins = thrust::inner_product(data.begin(), data.end() - 1,
data.begin() + 1,
IndexType(1),
thrust::plus<IndexType>(),
thrust::not_equal_to<ValueType>());
// resize histogram storage
histogram_values.resize(num_bins);
histogram_counts.resize(num_bins);
// compact find the end of each bin of values
thrust::reduce_by_key(data.begin(), data.end(), thrust::constant_iterator<IndexType>(1), histogram_values.begin(), histogram_counts.begin());
}
// target is of size channels x numLabels
// counts and values is of size 1 x numLabels
// acts is of size channels*imagePixels x numCases
// labels is of size 1 x numCases
// B_Y and B_X is the dimension of the block.
// numBlocksX = DIVUP(numCases, B_X)
// numBlocksY = channels * DIVUP(imagePixels, B_Y*PixelsPerThread)
// temporary (numBlocksY, numBlocksX*numLabels)
// Pass the num of Labels as the init size of the hist shared memory
template <int B_Y, int B_X, int PixelsPerThread>
__global__ void CalculateSqrtSumSquare(float * labels, float * acts, float * values, int * counts, float * temp, int channels, int imagePixels, int numCases, int numLabels)
{
extern __shared__ float hist[];
__shared__ float shLabel[B_X];
__shared__ float partial[B_Y][B_X];
partial[threadIdx.y][threadIdx.x] = 0;
if (threadIdx.y == 0 && threadIdx.x+blockIdx.x*B_X < numCases)
shLabel[threadIdx.x] = labels[threadIdx.x + blockIdx.x * B_X];
__syncthreads();
int numBlocksPerChannel = DIVUP(imagePixels, B_Y * PixelsPerThread);
int blkIdxInChannel = blockIdx.y % numBlocksPerChannel;
int channelIndex = blockIdx.y / numBlocksPerChannel;
int numBlocksX = DIVUP(numCases, B_X);
acts += threadIdx.x + blockIdx.x * B_X + // offset by cases
threadIdx.y * numCases + // offset by thread in one block
channelIndex * imagePixels * numCases + // offset by channel
blkIdxInChannel * B_Y * PixelsPerThread * numCases; //offset by block inside of channel
#pragma unroll
for (int i=0; i<PixelsPerThread*B_Y; i+=B_Y) {
if (blkIdxInChannel * B_Y * PixelsPerThread + threadIdx.y + i < imagePixels && threadIdx.x + blockIdx.x * B_X < numCases) {
partial[threadIdx.y][threadIdx.x] += acts[i*numCases] * acts[i*numCases];
}
}
__syncthreads();
// Since now the data are all in the shared memory, we don't need to consider the coalesced operation on the memory.
int tidx = threadIdx.y * B_X + threadIdx.x;
for (int i=0; i<numLabels; i+=B_X*B_Y)
if (i+tidx<numLabels)
hist[tidx+i] = 0;
__syncthreads();
for (int i=0; i<numLabels; i+=B_X*B_Y) {
if (i + tidx < numLabels) {
#pragma unroll
for (int j=0; j<B_X; j++) {
if (shLabel[j] == values[i+tidx]) {
#pragma unroll
for (int k=0; k<B_Y; k++) {
hist[i+tidx] += partial[k][j];
}
}
}
hist[i+tidx] *= counts[i+tidx];
}
}
__syncthreads();
float * tmp = temp + (channelIndex * numBlocksPerChannel * numBlocksX + blkIdxInChannel * numBlocksX + blockIdx.x) * numLabels;
for (int i=0; i<numLabels; i+=B_X*B_Y) {
if (tidx+i<numLabels) {
tmp[tidx+i] = hist[tidx+i];
}
}
	// It took me a whole day to realize that a race condition can occur when there is no
	// synchronization between CUDA blocks writing to global memory. Painful debugging.
	// Terminate the current kernel to sync the blocks and leave the rest of the work to a new kernel!
}
// We need to assume the target passed to this function is already set to zero.
// Call target.apply(Zero) to initialize the target.
// B_Y is 1, only one dimension of thread is needed.
// The grid size should be:
// grid size y should be the same as the channel number
// grid size x should be DIVUP(numLabels, B_X)
template <int B_X>
__global__ void MergeTemp(float * temp, int RowsPerChannel, int numLabels, float * target) {
int labelIdx = threadIdx.x + B_X * blockIdx.x;
if (labelIdx < numLabels) {
for (int i=0; i<RowsPerChannel; i++)
target[blockIdx.y*numLabels+labelIdx] += temp[(blockIdx.y*RowsPerChannel+i)*numLabels+labelIdx];
target[blockIdx.y*numLabels+labelIdx] = sqrt(target[blockIdx.y*numLabels+labelIdx]);
}
}
float CalculateSqrtSumSquareMatrix(NVMatrix& labels, NVMatrix& acts, thrust::device_vector<float>& values, thrust::device_vector<int>& counts, NVMatrix& target, int channels, int imagePixels)
{
assert(acts.getNumRows() == channels*imagePixels);
int B_X = 32;
int B_Y = 8;
int numCases = acts.getNumCols();
int PixelsPerThread = 32;
int gridydim = channels * DIVUP(imagePixels, B_Y*PixelsPerThread);
int gridxdim = DIVUP(numCases, B_X);
int numBlocksPerChannel = DIVUP(imagePixels, B_Y * PixelsPerThread);
int numBlocksX = DIVUP(numCases, B_X);
NVMatrix temp(numBlocksX*numBlocksPerChannel*channels, counts.size());
dim3 blocks(gridxdim, gridydim); // The dimension of the grid
	dim3 threads(B_X, B_Y); // The dimensions of the block (32 x 8 = 256 threads; 512 threads per block is the limit for compute capability < 2.0).
float * values_ptr = thrust::raw_pointer_cast(values.data());
int * counts_ptr = thrust::raw_pointer_cast(counts.data());
int numLabels = counts.size();
cudaFuncSetCacheConfig(CalculateSqrtSumSquare<8, 32, 32>, cudaFuncCachePreferShared);
// Calculate temp
CalculateSqrtSumSquare<8, 32, 32><<<blocks, threads, numLabels*4>>>(labels.getDevData(), acts.getDevData(), values_ptr, counts_ptr, temp.getDevData(), channels, imagePixels, numCases, numLabels);
// Calculate target
dim3 blocksm(DIVUP(numLabels, B_X), channels);
dim3 threadsm(B_X);
MergeTemp<32><<<blocksm, threadsm>>>(temp.getDevData(), numBlocksX*numBlocksPerChannel, numLabels, target.getDevData());
return target.sum();
}
// Blocks Y are used for pixels and channels
// numBlocksY = channels * DIVUP(imgPixels, PixelsPerThread * B_Y)
// Blocks X are used for Samples
// numBlocksX = DIVUP(numCases, B_X)
// The shared memory size should be three times the size of numLabels.
template <int B_Y, int B_X, int PixelsPerThread>
__global__ void kCalculateGradient(float * acts, float * labels, float * sqrts, float * values, int * counts, int numLabels, int channels, int imagePixels, int numCases, float * target)
{
int numBlocksPerChannel = DIVUP(imagePixels, B_Y * PixelsPerThread);
int blkIdxInChannel = blockIdx.y % numBlocksPerChannel;
int channelIndex = blockIdx.y / numBlocksPerChannel;
// int numBlocksX = DIVUP(numCases, B_X);
extern __shared__ float shSqrt[];
int tidx = threadIdx.x + threadIdx.y * B_X;
for (int i=0; i<numLabels; i+=B_X*B_Y)
if (i + tidx < numLabels) {
shSqrt[i+tidx] = sqrts[numLabels*channelIndex + i + tidx];
shSqrt[i+tidx+numLabels] = values[i+tidx];
shSqrt[i+tidx+2*numLabels] = counts[i+tidx];
}
__syncthreads();
target += (channelIndex * imagePixels + blkIdxInChannel * B_Y * PixelsPerThread + threadIdx.y) * numCases + threadIdx.x + B_X * blockIdx.x;
acts += (channelIndex * imagePixels + blkIdxInChannel * B_Y * PixelsPerThread + threadIdx.y) * numCases + threadIdx.x + B_X * blockIdx.x;
int caseIndex = threadIdx.x + B_X * blockIdx.x;
if (caseIndex < numCases) {
for (int i=0; i<PixelsPerThread*B_Y; i+=B_Y)
if (blkIdxInChannel * B_Y * PixelsPerThread + threadIdx.y + i < imagePixels)
for (int j=0; j<numLabels; j++){
if (shSqrt[numLabels+j]==labels[caseIndex]) {
target[i*numCases] = ((float)shSqrt[2*numLabels+j])*acts[i*numCases] / (shSqrt[j] + 2.5e-7); // EPS
}
}
}
}
void CalculateGradient(NVMatrix& acts, NVMatrix& labels, NVMatrix& sqrts, thrust::device_vector<float>& values, thrust::device_vector<int>& counts, int channels, int imagePixels, int numCases, NVMatrix& target)
{
int B_X = 32;
int B_Y = 8;
int PixelsPerThread = 16;
int gridydim = channels * DIVUP(imagePixels, B_Y*PixelsPerThread);
int gridxdim = DIVUP(numCases, B_X);
dim3 blocks(gridxdim, gridydim); // The dimension of the grid
dim3 threads(B_X, B_Y); // The dimension of the block, 32 x 16 = 512, which is the thread number available inside a block for compute compatibility<2.0.
kCalculateGradient<8,32,16><<<blocks, threads, counts.size()*12>>>(acts.getDevData(), labels.getDevData(), sqrts.getDevData(), thrust::raw_pointer_cast(values.data()), thrust::raw_pointer_cast(counts.data()), counts.size(), channels, imagePixels, numCases, target.getDevData());
}
|
c05bdf219da29b657ea4f2ebb2328b27f6f9a727.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
GF100-optimized variant of the "Speculative while-while"
kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
//------------------------------------------------------------------------
/*
extern "C" __global__ void queryConfig(void)
{
g_config.bvhLayout = BVHLayout_Compact;
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 4; // 4*32 = 128 threads, optimal for GTX480
}
*/
//------------------------------------------------------------------------
__device__ void IntersectFermi(const float4 o,
const float4 d,
const float4* nodesA,
bool anyHit,
int& retHitIndex,
float& retHitT,
FW::Vec2f& retBari)
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
// Live state during traversal, stored in registers.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
float dirx, diry, dirz; // Ray direction.
float tmin; // t-value from which the ray starts. Usually 0.
float idirx, idiry, idirz; // 1 / dir
float oodx, oody, oodz; // orig / dir
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
// Fetch ray.
origx = o.x, origy = o.y, origz = o.z;
dirx = d.x, diry = d.y, dirz = d.z;
tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx, oody = origy * idiry, oodz = origz * idirz;
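    // Note (illustrative): with ood = orig * idir precomputed, each slab bound below needs only a
    // single multiply-subtract, since lo * idir - ood == (lo - orig) / dir, which is the usual
    // slab-test entry/exit distance along the ray.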
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = (char*)&traversalStack[0];
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
float4* ptr = (float4*)((char*)nodesA + nodeAddr);
float4 n0xy = ptr[0]; // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = ptr[1]; // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = ptr[2]; // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * idirx - oodx;
float c0hix = n0xy.y * idirx - oodx;
float c0loy = n0xy.z * idiry - oody;
float c0hiy = n0xy.w * idiry - oody;
float c0loz = nz.x * idirz - oodz;
float c0hiz = nz.y * idirz - oodz;
float c1loz = nz.z * idirz - oodz;
float c1hiz = nz.w * idirz - oodz;
float c0min = spanBeginFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
float c0max = spanEndFermi (c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
float c1lox = n1xy.x * idirx - oodx;
float c1hix = n1xy.y * idirx - oodx;
float c1loy = n1xy.z * idiry - oody;
float c1hiy = n1xy.w * idiry - oody;
float c1min = spanBeginFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
float c1max = spanEndFermi (c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
int2 cnodes = *(int2*)&ptr[3];
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (c1min < c0min)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf => process them.
if (!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Intersect the ray against each triangle using Sven Woop's algorithm.
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Read first 16 bytes of the triangle.
// End marker (negative zero) => all triangles processed.
float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
if (__float_as_int(v00.x) == 0x80000000)
break;
// Compute and check intersection t-value.
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f && u <= 1.0f)
{
// Compute and check barycentric v.
float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
retBari.x = u;
retBari.y = v;
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if(nodeAddr<0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
//if (hitIndex != -1)
// hitIndex = tex1Dfetch(t_triIndices, hitIndex);
//STORE_RESULT(rayidx, hitIndex, hitT);
if (hitIndex != -1) {
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
}
retHitIndex = hitIndex;
retHitT = hitT;
}
//------------------------------------------------------------------------
| c05bdf219da29b657ea4f2ebb2328b27f6f9a727.cu | /*
* Copyright (c) 2009-2011, NVIDIA Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA Corporation nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
GF100-optimized variant of the "Speculative while-while"
kernel used in:
"Understanding the Efficiency of Ray Traversal on GPUs",
Timo Aila and Samuli Laine,
Proc. High-Performance Graphics 2009
*/
#include "CudaTracerKernels.hpp"
//------------------------------------------------------------------------
#define STACK_SIZE 64 // Size of the traversal stack in local memory.
//------------------------------------------------------------------------
/*
extern "C" __global__ void queryConfig(void)
{
g_config.bvhLayout = BVHLayout_Compact;
g_config.blockWidth = 32; // One warp per row.
g_config.blockHeight = 4; // 4*32 = 128 threads, optimal for GTX480
}
*/
//------------------------------------------------------------------------
__device__ void IntersectFermi(const float4 o,
const float4 d,
const float4* nodesA,
bool anyHit,
int& retHitIndex,
float& retHitT,
FW::Vec2f& retBari)
{
// Traversal stack in CUDA thread-local memory.
int traversalStack[STACK_SIZE];
// Live state during traversal, stored in registers.
int rayidx; // Ray index.
float origx, origy, origz; // Ray origin.
float dirx, diry, dirz; // Ray direction.
float tmin; // t-value from which the ray starts. Usually 0.
float idirx, idiry, idirz; // 1 / dir
float oodx, oody, oodz; // orig / dir
char* stackPtr; // Current position in traversal stack.
int leafAddr; // First postponed leaf, non-negative if none.
int nodeAddr; // Non-negative: current internal node, negative: second postponed leaf.
int hitIndex; // Triangle index of the closest intersection, -1 if none.
float hitT; // t-value of the closest intersection.
// Fetch ray.
origx = o.x, origy = o.y, origz = o.z;
dirx = d.x, diry = d.y, dirz = d.z;
tmin = o.w;
float ooeps = exp2f(-80.0f); // Avoid div by zero.
idirx = 1.0f / (fabsf(d.x) > ooeps ? d.x : copysignf(ooeps, d.x));
idiry = 1.0f / (fabsf(d.y) > ooeps ? d.y : copysignf(ooeps, d.y));
idirz = 1.0f / (fabsf(d.z) > ooeps ? d.z : copysignf(ooeps, d.z));
oodx = origx * idirx, oody = origy * idiry, oodz = origz * idirz;
// Setup traversal.
traversalStack[0] = EntrypointSentinel; // Bottom-most entry.
stackPtr = (char*)&traversalStack[0];
leafAddr = 0; // No postponed leaf.
nodeAddr = 0; // Start from the root.
hitIndex = -1; // No triangle intersected so far.
hitT = d.w; // tmax
// Traversal loop.
while (nodeAddr != EntrypointSentinel)
{
// Traverse internal nodes until all SIMD lanes have found a leaf.
bool searchingLeaf = true;
while (nodeAddr >= 0 && nodeAddr != EntrypointSentinel)
{
// Fetch AABBs of the two child nodes.
float4* ptr = (float4*)((char*)nodesA + nodeAddr);
float4 n0xy = ptr[0]; // (c0.lo.x, c0.hi.x, c0.lo.y, c0.hi.y)
float4 n1xy = ptr[1]; // (c1.lo.x, c1.hi.x, c1.lo.y, c1.hi.y)
float4 nz = ptr[2]; // (c0.lo.z, c0.hi.z, c1.lo.z, c1.hi.z)
// Intersect the ray against the child nodes.
float c0lox = n0xy.x * idirx - oodx;
float c0hix = n0xy.y * idirx - oodx;
float c0loy = n0xy.z * idiry - oody;
float c0hiy = n0xy.w * idiry - oody;
float c0loz = nz.x * idirz - oodz;
float c0hiz = nz.y * idirz - oodz;
float c1loz = nz.z * idirz - oodz;
float c1hiz = nz.w * idirz - oodz;
float c0min = spanBeginFermi(c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, tmin);
float c0max = spanEndFermi (c0lox, c0hix, c0loy, c0hiy, c0loz, c0hiz, hitT);
float c1lox = n1xy.x * idirx - oodx;
float c1hix = n1xy.y * idirx - oodx;
float c1loy = n1xy.z * idiry - oody;
float c1hiy = n1xy.w * idiry - oody;
float c1min = spanBeginFermi(c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, tmin);
float c1max = spanEndFermi (c1lox, c1hix, c1loy, c1hiy, c1loz, c1hiz, hitT);
bool traverseChild0 = (c0max >= c0min);
bool traverseChild1 = (c1max >= c1min);
// Neither child was intersected => pop stack.
if (!traverseChild0 && !traverseChild1)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// Otherwise => fetch child pointers.
else
{
int2 cnodes = *(int2*)&ptr[3];
nodeAddr = (traverseChild0) ? cnodes.x : cnodes.y;
// Both children were intersected => push the farther one.
if (traverseChild0 && traverseChild1)
{
if (c1min < c0min)
swap(nodeAddr, cnodes.y);
stackPtr += 4;
*(int*)stackPtr = cnodes.y;
}
}
// First leaf => postpone and continue traversal.
if (nodeAddr < 0 && leafAddr >= 0)
{
searchingLeaf = false;
leafAddr = nodeAddr;
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
// All SIMD lanes have found a leaf => process them.
if (!__any(searchingLeaf))
break;
}
// Process postponed leaf nodes.
while (leafAddr < 0)
{
// Intersect the ray against each triangle using Sven Woop's algorithm.
for (int triAddr = ~leafAddr;; triAddr += 3)
{
// Read first 16 bytes of the triangle.
// End marker (negative zero) => all triangles processed.
float4 v00 = tex1Dfetch(t_trisA, triAddr + 0);
if (__float_as_int(v00.x) == 0x80000000)
break;
// Compute and check intersection t-value.
float Oz = v00.w - origx*v00.x - origy*v00.y - origz*v00.z;
float invDz = 1.0f / (dirx*v00.x + diry*v00.y + dirz*v00.z);
float t = Oz * invDz;
if (t > tmin && t < hitT)
{
// Compute and check barycentric u.
float4 v11 = tex1Dfetch(t_trisA, triAddr + 1);
float Ox = v11.w + origx*v11.x + origy*v11.y + origz*v11.z;
float Dx = dirx*v11.x + diry*v11.y + dirz*v11.z;
float u = Ox + t*Dx;
if (u >= 0.0f && u <= 1.0f)
{
// Compute and check barycentric v.
float4 v22 = tex1Dfetch(t_trisA, triAddr + 2);
float Oy = v22.w + origx*v22.x + origy*v22.y + origz*v22.z;
float Dy = dirx*v22.x + diry*v22.y + dirz*v22.z;
float v = Oy + t*Dy;
if (v >= 0.0f && u + v <= 1.0f)
{
// Record intersection.
// Closest intersection not required => terminate.
hitT = t;
hitIndex = triAddr;
if (anyHit)
{
nodeAddr = EntrypointSentinel;
break;
}
retBari.x = u;
retBari.y = v;
}
}
}
} // triangle
// Another leaf was postponed => process it as well.
leafAddr = nodeAddr;
if(nodeAddr<0)
{
nodeAddr = *(int*)stackPtr;
stackPtr -= 4;
}
} // leaf
} // traversal
// Remap intersected triangle index, and store the result.
//if (hitIndex != -1)
// hitIndex = tex1Dfetch(t_triIndices, hitIndex);
//STORE_RESULT(rayidx, hitIndex, hitT);
if (hitIndex != -1) {
hitIndex = tex1Dfetch(t_triIndices, hitIndex);
}
retHitIndex = hitIndex;
retHitT = hitT;
}
//------------------------------------------------------------------------
|
72eea32a8ccacafba895452c6eadb57ec30a2382.hip | // !!! This is a file automatically generated by hipify!!!
/*
* FullyConnectedLayer.cpp
*
* Created on: 2016. 5. 10.
* Author: jhkim
*/
#include "hip/hip_runtime.h"
#include <algorithm>
#include "FullyConnectedLayer.h"
#include "MathFunctions.h"
#include "Util.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "PropMgmt.h"
#include "Update.h"
#include "Updater.h"
#include "Donator.h"
#include "frcnn_common.h"
#include "MemoryMgmt.h"
#define FULLYCONNECTEDLAYER_LOG 0
using namespace std;
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
 * Fills a floating-point array with the given value.
 *
 * @param vec The array to fill.
 * @param size The number of elements in the array.
 * @param value The value written to every element.
*/
template <typename Dtype>
__global__ void FillValues(Dtype *vec, int size, Dtype value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = value;
}
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
 * Applies a dropout mask to an input array and rescales the surviving elements.
 *
 * @param n The number of elements in the arrays.
 * @param in The input array.
 * @param mask The dropout mask.
 * @param threashold Mask threshold (unused in the active code path).
 * @param scale Scale factor applied to the kept elements.
 * @param out The output array.
*/
template <typename Dtype>
__global__ void Dropout(const int n, const Dtype* in, const Dtype* mask,
const unsigned int threashold, const float scale, Dtype *out)
{
CUDA_KERNEL_LOOP(index, n) {
//out[index] = in[index] * (mask[index] > threshold) * scale;
out[index] = in[index] * (mask[index]) * scale;
}
}
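// Note (illustrative): with inverted dropout the scale passed in is typically 1/(1 - p) so that the
// expected activation is unchanged at training time; in the active code path above the mask value is
// multiplied in directly and the threashold argument is not used.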
/**
 * Adds the src array to the dst array element-wise.
 *
 * @param dst dst array; the place where dst + src is stored
 * @param src src array
 * @param N The number of elements in the array.
*/
template <typename Dtype>
__global__ void AddData(Dtype* dst, const Dtype* src, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
dst[idx] = dst[idx] + src[idx];
}
template <typename Dtype>
FullyConnectedLayer<Dtype>::~FullyConnectedLayer() {
if (SLPROP(FullyConnected, receive)) {
Donator<Dtype>::releaseReceiver(SLPROP(FullyConnected, donatorID));
} else {
Util::clearVector(this->_params);
Util::clearVector(this->_paramsHistory);
Util::clearVector(this->_paramsHistory2);
}
this->updateParams.clear();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::reshape() {
if (!Layer<Dtype>::_adjustInputShape()) {
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
SASSERT0(count == inputDataCount);
}
/*
	// Allow the case where the batch count changes.
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
if (inputDataCount == count)
return;
*/
	// XXX: Note
	// Here we assume that only the batch count can change,
	// so only a change in the batch count is checked.
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
this->batches = this->_inputData[0]->getShape(0);
this->in_rows = this->_inputData[0]->getCountByAxis(SLPROP(FullyConnected, axis));
this->out_rows = SLPROP(FullyConnected, nOut);
const uint32_t channels = 1;
const uint32_t cols = 1;
//this->_inputShape[0] = {batches, channels, in_rows, cols};
this->_inputShape[0] = this->_inputData[0]->getShape();
this->_outputData[0]->reshape({this->batches, channels, this->out_rows, cols});
/*
checkCUDNN(cudnnSetTensor4dDescriptor(
this->inputTensorDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
this->batches, channels, this->in_rows, cols));
checkCUDNN(cudnnSetTensor4dDescriptor(
this->outputTensorDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
this->batches, channels, this->out_rows, cols));
*/
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer' input-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), this->batches, channels, this->in_rows, cols);
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), this->batches, channels, this->out_rows, cols);
const uint32_t u_in = in_rows;
const uint32_t u_out = out_rows;
const uint32_t b_in = batches * in_rows;
const uint32_t b_out = batches * out_rows;
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer reshape info (u_in, u_out, b_in, b_out) : %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), u_in, u_out, b_in, b_out);
this->_params[ParamType::Weight]->reshape({1, 1, u_out, u_in});
this->_params[ParamType::Bias]->reshape({1, u_out, 1, 1});
if (this->_paramsHistory[ParamType::Weight] != NULL)
this->_paramsHistory[ParamType::Weight]->reshape({1, 1, u_out, u_in});
if (this->_paramsHistory[ParamType::Bias] != NULL)
this->_paramsHistory[ParamType::Bias]->reshape({1, u_out, 1, 1});
if (this->_paramsHistory2[ParamType::Weight] != NULL)
this->_paramsHistory2[ParamType::Weight]->reshape({1, 1, u_out, u_in});
if (this->_paramsHistory2[ParamType::Bias] != NULL)
this->_paramsHistory2[ParamType::Bias]->reshape({1, u_out, 1, 1});
if (!this->_paramsInitialized[Weight]) {
SLPROP(FullyConnected, weightFiller).fill(this->_params[ParamType::Weight]);
this->_paramsInitialized[Weight] = true;
}
if (!this->_paramsInitialized[Bias]) {
SLPROP(FullyConnected, weightFiller).fill(this->_params[ParamType::Bias]);
this->_paramsInitialized[Bias] = true;
}
if (this->updateParams.size() == 0) {
UpdateParam upWeight;
upWeight.paramType = Weight;
upWeight.paramDataPtr = (void*)this->_params[Weight];
upWeight.paramHis1Ptr = (void*)this->_paramsHistory[Weight];
upWeight.paramHis2Ptr = (void*)this->_paramsHistory2[Weight];
this->updateParams.push_back(upWeight);
UpdateParam upBias;
upBias.paramType = Bias;
upBias.paramDataPtr = (void*)this->_params[Bias];
upBias.paramHis1Ptr = (void*)this->_paramsHistory[Bias];
upBias.paramHis2Ptr = (void*)this->_paramsHistory2[Bias];
this->updateParams.push_back(upBias);
}
this->_onevec.reshape(this->batches);
this->_onevec.reset_host_mem(false, 1.0);
//checkCudaErrors(Util::ucudaMalloc(&this->d_onevec, sizeof(Dtype)*batches));
//FillValues<<<SOOOA_GET_BLOCKS(batches), SOOOA_CUDA_NUM_THREADS>>>(
// this->d_onevec, batches, 1.0f);
this->_mask.reshape(b_out);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::update() {
const uint32_t weightSize = this->in_rows * this->out_rows;
const Dtype regScale =
SNPROP(weightDecay) * SLPROP(FullyConnected, weightUpdateParam).decay_mult;
const Dtype learnScale = Update<Dtype>::calcLearningRate() *
SLPROP(FullyConnected, weightUpdateParam).lr_mult;
const Dtype beta1 = SNPROP(beta1);
const Dtype beta2 = SNPROP(beta2);
SLPROP(FullyConnected, decayedBeta1) *= beta1;
SLPROP(FullyConnected, decayedBeta2) *= beta2;
UpdateContext contextWeight =
Update<Dtype>::makeContext(weightSize, regScale, learnScale);
const uint32_t biasSize = out_rows;
const Dtype regScale_b =
SNPROP(weightDecay) * SLPROP(FullyConnected, biasUpdateParam).decay_mult;
const Dtype learnScale_b = Update<Dtype>::calcLearningRate() *
SLPROP(FullyConnected, biasUpdateParam).lr_mult;
UpdateContext contextBias =
Update<Dtype>::makeContext(biasSize, regScale_b, learnScale_b);
SASSUME0(this->updateParams.size() == 2);
this->updateParams[Weight].context = contextWeight;
this->updateParams[Bias].context = contextBias;
Updater::updateParams(this->updateParams);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::applyChanges(LearnableLayer<Dtype> *targetLayer) {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const uint32_t biasSize = this->out_rows;
FullyConnectedLayer<Dtype>* _targetLayer = (FullyConnectedLayer<Dtype>*)targetLayer;
//int blockSize = BW;
int blockSize = SOOOA_CUDA_NUM_THREADS;
int gridSize;
gridSize = (weightSize + blockSize -1) / blockSize;
hipLaunchKernelGGL(( AddData), dim3(gridSize), dim3(blockSize), 0, 0,
_targetLayer->_params[Weight]->mutable_device_grad(),
this->_params[Weight]->device_grad(), weightSize);
gridSize = (biasSize + blockSize -1) / blockSize;
hipLaunchKernelGGL(( AddData), dim3(gridSize), dim3(blockSize), 0, 0,
_targetLayer->_params[Bias]->mutable_device_grad(),
this->_params[Bias]->device_grad(), biasSize);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::syncParams(LearnableLayer<Dtype> *targetLayer) {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const uint32_t biasSize = this->out_rows;
FullyConnectedLayer<Dtype>* _targetLayer = (FullyConnectedLayer<Dtype>*)targetLayer;
memcpy(this->_params[Weight]->mutable_host_grad(), _targetLayer->_params[Weight]->host_grad(),
weightSize);
memcpy(this->_params[Bias]->mutable_host_grad(), _targetLayer->_params[Bias]->host_grad(),
biasSize);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::saveParams(ofstream& ofs) {
LearnableLayer<Dtype>::saveParams(ofs);
/*
if (this->_inputData.size() == 1) {
cout << SLPROP_BASE(name) << " saves as usual ... " << endl;
LearnableLayer<Dtype>::saveParams(ofs);
} else {
cout << SLPROP_BASE(name) << " saves as special ... " << endl;
uint32_t numParams = this->_params.size();
vector<vector<float>> bboxMeans;
vector<vector<float>> bboxStds;
fill2dVecWithData(this->_inputData[1], bboxMeans);
fill2dVecWithData(this->_inputData[2], bboxStds);
#if 0
this->_inputData[1]->print_shape();
this->_inputData[2]->print_shape();
this->_params[0]->print_shape();
this->_params[1]->print_shape();
exit(1);
#endif
Data<Dtype>* param0 = this->_params[0];
Data<Dtype> orig0(param0->_name, true);
orig0.reshapeLike(param0);
const Dtype* srcPtr0 = param0->host_data();
Dtype* dstPtr0 = orig0.mutable_host_data();
const int numRows0 = param0->getShape(2);
const int numCols0 = param0->getShape(3);
int index;
int id1, id2;
for (int row = 0; row < numRows0; row++) {
id2 = row / 4;
id1 = row % 4;
for (int col = 0; col < numCols0; col++) {
index = row * numCols0 + col;
dstPtr0[index] = srcPtr0[index] * bboxStds[id2][id1];
}
}
Data<Dtype>* param1 = this->_params[1];
Data<Dtype> orig1(param1->_name, true);
orig1.reshapeLike(param1);
const Dtype* srcPtr1 = param1->host_data();
Dtype* dstPtr1 = orig1.mutable_host_data();
const int numRows1 = param1->getShape(1);
for (int row = 0; row < numRows1; row++) {
id2 = row / 4;
id1 = row % 4;
dstPtr1[row] = srcPtr1[row] * bboxStds[id2][id1] + bboxMeans[id2][id1];
}
orig0.save(ofs);
orig1.save(ofs);
}
*/
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::feedforward() {
reshape();
_computeWeightedData();
_computeWeightBiasedData();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightedData() {
// Apply weight to input data
const Dtype* d_weightData = this->_params[Weight]->device_data();
const Dtype* d_inputData = this->_inputData[0]->device_data();
//Dtype* d_preActivationData = _preActivation->mutable_device_data();
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
/**
	 * [hipblasSgemm() function description (from the cuBLAS User Documentation)]
*
* hipblasStatus_t hipblasSgemm(hipblasHandle_t handle, hipblasOperation_t transa,
* hipblasOperation_t transb, int m, int n, int k,
* const float *alpha, const float *A, int * lda,
* const float *B, int ldb, const float *beta, float *C,
* int ldc)
*
	 * C = alpha * op(A) * op(B) + beta * C
	 *
	 * where alpha and beta are scalars, and A, B and C are matrices stored in column-major format
	 * with dimensions op(A) m x k, op(B) k x n and C m x n, respectively. Also, for
	 * matrix A
	 *
	 * op(A) = A if transa == HIPBLAS_OP_N, A^T if transa == HIPBLAS_OP_T, A^H if transa ==
	 * HIPBLAS_OP_C
*
* and op ( B ) is defined similarly for matrix B .
*
* hipblasOperation_t option
* (1) HIPBLAS_OP_N => the non-transpose operation is selected.
* (2) HIPBLAS_OP_T => the transpose operation is selected.
* (3) HIPBLAS_OP_C => the conjugate transpose operation is selected.
*
* lda,ldb,ldc => leading dimension of two-dimensional array used to store the matrix A,
* B, C
*/
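	/*
	 * Shape sketch for the calls below (illustrative; it assumes the soooa_gpu_gemm/gemv
	 * wrappers follow the usual row-major (M, N, K) convention):
	 *
	 *   output (batches x out_rows) = input (batches x in_rows) * W^T (in_rows x out_rows)
	 *
	 * W is stored as (out_rows x in_rows) and transposed via CblasTrans; for a single
	 * sample this collapses to the gemv call y = W * x.
	 */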
if (this->batches == 1) {
soooa_gpu_gemv(CblasNoTrans,
this->out_rows, this->in_rows,
Cuda::alpha, d_weightData, d_inputData,
Cuda::beta, d_outputData);
} else {
soooa_gpu_gemm(CblasNoTrans, CblasTrans,
this->batches, this->out_rows, this->in_rows,
Cuda::alpha, d_inputData, d_weightData,
Cuda::beta, d_outputData);
}
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightBiasedData() {
// Add bias to weighted input data
const Dtype* d_biasData = this->_params[Bias]->device_data();
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
if (this->batches == 1) {
soooa_gpu_axpy(this->out_rows, 1.0f, d_biasData, d_outputData);
} else {
soooa_gpu_gemm(CblasNoTrans, CblasNoTrans,
this->batches, this->out_rows, 1,
Cuda::alpha, this->_onevec.device_mem(), d_biasData,
Cuda::alpha, d_outputData);
}
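	// Note (illustrative): the gemm above adds the bias as a rank-1 update,
	// output += onevec (batches x 1) * bias (1 x out_rows),
	// i.e. the bias row is broadcast onto every sample in the batch.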
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::backpropagation() {
	/*
	 * Consider a simple network layer:
	 *
	 *   <<<<        ith layer        >>>>          <<<< i+1th layer >>>>
	 *   .....   Xi --Wi--> Ai --Fi--> Yi (=Xi+1)   ........
	 *                Bi
	 *   .....   O --------- O ------------ O   ........
	 *                                           dL/dYi is already computed
	 *
	 *   ( Xi = input of the ith layer,  Wi = weight of the ith layer,
	 *     Bi = bias of the ith layer,   Ai = pre-activation of the ith layer,
	 *     Fi = activation function of the ith layer,
	 *     Yi = output of the ith layer and input of the i+1th layer,
	 *     L = loss,  dL/dYi = gradient propagated back from the i+1th layer )
	 *
	 * For gradient descent we need dL/dWi & dL/dBi. By the chain rule:
	 *   (1) dL/dWi = dL/dYi * dYi/dAi * dAi/dWi
	 *   (2) dL/dBi = dL/dYi * dYi/dAi * dAi/dBi
	 *
	 * Equations (1) and (2) are computed in the following four steps.
	 *
	 * (A) dL/dYi  : already stored in the grad of _outputData[0] by the backward pass of
	 *               the i+1th layer.
	 *
	 * (B) dYi/dAi : _computePreActivationGrad() computes dL/dYi * dYi/dAi. It needs
	 *               dL/dYi, Yi and Ai; Yi is the data of _outputData[0] and Ai is the data
	 *               of _preActivation, both saved during the forward pass. The activation
	 *               function takes Yi, Ai and dL/dYi and stores dL/dYi * dYi/dAi in the
	 *               grad of this->_preActivation.
	 *
	 * (C) dAi/dWi : _computeWeightGrad() combines (A) and (B) into the weight grad. Since
	 *               dAi/dWi is simply Xi, this is a GEMM with the transpose of Xi; the
	 *               result is stored in the grad of _params[Weight].
	 *
	 * (D) dAi/dBi : analogous to (C). _computeBiasGrad() computes the bias gradient and
	 *               stores it in the grad of _params[Bias].
	 *
	 * Finally, the gradient dL/dYi-1 for the i-1th layer is computed by _computeInputGrad()
	 * and stored in the grad of _inputData:
	 * dL/dYi-1 = dL/dXi = dL/dAi * dAi/dXi, where dL/dAi is the grad of _preActivation and
	 * dAi/dXi is the transpose of Wi.
	 */
_computeWeightGrad();
_computeBiasGrad();
_computeInputGrad();
}
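/*
 * Shape sketch of the three steps (illustrative; it assumes the row-major (M, N, K)
 * convention of the soooa_gpu_gemm/gemv wrappers):
 *
 *   _computeWeightGrad : dW (out_rows x in_rows) += dY^T (out_rows x batches) * X (batches x in_rows)
 *   _computeBiasGrad   : dB (out_rows)           += dY^T (out_rows x batches) * 1 (batches)
 *   _computeInputGrad  : dX (batches x in_rows)   = dY  (batches x out_rows) * W (out_rows x in_rows)
 */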
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightGrad() {
// d(Cost)/d(Weight)
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
const Dtype* d_inputData = this->_inputData[0]->device_data();
Dtype* d_weightGrad = this->_params[Weight]->mutable_device_grad();
	// d_weightGrad is accumulated (beta argument = Cuda::alpha) rather than overwritten, so that
	// the snapshot diff is preserved; SoooA resets the gradients elsewhere, so Cuda::alpha is
	// used here instead of resetting with Cuda::beta.
	// The same applies to _computeBiasGrad.
soooa_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
this->out_rows, this->in_rows, this->batches,
Cuda::alpha, d_outputGrad, d_inputData,
Cuda::alpha, d_weightGrad);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeBiasGrad() {
// d(Cost)/d(Bias) (same as d_preActivationGrad)
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
Dtype* d_biasGrad = this->_params[Bias]->mutable_device_grad();
soooa_gpu_gemv<Dtype>(CblasTrans,
this->batches, this->out_rows,
Cuda::alpha, d_outputGrad, this->_onevec.device_mem(),
Cuda::alpha, d_biasGrad);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeInputGrad() {
//const uint32_t batches = this->_inputShape[0][0];
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
// d(Cost)/d(Input)
const Dtype* d_weightData = this->_params[Weight]->device_data();
//const Dtype* d_preActivationGrad = this->_preActivation->device_grad();
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
Dtype* d_inputGrad = this->_inputData[0]->mutable_device_grad();
soooa_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
this->batches, this->in_rows, this->out_rows,
Cuda::alpha, d_outputGrad, d_weightData,
Cuda::beta, d_inputGrad);
this->_inputData[0]->print_grad("inputGrad:");
}
template FullyConnectedLayer<float>::~FullyConnectedLayer();
template void FullyConnectedLayer<float>::reshape();
template void FullyConnectedLayer<float>::update();
template void FullyConnectedLayer<float>::feedforward();
template void FullyConnectedLayer<float>::backpropagation();
/*
template void* FullyConnectedLayer<float>::initLayer();
template void FullyConnectedLayer<float>::destroyLayer(void* instancePtr);
template void FullyConnectedLayer<float>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index);
template bool FullyConnectedLayer<float>::allocLayerTensors(void* instancePtr);
template void FullyConnectedLayer<float>::forwardTensor(void* instancePtr, int miniBatchIdx);
template void FullyConnectedLayer<float>::backwardTensor(void* instancePtr);
template void FullyConnectedLayer<float>::learnTensor(void* instancePtr);
*/
| 72eea32a8ccacafba895452c6eadb57ec30a2382.cu | /*
* FullyConnectedLayer.cpp
*
* Created on: 2016. 5. 10.
* Author: jhkim
*/
#include "cuda_runtime.h"
#include <algorithm>
#include "FullyConnectedLayer.h"
#include "MathFunctions.h"
#include "Util.h"
#include "Network.h"
#include "SysLog.h"
#include "StdOutLog.h"
#include "PropMgmt.h"
#include "Update.h"
#include "Updater.h"
#include "Donator.h"
#include "frcnn_common.h"
#include "MemoryMgmt.h"
#define FULLYCONNECTEDLAYER_LOG 0
using namespace std;
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
 * Fills a floating-point array with the given value.
 *
 * @param vec The array to fill.
 * @param size The number of elements in the array.
 * @param value The value written to every element.
*/
template <typename Dtype>
__global__ void FillValues(Dtype *vec, int size, Dtype value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= size)
return;
vec[idx] = value;
}
///////////////////////////////////////////////////////////////////////////////////////////
// GPU Kernels
/**
 * Applies a dropout mask to an input array and rescales the surviving elements.
 *
 * @param n The number of elements in the arrays.
 * @param in The input array.
 * @param mask The dropout mask.
 * @param threashold Mask threshold (unused in the active code path).
 * @param scale Scale factor applied to the kept elements.
 * @param out The output array.
*/
template <typename Dtype>
__global__ void Dropout(const int n, const Dtype* in, const Dtype* mask,
const unsigned int threashold, const float scale, Dtype *out)
{
CUDA_KERNEL_LOOP(index, n) {
//out[index] = in[index] * (mask[index] > threshold) * scale;
out[index] = in[index] * (mask[index]) * scale;
}
}
/**
 * Adds the src array to the dst array element-wise.
 *
 * @param dst dst array; the place where dst + src is stored
 * @param src src array
 * @param N The number of elements in the array.
*/
template <typename Dtype>
__global__ void AddData(Dtype* dst, const Dtype* src, int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N)
return;
dst[idx] = dst[idx] + src[idx];
}
template <typename Dtype>
FullyConnectedLayer<Dtype>::~FullyConnectedLayer() {
if (SLPROP(FullyConnected, receive)) {
Donator<Dtype>::releaseReceiver(SLPROP(FullyConnected, donatorID));
} else {
Util::clearVector(this->_params);
Util::clearVector(this->_paramsHistory);
Util::clearVector(this->_paramsHistory2);
}
this->updateParams.clear();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::reshape() {
if (!Layer<Dtype>::_adjustInputShape()) {
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
SASSERT0(count == inputDataCount);
}
/*
	// Allow the case where the batch count changes.
const uint32_t count = Util::vecCountByAxis(this->_inputShape[0], 1);
const uint32_t inputDataCount = this->_inputData[0]->getCountByAxis(1);
if (inputDataCount == count)
return;
*/
	// XXX: Note
	// Here we assume that only the batch count can change,
	// so only a change in the batch count is checked.
if (!Layer<Dtype>::_isInputShapeChanged(0))
return;
this->batches = this->_inputData[0]->getShape(0);
this->in_rows = this->_inputData[0]->getCountByAxis(SLPROP(FullyConnected, axis));
this->out_rows = SLPROP(FullyConnected, nOut);
const uint32_t channels = 1;
const uint32_t cols = 1;
//this->_inputShape[0] = {batches, channels, in_rows, cols};
this->_inputShape[0] = this->_inputData[0]->getShape();
this->_outputData[0]->reshape({this->batches, channels, this->out_rows, cols});
/*
checkCUDNN(cudnnSetTensor4dDescriptor(
this->inputTensorDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
this->batches, channels, this->in_rows, cols));
checkCUDNN(cudnnSetTensor4dDescriptor(
this->outputTensorDesc,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
this->batches, channels, this->out_rows, cols));
*/
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer' input-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), this->batches, channels, this->in_rows, cols);
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer' output-0 has reshaped as: %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), this->batches, channels, this->out_rows, cols);
const uint32_t u_in = in_rows;
const uint32_t u_out = out_rows;
const uint32_t b_in = batches * in_rows;
const uint32_t b_out = batches * out_rows;
STDOUT_COND_LOG(FULLYCONNECTEDLAYER_LOG,
"<%s> layer reshape info (u_in, u_out, b_in, b_out) : %dx%dx%dx%d\n",
SLPROP_BASE(name).c_str(), u_in, u_out, b_in, b_out);
this->_params[ParamType::Weight]->reshape({1, 1, u_out, u_in});
this->_params[ParamType::Bias]->reshape({1, u_out, 1, 1});
if (this->_paramsHistory[ParamType::Weight] != NULL)
this->_paramsHistory[ParamType::Weight]->reshape({1, 1, u_out, u_in});
if (this->_paramsHistory[ParamType::Bias] != NULL)
this->_paramsHistory[ParamType::Bias]->reshape({1, u_out, 1, 1});
if (this->_paramsHistory2[ParamType::Weight] != NULL)
this->_paramsHistory2[ParamType::Weight]->reshape({1, 1, u_out, u_in});
if (this->_paramsHistory2[ParamType::Bias] != NULL)
this->_paramsHistory2[ParamType::Bias]->reshape({1, u_out, 1, 1});
if (!this->_paramsInitialized[Weight]) {
SLPROP(FullyConnected, weightFiller).fill(this->_params[ParamType::Weight]);
this->_paramsInitialized[Weight] = true;
}
if (!this->_paramsInitialized[Bias]) {
SLPROP(FullyConnected, weightFiller).fill(this->_params[ParamType::Bias]);
this->_paramsInitialized[Bias] = true;
}
if (this->updateParams.size() == 0) {
UpdateParam upWeight;
upWeight.paramType = Weight;
upWeight.paramDataPtr = (void*)this->_params[Weight];
upWeight.paramHis1Ptr = (void*)this->_paramsHistory[Weight];
upWeight.paramHis2Ptr = (void*)this->_paramsHistory2[Weight];
this->updateParams.push_back(upWeight);
UpdateParam upBias;
upBias.paramType = Bias;
upBias.paramDataPtr = (void*)this->_params[Bias];
upBias.paramHis1Ptr = (void*)this->_paramsHistory[Bias];
upBias.paramHis2Ptr = (void*)this->_paramsHistory2[Bias];
this->updateParams.push_back(upBias);
}
this->_onevec.reshape(this->batches);
this->_onevec.reset_host_mem(false, 1.0);
//checkCudaErrors(Util::ucudaMalloc(&this->d_onevec, sizeof(Dtype)*batches));
//FillValues<<<SOOOA_GET_BLOCKS(batches), SOOOA_CUDA_NUM_THREADS>>>(
// this->d_onevec, batches, 1.0f);
this->_mask.reshape(b_out);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::update() {
const uint32_t weightSize = this->in_rows * this->out_rows;
const Dtype regScale =
SNPROP(weightDecay) * SLPROP(FullyConnected, weightUpdateParam).decay_mult;
const Dtype learnScale = Update<Dtype>::calcLearningRate() *
SLPROP(FullyConnected, weightUpdateParam).lr_mult;
const Dtype beta1 = SNPROP(beta1);
const Dtype beta2 = SNPROP(beta2);
SLPROP(FullyConnected, decayedBeta1) *= beta1;
SLPROP(FullyConnected, decayedBeta2) *= beta2;
UpdateContext contextWeight =
Update<Dtype>::makeContext(weightSize, regScale, learnScale);
const uint32_t biasSize = out_rows;
const Dtype regScale_b =
SNPROP(weightDecay) * SLPROP(FullyConnected, biasUpdateParam).decay_mult;
const Dtype learnScale_b = Update<Dtype>::calcLearningRate() *
SLPROP(FullyConnected, biasUpdateParam).lr_mult;
UpdateContext contextBias =
Update<Dtype>::makeContext(biasSize, regScale_b, learnScale_b);
SASSUME0(this->updateParams.size() == 2);
this->updateParams[Weight].context = contextWeight;
this->updateParams[Bias].context = contextBias;
Updater::updateParams(this->updateParams);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::applyChanges(LearnableLayer<Dtype> *targetLayer) {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const uint32_t biasSize = this->out_rows;
FullyConnectedLayer<Dtype>* _targetLayer = (FullyConnectedLayer<Dtype>*)targetLayer;
//int blockSize = BW;
int blockSize = SOOOA_CUDA_NUM_THREADS;
int gridSize;
gridSize = (weightSize + blockSize -1) / blockSize;
AddData<<<gridSize, blockSize>>>(
_targetLayer->_params[Weight]->mutable_device_grad(),
this->_params[Weight]->device_grad(), weightSize);
gridSize = (biasSize + blockSize -1) / blockSize;
AddData<<<gridSize, blockSize>>>(
_targetLayer->_params[Bias]->mutable_device_grad(),
this->_params[Bias]->device_grad(), biasSize);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::syncParams(LearnableLayer<Dtype> *targetLayer) {
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
const uint32_t weightSize = this->in_rows * this->out_rows;
const uint32_t biasSize = this->out_rows;
FullyConnectedLayer<Dtype>* _targetLayer = (FullyConnectedLayer<Dtype>*)targetLayer;
memcpy(this->_params[Weight]->mutable_host_grad(), _targetLayer->_params[Weight]->host_grad(),
weightSize * sizeof(Dtype));
memcpy(this->_params[Bias]->mutable_host_grad(), _targetLayer->_params[Bias]->host_grad(),
biasSize * sizeof(Dtype));
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::saveParams(ofstream& ofs) {
LearnableLayer<Dtype>::saveParams(ofs);
/*
if (this->_inputData.size() == 1) {
cout << SLPROP_BASE(name) << " saves as usual ... " << endl;
LearnableLayer<Dtype>::saveParams(ofs);
} else {
cout << SLPROP_BASE(name) << " saves as special ... " << endl;
uint32_t numParams = this->_params.size();
vector<vector<float>> bboxMeans;
vector<vector<float>> bboxStds;
fill2dVecWithData(this->_inputData[1], bboxMeans);
fill2dVecWithData(this->_inputData[2], bboxStds);
#if 0
this->_inputData[1]->print_shape();
this->_inputData[2]->print_shape();
this->_params[0]->print_shape();
this->_params[1]->print_shape();
exit(1);
#endif
Data<Dtype>* param0 = this->_params[0];
Data<Dtype> orig0(param0->_name, true);
orig0.reshapeLike(param0);
const Dtype* srcPtr0 = param0->host_data();
Dtype* dstPtr0 = orig0.mutable_host_data();
const int numRows0 = param0->getShape(2);
const int numCols0 = param0->getShape(3);
int index;
int id1, id2;
for (int row = 0; row < numRows0; row++) {
id2 = row / 4;
id1 = row % 4;
for (int col = 0; col < numCols0; col++) {
index = row * numCols0 + col;
dstPtr0[index] = srcPtr0[index] * bboxStds[id2][id1];
}
}
Data<Dtype>* param1 = this->_params[1];
Data<Dtype> orig1(param1->_name, true);
orig1.reshapeLike(param1);
const Dtype* srcPtr1 = param1->host_data();
Dtype* dstPtr1 = orig1.mutable_host_data();
const int numRows1 = param1->getShape(1);
for (int row = 0; row < numRows1; row++) {
id2 = row / 4;
id1 = row % 4;
dstPtr1[row] = srcPtr1[row] * bboxStds[id2][id1] + bboxMeans[id2][id1];
}
orig0.save(ofs);
orig1.save(ofs);
}
*/
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::feedforward() {
reshape();
_computeWeightedData();
_computeWeightBiasedData();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightedData() {
// Apply weight to input data
const Dtype* d_weightData = this->_params[Weight]->device_data();
const Dtype* d_inputData = this->_inputData[0]->device_data();
//Dtype* d_preActivationData = _preActivation->mutable_device_data();
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
/**
* [cublasSgemm() reference (from the cuBLAS User Documentation)]
*
* cublasStatus_t cublasSgemm(cublasHandle_t handle, cublasOperation_t transa,
*                            cublasOperation_t transb, int m, int n, int k,
*                            const float *alpha, const float *A, int lda,
*                            const float *B, int ldb, const float *beta, float *C,
*                            int ldc)
*
* C = α op(A) op(B) + β C
*
* where α and β are scalars, and A, B and C are matrices stored in column-major
* format with dimensions op(A) m × k, op(B) k × n and C m × n, respectively.
* Also, for matrix A:
*
*   op(A) = A    if transa == CUBLAS_OP_N
*           A^T  if transa == CUBLAS_OP_T
*           A^H  if transa == CUBLAS_OP_C
*
* and op(B) is defined similarly for matrix B.
*
* cublasOperation_t options:
*   (1) CUBLAS_OP_N => the non-transpose operation is selected.
*   (2) CUBLAS_OP_T => the transpose operation is selected.
*   (3) CUBLAS_OP_C => the conjugate transpose operation is selected.
*
* lda, ldb, ldc => leading dimensions of the two-dimensional arrays used to store
* the matrices A, B, C.
*/
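/*
* Clarifying sketch (added; illustration only): assuming soooa_gpu_gemm() follows
* the usual Caffe-style row-major wrapper convention, a row-major product
* C(m x n) = op(A)(m x k) * op(B)(k x n) maps onto column-major cuBLAS by swapping
* the operand order -- the names handle/lda/ldb below are hypothetical:
*
*   cublasSgemm(handle, transB, transA,
*               n, m, k,
*               &alpha, B, ldb, A, lda,
*               &beta, C, n);
*
* Read that way, the batched branch below computes
*   output(batches x out_rows) = input(batches x in_rows) * weight(out_rows x in_rows)^T
* and the batches == 1 branch is the equivalent GEMV against the weight matrix.
*/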
if (this->batches == 1) {
soooa_gpu_gemv(CblasNoTrans,
this->out_rows, this->in_rows,
Cuda::alpha, d_weightData, d_inputData,
Cuda::beta, d_outputData);
} else {
soooa_gpu_gemm(CblasNoTrans, CblasTrans,
this->batches, this->out_rows, this->in_rows,
Cuda::alpha, d_inputData, d_weightData,
Cuda::beta, d_outputData);
}
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightBiasedData() {
// Add bias to weighted input data
const Dtype* d_biasData = this->_params[Bias]->device_data();
Dtype* d_outputData = this->_outputData[0]->mutable_device_data();
if (this->batches == 1) {
soooa_gpu_axpy(this->out_rows, 1.0f, d_biasData, d_outputData);
} else {
soooa_gpu_gemm(CblasNoTrans, CblasNoTrans,
this->batches, this->out_rows, 1,
Cuda::alpha, this->_onevec.device_mem(), d_biasData,
Cuda::alpha, d_outputData);
}
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::backpropagation() {
/*
* Assume a simple network layer like the one below:
*
*          <<<< ith layer >>>>           <<<< i+1th layer >>>>
*   .....  Xi  Wi  Ai  Fi  Yi (=Xi+1)    ........
*              Bi
*   .....  O ----------- O ------------- O ........
*                                        dL/dYi is already computed
*
*  (※ Xi = input of the i-th layer, Wi = weight of the i-th layer,
*     Bi = bias of the i-th layer, Ai = intermediate value of the i-th layer,
*     Fi = activation function of the i-th layer,
*     Yi = output of the i-th layer, which is also the input of the (i+1)-th layer,
*     L = loss, dL/dYi = gradient computed during the (i+1)-th layer's backward pass)
*
* To train with gradient descent we need dL/dWi and dL/dBi.
* By the chain rule:
*  (a) dL/dWi = dL/dYi * dYi/dAi * dAi/dWi
*  (b) dL/dBi = dL/dYi * dYi/dAi * dAi/dBi
*
* Computing (a) and (b) takes the following four steps:
*
*  (A) dL/dYi : stored in the grad of _outputData[0] during the backward pass of
*               the (i+1)-th layer.
*
*  (B) dYi/dAi : _computePreActivationGrad() computes dL/dYi * dYi/dAi.
*                dL/dYi is already available, so Yi and Ai are needed; they were
*                stored during the forward pass in the data of _outputData[0] and
*                of _preActivation, respectively. Feeding Yi, Ai and dL/dYi to the
*                activation function yields dL/dYi * dYi/dAi, and the result is
*                stored in the grad of this->_preActivation.
*
*  (C) dAi/dWi : _computeWeightGrad() combines the results of (A) and (B) to
*                compute the weight grad. Since dAi/dWi is simply transpose(Xi),
*                only a GEMM is required. The result is stored in the grad of
*                _params[Weight].
*
*  (D) dAi/dBi : same as (C), except that _computeBiasGrad() computes the bias
*                grad and stores the result in the grad of _params[Bias].
*
* Finally, dL/dYi-1 must be handed to layer i-1. This happens in
* _computeInputGrad(), which stores the result in the grad of _inputData.
* dL/dYi-1 = dL/dXi = dL/dAi * dAi/dXi; dL/dAi is stored in the grad of
* _preActivation, and dAi/dXi is the transpose of Wi, so it can be computed.
*/
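/*
* Shape sketch (added for clarity; it follows directly from the GEMM/GEMV calls
* below and the row-major shapes set up in reshape()):
*   outputGrad : (batches  x out_rows)
*   inputData  : (batches  x in_rows)
*   weightGrad : (out_rows x in_rows) = outputGrad^T * inputData   (_computeWeightGrad)
*   biasGrad   : (out_rows)           = outputGrad^T * onevec      (_computeBiasGrad)
*   inputGrad  : (batches  x in_rows) = outputGrad   * weight      (_computeInputGrad)
*/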
_computeWeightGrad();
_computeBiasGrad();
_computeInputGrad();
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeWeightGrad() {
// d(Cost)/d(Weight)
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
const Dtype* d_inputData = this->_inputData[0]->device_data();
Dtype* d_weightGrad = this->_params[Weight]->mutable_device_grad();
// Applying Cuda::alpha (i.e. accumulating) to d_weightGrad is probably there for the
// snapshot-diff feature. In SoooA it seems better to apply Cuda::beta without the
// reset than to deliberately reset and then apply Cuda::alpha below ...
// The same applies to _computeBiasGrad().
soooa_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
this->out_rows, this->in_rows, this->batches,
Cuda::alpha, d_outputGrad, d_inputData,
Cuda::alpha, d_weightGrad);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeBiasGrad() {
// d(Cost)/d(Bias) (same as d_preActivationGrad)
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
Dtype* d_biasGrad = this->_params[Bias]->mutable_device_grad();
soooa_gpu_gemv<Dtype>(CblasTrans,
this->batches, this->out_rows,
Cuda::alpha, d_outputGrad, this->_onevec.device_mem(),
Cuda::alpha, d_biasGrad);
}
template <typename Dtype>
void FullyConnectedLayer<Dtype>::_computeInputGrad() {
//const uint32_t batches = this->_inputShape[0][0];
//const uint32_t in_rows = this->_inputShape[0][2];
//const uint32_t out_rows = this->_outputData[0]->getShape(2);
// d(Cost)/d(Input)
const Dtype* d_weightData = this->_params[Weight]->device_data();
//const Dtype* d_preActivationGrad = this->_preActivation->device_grad();
const Dtype* d_outputGrad = this->_outputData[0]->device_grad();
Dtype* d_inputGrad = this->_inputData[0]->mutable_device_grad();
soooa_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
this->batches, this->in_rows, this->out_rows,
Cuda::alpha, d_outputGrad, d_weightData,
Cuda::beta, d_inputGrad);
this->_inputData[0]->print_grad("inputGrad:");
}
template FullyConnectedLayer<float>::~FullyConnectedLayer();
template void FullyConnectedLayer<float>::reshape();
template void FullyConnectedLayer<float>::update();
template void FullyConnectedLayer<float>::feedforward();
template void FullyConnectedLayer<float>::backpropagation();
/*
template void* FullyConnectedLayer<float>::initLayer();
template void FullyConnectedLayer<float>::destroyLayer(void* instancePtr);
template void FullyConnectedLayer<float>::setInOutTensor(void* instancePtr, void* tensorPtr,
bool isInput, int index);
template bool FullyConnectedLayer<float>::allocLayerTensors(void* instancePtr);
template void FullyConnectedLayer<float>::forwardTensor(void* instancePtr, int miniBatchIdx);
template void FullyConnectedLayer<float>::backwardTensor(void* instancePtr);
template void FullyConnectedLayer<float>::learnTensor(void* instancePtr);
*/
|
f3f6d4feee3c5b7f5311109bd7c4a3bc1cb36d3a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* Matrix transpose with Cuda
* Device code.
*/
#ifndef _TRANSPOSE_KERNEL_H_
#define _TRANSPOSE_KERNEL_H_
#define BLOCK_DIM 16
// This kernel is optimized to ensure all global reads and writes are coalesced,
// and to avoid bank conflicts in shared memory. This kernel is up to 11x faster
// than the naive kernel below. Note that the shared memory array is sized to
// (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory
// so that bank conflicts do not occur when threads address the array column-wise.
__global__ void transpose(float *odata, float *idata, int width, int height)
{
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
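/*
* Usage sketch (added; not part of the original SDK sample): a host-side HIP launch
* for a width x height matrix, assuming d_idata and d_odata are device buffers of
* width * height floats:
*
*   dim3 block(BLOCK_DIM, BLOCK_DIM);
*   dim3 grid((width + BLOCK_DIM - 1) / BLOCK_DIM,
*             (height + BLOCK_DIM - 1) / BLOCK_DIM);
*   hipLaunchKernelGGL(transpose, grid, block, 0, 0, d_odata, d_idata, width, height);
*/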
// This naive transpose kernel suffers from completely non-coalesced writes.
// It can be up to 10x slower than the kernel above for large matrices.
__global__ void transpose_naive(float *odata, float* idata, int width, int height)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
if (xIndex < width && yIndex < height)
{
unsigned int index_in = xIndex + width * yIndex;
unsigned int index_out = yIndex + height * xIndex;
odata[index_out] = idata[index_in];
}
}
#endif // _TRANSPOSE_KERNEL_H_
| f3f6d4feee3c5b7f5311109bd7c4a3bc1cb36d3a.cu | /*
* Copyright 1993-2009 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
/* Matrix transpose with Cuda
* Device code.
*/
#ifndef _TRANSPOSE_KERNEL_H_
#define _TRANSPOSE_KERNEL_H_
#define BLOCK_DIM 16
// This kernel is optimized to ensure all global reads and writes are coalesced,
// and to avoid bank conflicts in shared memory. This kernel is up to 11x faster
// than the naive kernel below. Note that the shared memory array is sized to
// (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory
// so that bank conflicts do not occur when threads address the array column-wise.
__global__ void transpose(float *odata, float *idata, int width, int height)
{
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
// This naive transpose kernel suffers from completely non-coalesced writes.
// It can be up to 10x slower than the kernel above for large matrices.
__global__ void transpose_naive(float *odata, float* idata, int width, int height)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
if (xIndex < width && yIndex < height)
{
unsigned int index_in = xIndex + width * yIndex;
unsigned int index_out = yIndex + height * xIndex;
odata[index_out] = idata[index_in];
}
}
#endif // _TRANSPOSE_KERNEL_H_
|
b613fdebd327a54244e40c693fd67ca4815ca995.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void kernel0(int* C, int loop) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < loop; i++) {
for (int j = 0; j < loop; j++) {
C[id] += id;
}
}
}
extern "C" __global__ void kernel1(int* C, int loop) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < loop; i++) {
for (int j = 0; j < loop; j++) {
C[id] += id;
}
}
}
extern "C" __global__ void kernel2(int* C, int loop) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < loop; i++) {
for (int j = 0; j < loop; j++) {
C[id] += id;
}
}
}
extern "C" __global__ void kernel3(int* C, int loop) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < loop; i++) {
for (int j = 0; j < loop; j++) {
C[id] += id;
}
}
}
| b613fdebd327a54244e40c693fd67ca4815ca995.cu | extern "C" __global__ void kernel0(int* C, int loop) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < loop; i++) {
for (int j = 0; j < loop; j++) {
C[id] += id;
}
}
}
extern "C" __global__ void kernel1(int* C, int loop) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < loop; i++) {
for (int j = 0; j < loop; j++) {
C[id] += id;
}
}
}
extern "C" __global__ void kernel2(int* C, int loop) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < loop; i++) {
for (int j = 0; j < loop; j++) {
C[id] += id;
}
}
}
extern "C" __global__ void kernel3(int* C, int loop) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
for (int i = 0; i < loop; i++) {
for (int j = 0; j < loop; j++) {
C[id] += id;
}
}
}
|
2b7e8407cfc68f82f54989a015d07cdd7dc27070.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdlib.h>
#include<stdio.h>
#include<stdbool.h>
#include "timerc.h"
#define gpuErrchk(ans) {gpuAssert((ans),__FILE__,__LINE__);}
__device__ int global_num_areas = 32;
int global_num_areas_serial = 32;
__global__ void warmup(){
}
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true){
if (code != hipSuccess){
fprintf(stderr,"GPUassert: %s %s %d \n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__ bool isBetweenPointsInclusive(int a, int b, int x1, int x2 ){
// Tests whether or not the segment exists between 2 given x coordinates
// Holds true if (either of the points are inside the interval) or if (both are on opposite sides of the interval), but not if (both are on one side of the interval)
if( (a <= x1 && b >= x2) || (a >= x1 && a <= x2) || (b >= x1 && b <= x2) ){
return true;
}
return false;
};
bool isBetweenPointsInclusiveSerial(int a, int b, int x1, int x2 ){
// Tests whether or not the segment exists between 2 given x coordinates
// Holds true if (either of the points are inside the interval) or if (both are on opposite sides of the interval), but not if (both are on one side of the interval)
if( (a <= x1 && b >= x2) || (a >= x1 && a <= x2) || (b >= x1 && b <= x2) ){
return true;
}
return false;
};
__device__ void printLowerEnvelope(int * min_y, int size){
for(int i = 0; i < size; i++){
printf("The minimum y value between x = %d and x = %d is %d \n", i,i+1, min_y[i]);
}
}
void printLowerEnvelopeSerial(int * min_y, int size){
for(int i = 0; i < size; i++){
printf("The minimum y value between x = %d and x = %d is %d \n", i,i+1, min_y[i]);
}
}
int * lower_envelope(int * coordinate_array, int size){
// Input: an array of non-intersecting segments given as alternating x and y
// coordinates, 4 values per segment (x1, y1, x2, y2).
// In the classical lower-envelope formulation the output would be the sorted list
// of segment endpoints plus a label per interval naming the visible segment; this
// simplified version instead returns, for every unit interval of a fixed x grid,
// the minimum y value of any segment overlapping that interval.
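// Example layout (illustrative): {1,1,3,3, 0,0,2,2, ...} encodes the segments
// (1,1)-(3,3) and (0,0)-(2,2); segment i occupies indices 4*i .. 4*i+3. Note that
// the interval test above assumes each segment's first endpoint has the smaller x.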
// CHANGE THIS TO SPECIFY X GRID SIZE
int number_areas = global_num_areas_serial;
int * min_y = (int *) malloc(number_areas * sizeof(int));
// For each area between points
for(int i = 0; i < number_areas; i++){
min_y[i] = 99999;
// For each segment, update min_y[i] if the segment overlaps the interval between x = i and x = i+1
for(int j = 0; j < size - 1; j = j + 4){
if(isBetweenPointsInclusiveSerial(coordinate_array[j],coordinate_array[j+2],i,i+1) && coordinate_array[j+1] < min_y[i]){
min_y[i] = coordinate_array[j+1];
}
}
}
printLowerEnvelopeSerial(min_y,number_areas);
return min_y;
};
__global__ void parallel_lower_envelope(int * coordinate_array, int number_of_threads, int coordinate_array_size, int * min_y){
// Number of areas between x values
int number_areas = global_num_areas;
// For each area between points
int numElementsPerThread = number_areas / (gridDim.x * blockDim.x);
int cumulative_thread_id = threadIdx.x + (blockDim.x*blockIdx.x);
int startPos = cumulative_thread_id * numElementsPerThread;
// Each thread computes their own min_y index
for(int i = 0; i < numElementsPerThread; i++){
int i_thread = i + startPos;
min_y[i_thread] = 99999;
// For each segment, update min_y[i_thread] if the segment overlaps the interval between x = i_thread and x = i_thread + 1
for(int j = 0; j < coordinate_array_size - 1; j = j + 4){
if(isBetweenPointsInclusive(coordinate_array[j],coordinate_array[j+2],i_thread,i_thread+1) && coordinate_array[j+1] < min_y[i_thread]){
min_y[i_thread] = coordinate_array[j+1];
}
}
}
__syncthreads();
if(cumulative_thread_id == 0){
printLowerEnvelope(min_y,number_areas);
}
};
int main(){
int * segment_array;
int * device_input;
int * device_output;
// Initial memory allocation
segment_array = (int *) malloc(16 * sizeof(int));
segment_array[0] = 1; // s1.p1.x = 1
segment_array[1] = 1; // s1.p1.y = 1
segment_array[2] = 3; // s1.p2.x = 3
segment_array[3] = 3; // s1.p2.y = 3
segment_array[4] = 0; // s2.p1.x = 0
segment_array[5] = 0; // s2.p1.y = 0
segment_array[6] = 2; // s2.p2.x = 2
segment_array[7] = 2; // s2.p2.y = 2
segment_array[8] = 4; // s3.p1.x = 4
segment_array[9] = 4; // s3.p1.y = 4
segment_array[10] = 5; // s3.p2.x = 5
segment_array[11] = 5; // s3.p2.y = 5
segment_array[12] = 3;
segment_array[13] = 3;
segment_array[14] = 0;
segment_array[15] = 0;
float CPUtime;
cstart();
lower_envelope(segment_array,16);
cend(&CPUtime);
printf("Naive CPU time is %f \n", CPUtime);
hipMalloc((void **) &device_input, 16 * sizeof(int));
hipMemcpy(device_input,segment_array,16 * sizeof(int),hipMemcpyHostToDevice);
float GPUtime;
hipMalloc((void **) &device_output, global_num_areas_serial * sizeof(int));
hipLaunchKernelGGL(( warmup), dim3(1),dim3(1), 0, 0, );
gstart();
hipLaunchKernelGGL(( parallel_lower_envelope), dim3(4),dim3(4), 0, 0, device_input,4,16,device_output);
gend(&GPUtime);
printf("Naive GPU time is %f \n", GPUtime);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
hipDeviceSynchronize();
return 0;
}
| 2b7e8407cfc68f82f54989a015d07cdd7dc27070.cu | #include<stdlib.h>
#include<stdio.h>
#include<stdbool.h>
#include "timerc.h"
#define gpuErrchk(ans) {gpuAssert((ans),__FILE__,__LINE__);}
__device__ int global_num_areas = 32;
int global_num_areas_serial = 32;
__global__ void warmup(){
}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true){
if (code != cudaSuccess){
fprintf(stderr,"GPUassert: %s %s %d \n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__ bool isBetweenPointsInclusive(int a, int b, int x1, int x2 ){
// Tests whether or not the segment exists between 2 given x coordinates
// Holds true if (either of the points are inside the interval) or if (both are on opposite sides of the interval), but not if (both are on one side of the interval)
if( (a <= x1 && b >= x2) || (a >= x1 && a <= x2) || (b >= x1 && b <= x2) ){
return true;
}
return false;
};
bool isBetweenPointsInclusiveSerial(int a, int b, int x1, int x2 ){
// Tests whether or not the segment exists between 2 given x coordinates
// Holds true if (either of the points are inside the interval) or if (both are on opposite sides of the interval), but not if (both are on one side of the interval)
if( (a <= x1 && b >= x2) || (a >= x1 && a <= x2) || (b >= x1 && b <= x2) ){
return true;
}
return false;
};
__device__ void printLowerEnvelope(int * min_y, int size){
for(int i = 0; i < size; i++){
printf("The minimum y value between x = %d and x = %d is %d \n", i,i+1, min_y[i]);
}
}
void printLowerEnvelopeSerial(int * min_y, int size){
for(int i = 0; i < size; i++){
printf("The minimum y value between x = %d and x = %d is %d \n", i,i+1, min_y[i]);
}
}
int * lower_envelope(int * coordinate_array, int size){
// Input: an array of non-intersecting segments given as alternating x and y
// coordinates, 4 values per segment (x1, y1, x2, y2).
// In the classical lower-envelope formulation the output would be the sorted list
// of segment endpoints plus a label per interval naming the visible segment; this
// simplified version instead returns, for every unit interval of a fixed x grid,
// the minimum y value of any segment overlapping that interval.
// CHANGE THIS TO SPECIFY X GRID SIZE
int number_areas = global_num_areas_serial;
int * min_y = (int *) malloc(number_areas * sizeof(int));
// For each area between points
for(int i = 0; i < number_areas; i++){
min_y[i] = 99999;
// For each segment, update min_y[i] if the segment overlaps the interval between x = i and x = i+1
for(int j = 0; j < size - 1; j = j + 4){
if(isBetweenPointsInclusiveSerial(coordinate_array[j],coordinate_array[j+2],i,i+1) && coordinate_array[j+1] < min_y[i]){
min_y[i] = coordinate_array[j+1];
}
}
}
printLowerEnvelopeSerial(min_y,number_areas);
return min_y;
};
__global__ void parallel_lower_envelope(int * coordinate_array, int number_of_threads, int coordinate_array_size, int * min_y){
// Number of areas between x values
int number_areas = global_num_areas;
// For each area between points
int numElementsPerThread = number_areas / (gridDim.x * blockDim.x);
int cumulative_thread_id = threadIdx.x + (blockDim.x*blockIdx.x);
int startPos = cumulative_thread_id * numElementsPerThread;
// Each thread computes their own min_y index
for(int i = 0; i < numElementsPerThread; i++){
int i_thread = i + startPos;
min_y[i_thread] = 99999;
// For each segment, update min_y[i_thread] if the segment overlaps the interval between x = i_thread and x = i_thread + 1
for(int j = 0; j < coordinate_array_size - 1; j = j + 4){
if(isBetweenPointsInclusive(coordinate_array[j],coordinate_array[j+2],i_thread,i_thread+1) && coordinate_array[j+1] < min_y[i_thread]){
min_y[i_thread] = coordinate_array[j+1];
}
}
}
__syncthreads();
if(cumulative_thread_id == 0){
printLowerEnvelope(min_y,number_areas);
}
};
int main(){
int * segment_array;
int * device_input;
int * device_output;
// Initial memory allocation
segment_array = (int *) malloc(16 * sizeof(int));
segment_array[0] = 1; // s1.p1.x = 1
segment_array[1] = 1; // s1.p1.y = 1
segment_array[2] = 3; // s1.p2.x = 3
segment_array[3] = 3; // s1.p2.y = 3
segment_array[4] = 0; // s2.p1.x = 0
segment_array[5] = 0; // s2.p1.y = 0
segment_array[6] = 2; // s2.p2.x = 2
segment_array[7] = 2; // s2.p2.y = 2
segment_array[8] = 4; // s3.p1.x = 4
segment_array[9] = 4; // s3.p1.y = 4
segment_array[10] = 5; // s3.p2.x = 5
segment_array[11] = 5; // s3.p2.y = 5
segment_array[12] = 3;
segment_array[13] = 3;
segment_array[14] = 0;
segment_array[15] = 0;
float CPUtime;
cstart();
lower_envelope(segment_array,16);
cend(&CPUtime);
printf("Naive CPU time is %f \n", CPUtime);
cudaMalloc((void **) &device_input, 16 * sizeof(int));
cudaMemcpy(device_input,segment_array,16 * sizeof(int),cudaMemcpyHostToDevice);
float GPUtime;
cudaMalloc((void **) &device_output, global_num_areas_serial * sizeof(int));
warmup<<<1,1>>>();
gstart();
parallel_lower_envelope<<<4,4>>>(device_input,4,16,device_output);
gend(&GPUtime);
printf("Naive GPU time is %f \n", GPUtime);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
cudaDeviceSynchronize();
return 0;
}
|
c975565cdd62bff8e20436fc2d685e67e169f336.hip | // !!! This is a file automatically generated by hipify!!!
#include "hashGlobal.h"
void initPaging(largeInt availableGPUMemory, multipassConfig_t* mbk)
{
mbk->totalNumPages = availableGPUMemory / PAGE_SIZE;
printf("@INFO: total number of pages: %d [each %dKB]\n", mbk->totalNumPages, (PAGE_SIZE / (1 << 10)));
mbk->initialPageAssignedCounter = 0;
mbk->keyPageCounter = 0;
mbk->totalNumFreePages = mbk->totalNumPages;
mbk->hfreeListId = (int*) malloc(mbk->totalNumPages * sizeof(int));
for(int i = 0; i < mbk->totalNumPages; i ++)
mbk->hfreeListId[i] = i;
hipMalloc((void**) &(mbk->freeListId), mbk->totalNumPages * sizeof(int));
hipMemcpy(mbk->freeListId, mbk->hfreeListId, mbk->totalNumPages * sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**) &(mbk->dbuffer), mbk->totalNumPages * PAGE_SIZE);
hipMemset(mbk->dbuffer, 0, mbk->totalNumPages * PAGE_SIZE);
printf("@INFO: done allocating base buffer in GPU memory\n");
//This has to be allocated GPU-side
mbk->hpages = (page_t*) malloc(mbk->totalNumPages * sizeof(page_t));
for(int i = 0; i < mbk->totalNumPages; i ++)
{
mbk->hpages[i].id = i;
mbk->hpages[i].next = NULL;
mbk->hpages[i].used = 0;
mbk->hpages[i].needed = 0;
mbk->hpages[i].hashTableOffset = mbk->hashTableOffset;
}
printf("@INFO: done initializing pages meta data\n");
hipMalloc((void**) &(mbk->pages), mbk->totalNumPages * sizeof(page_t));
hipMemcpy(mbk->pages, mbk->hpages, mbk->totalNumPages * sizeof(page_t), hipMemcpyHostToDevice);
printf("@INFO: done doing initPaging\n");
}
//TODO: currently we don't mark a bucket group to not ask for more memory if it previously revoked its pages
__device__ void* multipassMalloc(unsigned size, bucketGroup_t* myGroup, multipassConfig_t* mbk)
{
page_t* parentPage = myGroup->parentPage;
unsigned oldUsed = 0;
if(parentPage != NULL)
{
oldUsed = atomicAdd(&(parentPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
{
return (void*) ((largeInt) mbk->dbuffer + parentPage->id * PAGE_SIZE + oldUsed);
}
}
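// Slow path: take the per-bucket-group lock, re-check the parent page (another
// thread may already have chained a fresh one), otherwise pull a new page off the
// free list, link it in as the group's parent, and retry the bump allocation.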
page_t* newPage;
do
{
newPage = NULL;
//acquire some lock
unsigned oldLock = 1;
do
{
oldLock = atomicExch(&(myGroup->pageLock), 1);
if(oldLock == 0)
{
//Re-testing if the parent page has room (because the partenPage might have changed)
parentPage = myGroup->parentPage;
if(parentPage != NULL)
{
oldUsed = atomicAdd(&(parentPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
{
//Unlocking
atomicExch(&(myGroup->pageLock), 0);
return (void*) ((largeInt) mbk->dbuffer + parentPage->id * PAGE_SIZE + oldUsed);
}
}
newPage = allocateNewPage(mbk, 1);
//If no more page exists and no page is used yet (for this bucketgroup), don't do anything
if(newPage == NULL)
{
//releaseLock
atomicExch(&(myGroup->pageLock), 0);
return NULL;
}
newPage->next = parentPage;
myGroup->parentPage = newPage;
//Unlocking
atomicExch(&(myGroup->pageLock), 0);
}
} while(oldLock == 1);
//This assumes that the newPage is not already full, which is to be tested.
oldUsed = atomicAdd(&(newPage->used), size);
} while((oldUsed + size) >= PAGE_SIZE);
return (void*) ((largeInt) mbk->dbuffer + oldUsed + newPage->id * PAGE_SIZE);
}
__device__ void* multipassMallocValue(unsigned size, bucketGroup_t* myGroup, multipassConfig_t* mbk)
{
page_t* parentPage = myGroup->valueParentPage;
unsigned oldUsed = 0;
if(parentPage != NULL)
{
oldUsed = atomicAdd(&(parentPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
{
return (void*) ((largeInt) mbk->dbuffer + parentPage->id * PAGE_SIZE + oldUsed);
}
}
page_t* newPage = NULL;
//acquire some lock
unsigned oldLock = 1;
do
{
oldLock = atomicExch(&(myGroup->pageLock), 1);
if(oldLock == 0)
{
//Re-testing if the parent page has room (because the partenPage might have changed)
parentPage = myGroup->valueParentPage;
if(parentPage != NULL)
{
oldUsed = atomicAdd(&(parentPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
{
//Unlocking
atomicExch(&(myGroup->pageLock), 0);
return (void*) ((largeInt) mbk->dbuffer + parentPage->id * PAGE_SIZE + oldUsed);
}
}
newPage = allocateNewPage(mbk, 0);
//If no more page exists and no page is used yet (for this bucketgroup), don't do anything
if(newPage == NULL)
{
//releaseLock
atomicExch(&(myGroup->pageLock), 0);
myGroup->overflownValue = 1;
return NULL;
}
newPage->next = parentPage;
myGroup->valueParentPage = newPage;
//Unlocking
atomicExch(&(myGroup->pageLock), 0);
}
} while(oldLock == 1);
//This assumes that the newPage is not already full, which is to be tested.
oldUsed = atomicAdd(&(newPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
return (void*) ((largeInt) mbk->dbuffer + oldUsed + newPage->id * PAGE_SIZE);
else
{
return NULL;
}
}
__device__ page_t* allocateNewPage(multipassConfig_t* mbk, int isKeyPage)
{
#ifdef MULTI_VALUE
if(isKeyPage == 1)
{
if(mbk->keyPageCounter > (mbk->totalNumFreePages / 2))
return NULL;
mbk->keyPageCounter ++;
}
#endif
int indexToAllocate = atomicInc((unsigned*) &(mbk->initialPageAssignedCounter), INT_MAX);
if(indexToAllocate < mbk->totalNumFreePages)
{
return &(mbk->pages[mbk->freeListId[indexToAllocate]]);
}
return NULL;
}
| c975565cdd62bff8e20436fc2d685e67e169f336.cu | #include "hashGlobal.h"
void initPaging(largeInt availableGPUMemory, multipassConfig_t* mbk)
{
mbk->totalNumPages = availableGPUMemory / PAGE_SIZE;
printf("@INFO: total number of pages: %d [each %dKB]\n", mbk->totalNumPages, (PAGE_SIZE / (1 << 10)));
mbk->initialPageAssignedCounter = 0;
mbk->keyPageCounter = 0;
mbk->totalNumFreePages = mbk->totalNumPages;
mbk->hfreeListId = (int*) malloc(mbk->totalNumPages * sizeof(int));
for(int i = 0; i < mbk->totalNumPages; i ++)
mbk->hfreeListId[i] = i;
cudaMalloc((void**) &(mbk->freeListId), mbk->totalNumPages * sizeof(int));
cudaMemcpy(mbk->freeListId, mbk->hfreeListId, mbk->totalNumPages * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**) &(mbk->dbuffer), mbk->totalNumPages * PAGE_SIZE);
cudaMemset(mbk->dbuffer, 0, mbk->totalNumPages * PAGE_SIZE);
printf("@INFO: done allocating base buffer in GPU memory\n");
//This has to be allocated GPU-side
mbk->hpages = (page_t*) malloc(mbk->totalNumPages * sizeof(page_t));
for(int i = 0; i < mbk->totalNumPages; i ++)
{
mbk->hpages[i].id = i;
mbk->hpages[i].next = NULL;
mbk->hpages[i].used = 0;
mbk->hpages[i].needed = 0;
mbk->hpages[i].hashTableOffset = mbk->hashTableOffset;
}
printf("@INFO: done initializing pages meta data\n");
cudaMalloc((void**) &(mbk->pages), mbk->totalNumPages * sizeof(page_t));
cudaMemcpy(mbk->pages, mbk->hpages, mbk->totalNumPages * sizeof(page_t), cudaMemcpyHostToDevice);
printf("@INFO: done doing initPaging\n");
}
//TODO: currently we don't mark a bucket group to not ask for more memory if it previously revoked its pages
__device__ void* multipassMalloc(unsigned size, bucketGroup_t* myGroup, multipassConfig_t* mbk)
{
page_t* parentPage = myGroup->parentPage;
unsigned oldUsed = 0;
if(parentPage != NULL)
{
oldUsed = atomicAdd(&(parentPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
{
return (void*) ((largeInt) mbk->dbuffer + parentPage->id * PAGE_SIZE + oldUsed);
}
}
page_t* newPage;
do
{
newPage = NULL;
//acquire some lock
unsigned oldLock = 1;
do
{
oldLock = atomicExch(&(myGroup->pageLock), 1);
if(oldLock == 0)
{
//Re-testing if the parent page has room (because the partenPage might have changed)
parentPage = myGroup->parentPage;
if(parentPage != NULL)
{
oldUsed = atomicAdd(&(parentPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
{
//Unlocking
atomicExch(&(myGroup->pageLock), 0);
return (void*) ((largeInt) mbk->dbuffer + parentPage->id * PAGE_SIZE + oldUsed);
}
}
newPage = allocateNewPage(mbk, 1);
//If no more page exists and no page is used yet (for this bucketgroup), don't do anything
if(newPage == NULL)
{
//releaseLock
atomicExch(&(myGroup->pageLock), 0);
return NULL;
}
newPage->next = parentPage;
myGroup->parentPage = newPage;
//Unlocking
atomicExch(&(myGroup->pageLock), 0);
}
} while(oldLock == 1);
//This assumes that the newPage is not already full, which is to be tested.
oldUsed = atomicAdd(&(newPage->used), size);
} while((oldUsed + size) >= PAGE_SIZE);
return (void*) ((largeInt) mbk->dbuffer + oldUsed + newPage->id * PAGE_SIZE);
}
__device__ void* multipassMallocValue(unsigned size, bucketGroup_t* myGroup, multipassConfig_t* mbk)
{
page_t* parentPage = myGroup->valueParentPage;
unsigned oldUsed = 0;
if(parentPage != NULL)
{
oldUsed = atomicAdd(&(parentPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
{
return (void*) ((largeInt) mbk->dbuffer + parentPage->id * PAGE_SIZE + oldUsed);
}
}
page_t* newPage = NULL;
//acquire some lock
unsigned oldLock = 1;
do
{
oldLock = atomicExch(&(myGroup->pageLock), 1);
if(oldLock == 0)
{
//Re-testing if the parent page has room (because the partenPage might have changed)
parentPage = myGroup->valueParentPage;
if(parentPage != NULL)
{
oldUsed = atomicAdd(&(parentPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
{
//Unlocking
atomicExch(&(myGroup->pageLock), 0);
return (void*) ((largeInt) mbk->dbuffer + parentPage->id * PAGE_SIZE + oldUsed);
}
}
newPage = allocateNewPage(mbk, 0);
//If no more page exists and no page is used yet (for this bucketgroup), don't do anything
if(newPage == NULL)
{
//releaseLock
atomicExch(&(myGroup->pageLock), 0);
myGroup->overflownValue = 1;
return NULL;
}
newPage->next = parentPage;
myGroup->valueParentPage = newPage;
//Unlocking
atomicExch(&(myGroup->pageLock), 0);
}
} while(oldLock == 1);
//This assumes that the newPage is not already full, which is to be tested.
oldUsed = atomicAdd(&(newPage->used), size);
if((oldUsed + size) < PAGE_SIZE)
return (void*) ((largeInt) mbk->dbuffer + oldUsed + newPage->id * PAGE_SIZE);
else
{
return NULL;
}
}
__device__ page_t* allocateNewPage(multipassConfig_t* mbk, int isKeyPage)
{
#ifdef MULTI_VALUE
if(isKeyPage == 1)
{
if(mbk->keyPageCounter > (mbk->totalNumFreePages / 2))
return NULL;
mbk->keyPageCounter ++;
}
#endif
int indexToAllocate = atomicInc((unsigned*) &(mbk->initialPageAssignedCounter), INT_MAX);
if(indexToAllocate < mbk->totalNumFreePages)
{
return &(mbk->pages[mbk->freeListId[indexToAllocate]]);
}
return NULL;
}
|
48e350bf808db315502dbd43903f035e9707bcf3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// copied from cutorch/lib/THC/THCTensorRandom.cu
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
#define NUM_BLOCKS(n) min((int)THCCeilDiv(n, (long) BLOCK_SIZE), MAX_NUM_BLOCKS)
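// Training-mode kernel: each non-positive input is scaled by a per-element slope r
// drawn uniformly between a and b; the sampled slope is written to `noise` so the
// backward pass can reuse it. Positive inputs pass through unchanged with noise = 1.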
__global__ void rreluUpdateOutputTrain(int n, hiprandStateMtgp32_t *state,
float *input, float* noise, float *output, double a, double b)
{
CUDA_KERNEL_LOOP(i, n)
{
if (input[i] <= 0)
{
float r = hiprand_uniform(&state[blockIdx.x]);
r = r * (b-a) + a;
output[i] = input[i] * r;
noise[i] = r;
}
else
{
output[i] = input[i];
noise[i] = 1;
}
}
}
struct RReLUUpdateOutputEval_functor
{
const float negSlope_;
RReLUUpdateOutputEval_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *out, float *in)
{
const float x = *in;
const float r = x <= 0 ? negSlope_ : 1;
*out = x * r;
}
};
struct RReLUUpdateOutputEvalIP_functor
{
const float negSlope_;
RReLUUpdateOutputEvalIP_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *x)
{
if (*x <= 0)
{
*x = *x * negSlope_;
}
}
};
void THNN_CudaRReLU_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output,
THCudaTensor *noise, double lower, double upper, bool train, bool inplace, void *generator)
{
THCUNN_assertSameGPU(state, 3, input, output, noise);
struct hiprandStateMtgp32_t* gen_states = THCRandom_generatorStates(state);
if (train)
{
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, noise, input);
float *input_data = THCudaTensor_data(state, input);
float *noise_data = THCudaTensor_data(state, noise);
long n = THCudaTensor_nElement(state, input);
if (inplace)
{
hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(NUM_BLOCKS(n)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
n, gen_states, input_data, noise_data, input_data, lower, upper);
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
float *output_data = THCudaTensor_data(state, output);
hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(NUM_BLOCKS(n)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
n, gen_states, input_data, noise_data, output_data, lower, upper);
}
THCudaCheck(hipGetLastError());
THCudaTensor_free(state, input);
}
else
{
const double negSlope = (lower + upper) / 2;
if (inplace)
{
THC_pointwiseApply1(state, input, RReLUUpdateOutputEvalIP_functor(negSlope));
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
THC_pointwiseApply2(state, output, input, RReLUUpdateOutputEval_functor(negSlope));
}
}
}
struct RReLUupdateGradInputEval_functor
{
const float negSlope_;
RReLUupdateGradInputEval_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *gradIn, float *gradOut, float *in)
{
*gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut);
}
};
struct RReLUupdateGradInputEvalIP_functor
{
const float negSlope_;
RReLUupdateGradInputEvalIP_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *gradOut, float *in)
{
if (*in <= 0)
{
*gradOut = (*gradOut) * negSlope_;
}
}
};
void THNN_CudaRReLU_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, THCudaTensor *noise, double lower, double upper, bool train, bool inplace)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
// multiply the gradient by the noise tensor
if (inplace)
{
THCudaTensor_cmul(state, gradOutput, gradOutput, noise);
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_cmul(state, gradInput, gradOutput, noise);
}
}
else
{
// use constant factor for negative input values
const double negSlope = (lower + upper) / 2;
if (inplace)
{
THC_pointwiseApply2(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor(negSlope));
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, input);
THC_pointwiseApply3(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor(negSlope));
}
}
THCudaTensor_free(state, gradOutput);
}
| 48e350bf808db315502dbd43903f035e9707bcf3.cu | #include "THCUNN.h"
#include "common.h"
#include <curand.h>
#include <curand_kernel.h>
// copied from cutorch/lib/THC/THCTensorRandom.cu
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
#define NUM_BLOCKS(n) min((int)THCCeilDiv(n, (long) BLOCK_SIZE), MAX_NUM_BLOCKS)
__global__ void rreluUpdateOutputTrain(int n, curandStateMtgp32 *state,
float *input, float* noise, float *output, double a, double b)
{
CUDA_KERNEL_LOOP(i, n)
{
if (input[i] <= 0)
{
float r = curand_uniform(&state[blockIdx.x]);
r = r * (b-a) + a;
output[i] = input[i] * r;
noise[i] = r;
}
else
{
output[i] = input[i];
noise[i] = 1;
}
}
}
struct RReLUUpdateOutputEval_functor
{
const float negSlope_;
RReLUUpdateOutputEval_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *out, float *in)
{
const float x = *in;
const float r = x <= 0 ? negSlope_ : 1;
*out = x * r;
}
};
struct RReLUUpdateOutputEvalIP_functor
{
const float negSlope_;
RReLUUpdateOutputEvalIP_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *x)
{
if (*x <= 0)
{
*x = *x * negSlope_;
}
}
};
void THNN_CudaRReLU_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output,
THCudaTensor *noise, double lower, double upper, bool train, bool inplace, void *generator)
{
THCUNN_assertSameGPU(state, 3, input, output, noise);
struct curandStateMtgp32* gen_states = THCRandom_generatorStates(state);
if (train)
{
input = THCudaTensor_newContiguous(state, input);
THCudaTensor_resizeAs(state, noise, input);
float *input_data = THCudaTensor_data(state, input);
float *noise_data = THCudaTensor_data(state, noise);
long n = THCudaTensor_nElement(state, input);
if (inplace)
{
rreluUpdateOutputTrain<<<NUM_BLOCKS(n), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
n, gen_states, input_data, noise_data, input_data, lower, upper);
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
float *output_data = THCudaTensor_data(state, output);
rreluUpdateOutputTrain<<<NUM_BLOCKS(n), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
n, gen_states, input_data, noise_data, output_data, lower, upper);
}
THCudaCheck(cudaGetLastError());
THCudaTensor_free(state, input);
}
else
{
const double negSlope = (lower + upper) / 2;
if (inplace)
{
THC_pointwiseApply1(state, input, RReLUUpdateOutputEvalIP_functor(negSlope));
THCudaTensor_set(state, output, input);
}
else
{
THCudaTensor_resizeAs(state, output, input);
THC_pointwiseApply2(state, output, input, RReLUUpdateOutputEval_functor(negSlope));
}
}
}
struct RReLUupdateGradInputEval_functor
{
const float negSlope_;
RReLUupdateGradInputEval_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *gradIn, float *gradOut, float *in)
{
*gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut);
}
};
struct RReLUupdateGradInputEvalIP_functor
{
const float negSlope_;
RReLUupdateGradInputEvalIP_functor(float negSlope)
: negSlope_(negSlope)
{}
__device__ __forceinline__ void operator()(float *gradOut, float *in)
{
if (*in <= 0)
{
*gradOut = (*gradOut) * negSlope_;
}
}
};
void THNN_CudaRReLU_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, THCudaTensor *noise, double lower, double upper, bool train, bool inplace)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise);
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
// multiply the gradient by the noise tensor
if (inplace)
{
THCudaTensor_cmul(state, gradOutput, gradOutput, noise);
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_cmul(state, gradInput, gradOutput, noise);
}
}
else
{
// use constant factor for negative input values
const double negSlope = (lower + upper) / 2;
if (inplace)
{
THC_pointwiseApply2(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor(negSlope));
THCudaTensor_set(state, gradInput, gradOutput);
}
else
{
THCudaTensor_resizeAs(state, gradInput, input);
THC_pointwiseApply3(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor(negSlope));
}
}
THCudaTensor_free(state, gradOutput);
}
|
7f8bc9b5629e0b42ec1401e8284808a3b1731bb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file gemm.cu
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief GEMM Kernel
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 10)
#endif
#ifndef TILE_W
#define TILE_W 128
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
#define SM 64
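// Host reference implementation: blocked GEMM with SM x SM tiles. reorder() copies
// one tile of B into a contiguous scratch buffer so that the innermost loop of mm()
// reads unit-stride, cache-friendly memory.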
static void reorder(float *__restrict__ a, float *__restrict__ b, int n)
{
for (int i = 0; i < SM; i++)
for (int j = 0; j < SM; j++)
b[i * SM + j] = a[i * n + j];
}
static void mm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
for (int i = 0; i < SM; i++)
{
for (int k = 0; k < SM; k++)
{
for (int j = 0; j < SM; j++)
{
c[i * n + j] += a[i * n + k] * b[k * SM + j];
}
}
}
}
void gemm_host(float *a, float *b, float *c, int n)
{
int bk = n / SM;
#pragma omp parallel for collapse(3)
for (int i = 0; i < bk; i++)
{
for (int j = 0; j < bk; j++)
{
for (int k = 0; k < bk; k++)
{
float b2[SM * SM];
reorder(&b[SM * (k * n + j)], b2, n);
mm(&a[SM * (i * n + k)], b2, &c[SM * (i * n + j)], n);
}
}
}
}
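// GPU kernel: classic shared-memory tiled GEMM. Each thread block computes one
// BLOCK_SIZE x BLOCK_SIZE tile of C, staging the matching tiles of A and B in shared
// memory for every step along the k dimension. There are no boundary guards, so n is
// assumed to be a multiple of BLOCK_SIZE (true for the default N = 1 << 10 with
// BLOCK_SIZE = 32).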
__global__ void gemm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int ib = blockIdx.y;
int jb = blockIdx.x;
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
float Cvalue = 0.0f;
for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
{
a_offset = ib * n * BLOCK_SIZE + kb * BLOCK_SIZE;
b_offset = kb * n * BLOCK_SIZE + jb * BLOCK_SIZE;
As[it][jt] = a[a_offset + it * n + jt];
Bs[it][jt] = b[b_offset + it * n + jt];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
Cvalue += As[it][k] * Bs[k][jt];
__syncthreads();
}
c_offset = ib * n * BLOCK_SIZE + jb * BLOCK_SIZE;
c[c_offset + it * n + jt] = Cvalue;
}
int main(int argc, char *argv[])
{
int n = N, iret = 0;
float *a, *b, *c, *g;
struct timespec rt[2];
double wt; // walltime
if (argc > 1)
n = atoi(argv[1]);
//TODO Update malloc to hipMallocManaged
gpuErrchk(hipMallocManaged((void **)&a, sizeof(float) * n *n));
gpuErrchk(hipMallocManaged((void **)&b, sizeof(float) * n *n));
gpuErrchk(hipMallocManaged((void **)&c, sizeof(float) * n *n));
if (NULL == (g = (float *)malloc(sizeof(*g) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
gpuErrchk(hipFree(a));
gpuErrchk(hipFree(b));
gpuErrchk(hipFree(c));
free(g);
exit(EXIT_FAILURE);
}
//Init Data
int _b = rand() % TWO04;
int _c = rand() % TWO08;
#pragma omp parallel for
for (int i = 0; i < n * n; i++)
{
a[i] = _b / (float)TWO02;
b[i] = _c / (float)TWO04;
c[i] = g[i] = 0.0;
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gemm_host(a, b, g, n);
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
clock_gettime(CLOCK_REALTIME, rt + 0);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
hipLaunchKernelGGL(( gemm), dim3(dimGrid), dim3(dimBlock), 0, 0, a, b, c, n);
gpuErrchk(hipPeekAtLastError());
hipDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v1 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
gpuErrchk(hipFree(a));
gpuErrchk(hipFree(b));
gpuErrchk(hipFree(c));
free(g);
return 0;
}
| 7f8bc9b5629e0b42ec1401e8284808a3b1731bb7.cu | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file gemm.cu
* @author Alessandro Capotondi
* @date 12 May 2020
* @brief GEMM Kernel
*
* @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
}
extern "C"
{
#include "utils.h"
}
#define TWO02 (1 << 2)
#define TWO04 (1 << 4)
#define TWO08 (1 << 8)
#ifndef N
#define N (1 << 10)
#endif
#ifndef TILE_W
#define TILE_W 128
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif
#define SM 64
static void reorder(float *__restrict__ a, float *__restrict__ b, int n)
{
for (int i = 0; i < SM; i++)
for (int j = 0; j < SM; j++)
b[i * SM + j] = a[i * n + j];
}
static void mm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
for (int i = 0; i < SM; i++)
{
for (int k = 0; k < SM; k++)
{
for (int j = 0; j < SM; j++)
{
c[i * n + j] += a[i * n + k] * b[k * SM + j];
}
}
}
}
void gemm_host(float *a, float *b, float *c, int n)
{
int bk = n / SM;
#pragma omp parallel for collapse(3)
for (int i = 0; i < bk; i++)
{
for (int j = 0; j < bk; j++)
{
for (int k = 0; k < bk; k++)
{
float b2[SM * SM];
reorder(&b[SM * (k * n + j)], b2, n);
mm(&a[SM * (i * n + k)], b2, &c[SM * (i * n + j)], n);
}
}
}
}
__global__ void gemm(float *__restrict__ a, float *__restrict__ b, float *__restrict__ c, int n)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int ib = blockIdx.y;
int jb = blockIdx.x;
int it = threadIdx.y;
int jt = threadIdx.x;
int a_offset, b_offset, c_offset;
float Cvalue = 0.0f;
for (int kb = 0; kb < (n / BLOCK_SIZE); ++kb)
{
a_offset = ib * n * BLOCK_SIZE + kb * BLOCK_SIZE;
b_offset = kb * n * BLOCK_SIZE + jb * BLOCK_SIZE;
As[it][jt] = a[a_offset + it * n + jt];
Bs[it][jt] = b[b_offset + it * n + jt];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
Cvalue += As[it][k] * Bs[k][jt];
__syncthreads();
}
c_offset = ib * n * BLOCK_SIZE + jb * BLOCK_SIZE;
c[c_offset + it * n + jt] = Cvalue;
}
int main(int argc, char *argv[])
{
int n = N, iret = 0;
float *a, *b, *c, *g;
struct timespec rt[2];
double wt; // walltime
if (argc > 1)
n = atoi(argv[1]);
//TODO Update malloc to cudaMallocManaged
gpuErrchk(cudaMallocManaged((void **)&a, sizeof(float) * n *n));
gpuErrchk(cudaMallocManaged((void **)&b, sizeof(float) * n *n));
gpuErrchk(cudaMallocManaged((void **)&c, sizeof(float) * n *n));
if (NULL == (g = (float *)malloc(sizeof(*g) * n * n)))
{
printf("error: memory allocation for 'z'\n");
iret = -1;
}
if (0 != iret)
{
gpuErrchk(cudaFree(a));
gpuErrchk(cudaFree(b));
gpuErrchk(cudaFree(c));
free(g);
exit(EXIT_FAILURE);
}
//Init Data
int _b = rand() % TWO04;
int _c = rand() % TWO08;
#pragma omp parallel for
for (int i = 0; i < n * n; i++)
{
a[i] = _b / (float)TWO02;
b[i] = _c / (float)TWO04;
c[i] = g[i] = 0.0;
}
clock_gettime(CLOCK_REALTIME, rt + 0);
gemm_host(a, b, g, n);
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM (Host) : %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
clock_gettime(CLOCK_REALTIME, rt + 0);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((n + (BLOCK_SIZE)-1) / (BLOCK_SIZE), (n + (BLOCK_SIZE)-1) / (BLOCK_SIZE));
gemm<<<dimGrid, dimBlock>>>(a, b, c, n);
gpuErrchk(cudaPeekAtLastError());
cudaDeviceSynchronize();
clock_gettime(CLOCK_REALTIME, rt + 1);
wt = (rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec);
printf("GEMM-v1 (GPU): %9.3f sec %9.1f GFLOPS\n", wt, 2.0 * n * n * n / (1.0e9 * wt));
for (int i = 0; i < n * n; i++)
{
iret = *(int *)(g + i) ^ *(int *)(c + i);
assert(iret == 0);
}
gpuErrchk(cudaFree(a));
gpuErrchk(cudaFree(b));
gpuErrchk(cudaFree(c));
free(g);
return 0;
}
|
8c2d924a5afa8c2445a5878993cb05cb97ba56a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (C) 2020 ByteDance Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_kernels.h"
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <hip/hip_fp16.h>
namespace effectivetransformer{
// gelu code from
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/cuda_kernels.cu#L26-L45
template <typename T>
__inline__ __device__
T gelu(T x)
{
float cdf = 0.5f *
(1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x))));
return x * cdf;
}
template <>
__inline__ __device__
half2 gelu(half2 val)
{
half2 val_pow3 = __hmul2(val, __hmul2(val, val));
float2 tmp_pow = __half22float2(val_pow3);
float2 tmp = __half22float2(val);
tmp.x = 0.5f *
(1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
tmp.y = 0.5f *
(1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
return __hmul2(val, __float22half2_rn(tmp));
}
// reduce code from
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/cuda_kernels.cu#L47-L73
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)0.0f;
val = warpReduceSum(val);
return val;
}
/// ***************************** add_bias + gelu *****************************
template <typename T>
__global__
void add_bias_act(T* out, const T* bias, int m, int n)
{
T val, reg_bias;
int row_id = blockIdx.x;
int ite = n / blockDim.x;
int tid = threadIdx.x;
for(int i = 0; i < ite; ++i)
{
reg_bias = __ldg(&bias[i * blockDim.x + tid]);
row_id = blockIdx.x;
while(row_id < m){
val = out[tid + i * blockDim.x + row_id * n]+ reg_bias;
out[tid + i * blockDim.x + row_id * n] = gelu<T>(val);
row_id += gridDim.x;
}
}
}
template <>
__global__
void add_bias_act(__half* out, const __half* bias, int m, int n)
{
half2 val, reg_bias;
int row_id = blockIdx.x;
int ite = n / blockDim.x / 2;
int tid = threadIdx.x;
half2* out_ptr = (half2*) out;
const half2* bias_ptr = (half2*) bias;
for(int i = 0; i < ite; ++i)
{
reg_bias = __ldg(&bias_ptr[i * blockDim.x + tid]);
row_id = blockIdx.x;
while(row_id < m){
val = out_ptr[tid + i * blockDim.x + row_id * n / 2];
val = __hadd2(val, reg_bias);
out_ptr[tid + i * blockDim.x + row_id * n / 2] = gelu<half2>(val);
row_id += gridDim.x;
}
}
}
template <typename T>
void add_bias_act_kernelLauncher(
T* out, const T* bias, int m, int n, hipStream_t stream)
{
dim3 grid(max(m / 4, 1));
dim3 block(n / 4);
assert(block.x < 1024);
hipLaunchKernelGGL(( add_bias_act<T>), dim3(grid), dim3(block), 0, stream, out, bias, m, n);
}
template void add_bias_act_kernelLauncher<__half>(
__half* out, const __half* bias, int m, int n, hipStream_t stream);
template void add_bias_act_kernelLauncher<float>(
float* out, const float* bias, int m, int n, hipStream_t stream);
/// *********************************** fin ***********************************
/// ************************** add_bias + layer_norm **************************
template <typename T>
__global__
void add_bias_input_layernorm(
T* out, const T* input, const T* bias, const T* gamma,
const T* beta, int m, int n)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out = 0.0f;
for(int i = tid; i < n; i += blockDim.x)
local_out += (float)(out[blockIdx.x * n + i]
+ input[blockIdx.x * n + i] + __ldg(&bias[i]));
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>((
local_out - s_mean) * (local_out - s_mean));
if(threadIdx.x == 0)
s_variance = variance / n + 1e-6f;
__syncthreads();
for(int i = tid; i < n; i += blockDim.x)
out[blockIdx.x * n + i] =
(T)(((local_out - s_mean) * rsqrtf(s_variance))
* (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i])));
}
template <>
__global__
void add_bias_input_layernorm(
__half* out, const __half* input, const __half* bias,
const __half* gamma, const __half* beta, int m, int n)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float2 local_out_fp2;
half2* out_ptr = (half2*)out;
const half2* input_ptr = (const half2*)input;
const half2* bias_ptr = (const half2*)bias;
const half2* gamma_ptr = (const half2*)gamma;
const half2* beta_ptr = (const half2*)beta;
float local_out = 0.0f;
int id = blockIdx.x * n / 2 + tid;
local_out_fp2 = __half22float2(
__hadd2(__hadd2(out_ptr[id], input_ptr[id]), __ldg(&bias_ptr[tid])));
local_out += local_out_fp2.x;
local_out += local_out_fp2.y;
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean);
variance += (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean);
variance = blockReduceSum<float>(variance);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6f);
__syncthreads();
float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
local_out_fp2.x =
(local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
local_out_fp2.y =
(local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
out_ptr[id] = __float22half2_rn(local_out_fp2);
}
template<typename T>
void add_bias_input_layernorm_kernelLauncher(
T* out, const T* input, const T* bias,
const T* gamma, const T* beta, int m, int n, hipStream_t stream)
{
assert(n < 1024);
dim3 grid(m);
dim3 block(n);
hipLaunchKernelGGL(( add_bias_input_layernorm<T>), dim3(grid), dim3(block), 0, stream,
out, input, bias, gamma, beta, m, n);
}
template <>
void add_bias_input_layernorm_kernelLauncher(
__half* out, const __half* input, const __half* bias,
const __half* gamma, const __half* beta, int m, int n, hipStream_t stream)
{
assert(n / 2 < 1024);
dim3 grid(m);
dim3 block(n / 2);
hipLaunchKernelGGL(( add_bias_input_layernorm<__half>), dim3(grid), dim3(block), 0, stream,
out, input, bias, gamma, beta, m, n);
}
template void add_bias_input_layernorm_kernelLauncher<float>(
float* out, const float* input,
const float* bias, const float* gamma, const float* beta,
int m, int n, hipStream_t stream);
template void add_bias_input_layernorm_kernelLauncher<__half>(
__half* out, const __half* input,
const __half* bias, const __half* gamma, const __half* beta,
int m, int n, hipStream_t stream);
/// *********************************** fin ***********************************
/// *********************** compress transformer input ***********************
template <typename T>
__global__
void compress_bert_input(
const T* from_tensor, const int* mask, const int* prefix_sum,
T* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim)
{
int bid = blockIdx.y; // batch
int wid = blockIdx.x; // word
int tid = threadIdx.x; //
/// 1. count pos for from tensor
int mask_idx = bid * seq_len + wid;
if (mask[mask_idx] > 0.5) {
int valid_idx = prefix_sum[mask_idx];
/// 2. write batch id and word id for each word
if (tid == 0) {
batch_idx[valid_idx] = bid;
word_idx[valid_idx] = wid;
}
/// 3. copy src data
float* src_ptr = (float*)from_tensor;
float* dst_ptr = (float*)to_tensor;
int src_idx = mask_idx * hidden_dim + tid;
int dst_idx = valid_idx * hidden_dim + tid;
dst_ptr[dst_idx] = src_ptr[src_idx];
}
}
template <>
__global__
void compress_bert_input(
const __half* from_tensor, const int* mask, const int* prefix_sum,
__half* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim)
{
int bid = blockIdx.y; // batch
int wid = blockIdx.x; // word
int tid = threadIdx.x; //
/// 1. count pos for from tensor
int mask_idx = bid * seq_len + wid;
if (mask[mask_idx] > 0.5) {
int valid_idx = prefix_sum[mask_idx];
/// 2. write batch id and word id for each word
if (tid == 0) {
batch_idx[valid_idx] = bid;
word_idx[valid_idx] = wid;
}
/// 3. copy src data
half2* src_ptr = (half2*)from_tensor;
half2* dst_ptr = (half2*)to_tensor;
int src_idx = mask_idx * hidden_dim + tid;
int dst_idx = valid_idx * hidden_dim + tid;
dst_ptr[dst_idx] = src_ptr[src_idx];
}
}
template<typename T>
void compressBertInput_kernelLauncher(
const T* from_tensor, const int* mask, const int* prefix_sum,
T* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim, hipStream_t stream)
{
/// TODO : fp32
dim3 grid(seq_len, batch_size);
dim3 block(hidden_dim);
// dim3 block(1);
assert(hidden_dim <= 1024);
hipLaunchKernelGGL(( compress_bert_input), dim3(grid), dim3(block), 0, stream,
from_tensor, mask, prefix_sum,
to_tensor, batch_idx, word_idx,
batch_size , seq_len, hidden_dim);
return;
}
template<>
void compressBertInput_kernelLauncher(
const __half* from_tensor, const int* mask, const int* prefix_sum,
__half* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim, hipStream_t stream)
{
dim3 grid(seq_len, batch_size);
dim3 block(hidden_dim / 2);
// dim3 block(1);
assert(hidden_dim <= 1024 / 2);
hipLaunchKernelGGL(( compress_bert_input), dim3(grid), dim3(block), 0, stream,
from_tensor, mask, prefix_sum,
to_tensor, batch_idx, word_idx,
batch_size , seq_len, hidden_dim / 2);
}
template void compressBertInput_kernelLauncher<float>(
const float* from_tensor, const int* mask, const int* prefix_sum,
float* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim, hipStream_t stream);
template void compressBertInput_kernelLauncher<__half>(
const __half* from_tensor, const int* mask, const int* prefix_sum,
__half* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim, hipStream_t stream);
/// *********************************** fin ***********************************
/// *********************** restore transformer output ************************
template<typename T>
__global__
void restore_bert_output(
T* to_tensor,
const T* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim)
{
int bid = batch_idx[blockIdx.x];
int wid = word_idx[blockIdx.x];
int tid = threadIdx.x;
int vid = blockIdx.x;
/// 3. copy src data
float* src_ptr = (float*)from_tensor;
float* dst_ptr = (float*)to_tensor;
int src_idx = vid * hidden_dim + tid;
int dst_idx = (bid * seq_len + wid) * hidden_dim + tid;
dst_ptr[dst_idx] = src_ptr[src_idx];
}
template <>
__global__
void restore_bert_output(
__half* to_tensor,
const __half* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim)
{
int bid = batch_idx[blockIdx.x];
int wid = word_idx[blockIdx.x];
int tid = threadIdx.x;
int vid = blockIdx.x;
/// 3. copy src data
half2* src_ptr = (half2*)from_tensor;
half2* dst_ptr = (half2*)to_tensor;
int src_idx = vid * hidden_dim + tid;
int dst_idx = (bid * seq_len + wid) * hidden_dim + tid;
dst_ptr[dst_idx] = src_ptr[src_idx];
}
template<typename T>
void restoreBertOutput_kernelLauncher(
T* to_tensor,
const T* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim, hipStream_t stream)
{
// TODO : fp32
dim3 grid(valid_word_num);
dim3 block(hidden_dim);
assert(hidden_dim <= 1024);
hipLaunchKernelGGL(( restore_bert_output), dim3(grid), dim3(block), 0, stream,
to_tensor,
from_tensor, batch_idx, word_idx,
valid_word_num, seq_len, hidden_dim);
}
template<>
void restoreBertOutput_kernelLauncher(
__half* to_tensor,
const __half* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim, hipStream_t stream)
{
dim3 grid(valid_word_num);
dim3 block(hidden_dim / 2);
assert(hidden_dim <= 1024 / 2);
hipLaunchKernelGGL(( restore_bert_output), dim3(grid), dim3(block), 0, stream,
to_tensor,
from_tensor, batch_idx, word_idx,
valid_word_num, seq_len, hidden_dim / 2);
}
template void restoreBertOutput_kernelLauncher<float>(
float* to_tensor,
const float* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim, hipStream_t stream);
template void restoreBertOutput_kernelLauncher<__half>(
__half* to_tensor,
const __half* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim, hipStream_t stream);
/// *********************************** fin ***********************************
/// ***************************** exclusive scan ******************************
// The scan code is rewritten based on this repo :
// https://github.com/mattdean1/cuda/tree/master/parallel-scan
// I only rewrote the device memory allocation part.
int THREADS_PER_BLOCK = 512;
int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2;
#define SHARED_MEMORY_BANKS 32
#define LOG_MEM_BANKS 5
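// Shift by LOG_MEM_BANKS to add one padding slot per 32 shared-memory entries and avoid bank conflicts.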
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_MEM_BANKS)
__global__ void prescan_large(int *output, const int *input, int n, int *sums)
{
extern __shared__ int temp[];
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int blockOffset = blockID * n;
int ai = threadID;
int bi = threadID + (n / 2);
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
temp[ai + bankOffsetA] = input[blockOffset + ai];
temp[bi + bankOffsetB] = input[blockOffset + bi];
int offset = 1;
for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (threadID < d)
{
int ai = offset * (2 * threadID + 1) - 1;
int bi = offset * (2 * threadID + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
__syncthreads();
if (threadID == 0) {
sums[blockID] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)];
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (threadID < d)
{
int ai = offset * (2 * threadID + 1) - 1;
int bi = offset * (2 * threadID + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
output[blockOffset + ai] = temp[ai + bankOffsetA];
output[blockOffset + bi] = temp[bi + bankOffsetB];
}
__global__ void prescan_arbitrary(
int *output, const int *input, int n, int powerOfTwo)
{
extern __shared__ int temp[];// allocated on invocation
int threadID = threadIdx.x;
int ai = threadID;
int bi = threadID + (n / 2);
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
if (threadID < n) {
temp[ai + bankOffsetA] = input[ai];
temp[bi + bankOffsetB] = input[bi];
}
else {
temp[ai + bankOffsetA] = 0;
temp[bi + bankOffsetB] = 0;
}
int offset = 1;
// build sum in place up the tree
for (int d = powerOfTwo >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (threadID < d)
{
int ai = offset * (2 * threadID + 1) - 1;
int bi = offset * (2 * threadID + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (threadID == 0) {
// clear the last element
temp[powerOfTwo - 1 + CONFLICT_FREE_OFFSET(powerOfTwo - 1)] = 0;
}
for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (threadID < d)
{
int ai = offset * (2 * threadID + 1) - 1;
int bi = offset * (2 * threadID + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
if (threadID < n) {
output[ai] = temp[ai + bankOffsetA];
output[bi] = temp[bi + bankOffsetB];
}
}
__global__ void add(int *output, int length, int *n) {
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int blockOffset = blockID * length;
output[blockOffset + threadID] += n[blockID];
}
__global__ void add(int *output, int length, const int *n1, const int *n2) {
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int blockOffset = blockID * length;
output[blockOffset + threadID] += n1[blockID] + n2[blockID];
}
// from https://stackoverflow.com/a/12506181
int nextPowerOfTwo(int x) {
int power = 1;
while (power < x) {
power *= 2;
}
return power;
}
void scanSmallDeviceArray(
int *d_out, const int* d_in, const int length, const hipStream_t stream);
void scanLargeDeviceArray(
int *d_out, const int* d_in, const int length, int *d_buf,
const hipStream_t stream);
void scanLargeEvenDeviceArray(
int *d_out, const int* d_in, const int length, int *d_buf,
const hipStream_t stream);
void scanLargeEvenDeviceArray(
int *d_out, const int* d_in, const int length, int *d_buf,
const hipStream_t stream)
{
const int blocks = length / ELEMENTS_PER_BLOCK;
const int sharedMemArraySize = ELEMENTS_PER_BLOCK * sizeof(int);
int *d_sums = d_buf;
int *d_incr = d_buf + blocks;
// hipMalloc((void **)&d_sums, blocks * sizeof(int));
// hipMalloc((void **)&d_incr, blocks * sizeof(int));
hipLaunchKernelGGL(( prescan_large), dim3(blocks), dim3(THREADS_PER_BLOCK), 2 * sharedMemArraySize, stream,
d_out, d_in, ELEMENTS_PER_BLOCK, d_sums);
const int sumsArrThreadsNeeded = (blocks + 1) / 2;
if (sumsArrThreadsNeeded > THREADS_PER_BLOCK) {
// perform a large scan on the sums arr
scanLargeDeviceArray(d_incr, d_sums, blocks, d_buf, stream);
}
else {
// only need one block to scan sums arr so can use small scan
scanSmallDeviceArray(d_incr, d_sums, blocks, stream);
}
hipLaunchKernelGGL(( add), dim3(blocks), dim3(ELEMENTS_PER_BLOCK), 0, stream,
d_out, ELEMENTS_PER_BLOCK, d_incr);
}
void scanSmallDeviceArray(
int *d_out, const int* d_in, const int length, const hipStream_t stream)
{
int powerOfTwo = nextPowerOfTwo(length);
hipLaunchKernelGGL(( prescan_arbitrary)
, dim3(1), dim3((length + 1) / 2), 2 * powerOfTwo * sizeof(int), stream ,
d_out, d_in, length, powerOfTwo);
}
///
void scanLargeDeviceArray(
int *d_out, const int* d_in, const int length, int *d_buf,
const hipStream_t stream)
{
int remainder = length % (ELEMENTS_PER_BLOCK);
if (remainder == 0) {
scanLargeEvenDeviceArray(d_out, d_in, length, d_buf, stream);
}
else {
// perform a large scan on a compatible multiple of elements
int lengthMultiple = length - remainder;
scanLargeEvenDeviceArray(d_out, d_in, lengthMultiple, d_buf, stream);
// scan the remaining elements and add the (inclusive)
// last element of the large scan to this
int *startOfOutputArray = &(d_out[lengthMultiple]);
scanSmallDeviceArray(
startOfOutputArray, &(d_in[lengthMultiple]), remainder, stream);
hipLaunchKernelGGL(( add), dim3(1), dim3(remainder), 0, stream,
startOfOutputArray, remainder, &(d_in[lengthMultiple - 1]),
&(d_out[lengthMultiple - 1]));
}
}
void exclusiveScan_kernelLauncher(
int* d_out, const int* d_in, const int length, const hipStream_t stream)
{
if (length > ELEMENTS_PER_BLOCK) {
scanLargeDeviceArray(d_out, d_in, length, d_out + length, stream);
}
else {
scanSmallDeviceArray(d_out, d_in, length, stream);
}
}
/// *********************************** fin ***********************************
}//namespace effectivetransformer
| 8c2d924a5afa8c2445a5878993cb05cb97ba56a2.cu | /*
* Copyright (C) 2020 ByteDance Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cuda_kernels.h"
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <cuda_fp16.h>
namespace effectivetransformer{
// gelu code from
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/cuda_kernels.cu#L26-L45
template <typename T>
__inline__ __device__
T gelu(T x)
{
float cdf = 0.5f *
(1.0f + tanhf((0.7978845608028654f * (x + 0.044715f * x * x * x))));
return x * cdf;
}
template <>
__inline__ __device__
half2 gelu(half2 val)
{
half2 val_pow3 = __hmul2(val, __hmul2(val, val));
float2 tmp_pow = __half22float2(val_pow3);
float2 tmp = __half22float2(val);
tmp.x = 0.5f *
(1.0f + tanhf((0.7978845608028654f * (tmp.x + 0.044715f * tmp_pow.x))));
tmp.y = 0.5f *
(1.0f + tanhf((0.7978845608028654f * (tmp.y + 0.044715f * tmp_pow.y))));
return __hmul2(val, __float22half2_rn(tmp));
}
// reduce code from
// https://github.com/NVIDIA/DeepLearningExamples/blob/master/FasterTransformer/v1/fastertransformer/cuda/cuda_kernels.cu#L47-L73
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)0.0f;
val = warpReduceSum(val);
return val;
}
/// ***************************** add_bias + gelu *****************************
template <typename T>
__global__
void add_bias_act(T* out, const T* bias, int m, int n)
{
T val, reg_bias;
int row_id = blockIdx.x;
int ite = n / blockDim.x;
int tid = threadIdx.x;
for(int i = 0; i < ite; ++i)
{
reg_bias = __ldg(&bias[i * blockDim.x + tid]);
row_id = blockIdx.x;
while(row_id < m){
val = out[tid + i * blockDim.x + row_id * n]+ reg_bias;
out[tid + i * blockDim.x + row_id * n] = gelu<T>(val);
row_id += gridDim.x;
}
}
}
template <>
__global__
void add_bias_act(__half* out, const __half* bias, int m, int n)
{
half2 val, reg_bias;
int row_id = blockIdx.x;
int ite = n / blockDim.x / 2;
int tid = threadIdx.x;
half2* out_ptr = (half2*) out;
const half2* bias_ptr = (half2*) bias;
for(int i = 0; i < ite; ++i)
{
reg_bias = __ldg(&bias_ptr[i * blockDim.x + tid]);
row_id = blockIdx.x;
while(row_id < m){
val = out_ptr[tid + i * blockDim.x + row_id * n / 2];
val = __hadd2(val, reg_bias);
out_ptr[tid + i * blockDim.x + row_id * n / 2] = gelu<half2>(val);
row_id += gridDim.x;
}
}
}
template <typename T>
void add_bias_act_kernelLauncher(
T* out, const T* bias, int m, int n, cudaStream_t stream)
{
dim3 grid(max(m / 4, 1));
dim3 block(n / 4);
assert(block.x < 1024);
add_bias_act<T><<<grid, block, 0, stream>>>(out, bias, m, n);
}
template void add_bias_act_kernelLauncher<__half>(
__half* out, const __half* bias, int m, int n, cudaStream_t stream);
template void add_bias_act_kernelLauncher<float>(
float* out, const float* bias, int m, int n, cudaStream_t stream);
/// *********************************** fin ***********************************
/// ************************** add_bias + layer_norm **************************
template <typename T>
__global__
void add_bias_input_layernorm(
T* out, const T* input, const T* bias, const T* gamma,
const T* beta, int m, int n)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float local_out = 0.0f;
for(int i = tid; i < n; i += blockDim.x)
local_out += (float)(out[blockIdx.x * n + i]
+ input[blockIdx.x * n + i] + __ldg(&bias[i]));
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = blockReduceSum<float>((
local_out - s_mean) * (local_out - s_mean));
if(threadIdx.x == 0)
s_variance = variance / n + 1e-6f;
__syncthreads();
for(int i = tid; i < n; i += blockDim.x)
out[blockIdx.x * n + i] =
(T)(((local_out - s_mean) * rsqrtf(s_variance))
* (float)(__ldg(&gamma[i])) + (float)(__ldg(&beta[i])));
}
template <>
__global__
void add_bias_input_layernorm(
__half* out, const __half* input, const __half* bias,
const __half* gamma, const __half* beta, int m, int n)
{
int tid = threadIdx.x;
__shared__ float s_mean;
__shared__ float s_variance;
float mean = 0.0f;
float variance = 0.0f;
float2 local_out_fp2;
half2* out_ptr = (half2*)out;
const half2* input_ptr = (const half2*)input;
const half2* bias_ptr = (const half2*)bias;
const half2* gamma_ptr = (const half2*)gamma;
const half2* beta_ptr = (const half2*)beta;
float local_out = 0.0f;
int id = blockIdx.x * n / 2 + tid;
local_out_fp2 = __half22float2(
__hadd2(__hadd2(out_ptr[id], input_ptr[id]), __ldg(&bias_ptr[tid])));
local_out += local_out_fp2.x;
local_out += local_out_fp2.y;
mean = blockReduceSum<float>(local_out);
if(threadIdx.x == 0)
s_mean = mean / n;
__syncthreads();
variance = (local_out_fp2.x - s_mean) * (local_out_fp2.x - s_mean);
variance += (local_out_fp2.y - s_mean) * (local_out_fp2.y - s_mean);
variance = blockReduceSum<float>(variance);
if(threadIdx.x == 0)
s_variance = rsqrtf(variance / n + 1e-6f);
__syncthreads();
float2 gamma_val = __half22float2(__ldg(&gamma_ptr[tid]));
float2 beta_val = __half22float2(__ldg(&beta_ptr[tid]));
local_out_fp2.x =
(local_out_fp2.x - s_mean) * s_variance * gamma_val.x + beta_val.x;
local_out_fp2.y =
(local_out_fp2.y - s_mean) * s_variance * gamma_val.y + beta_val.y;
out_ptr[id] = __float22half2_rn(local_out_fp2);
}
template<typename T>
void add_bias_input_layernorm_kernelLauncher(
T* out, const T* input, const T* bias,
const T* gamma, const T* beta, int m, int n, cudaStream_t stream)
{
assert(n < 1024);
dim3 grid(m);
dim3 block(n);
add_bias_input_layernorm<T><<<grid, block, 0, stream>>>(
out, input, bias, gamma, beta, m, n);
}
template <>
void add_bias_input_layernorm_kernelLauncher(
__half* out, const __half* input, const __half* bias,
const __half* gamma, const __half* beta, int m, int n, cudaStream_t stream)
{
assert(n / 2 < 1024);
dim3 grid(m);
dim3 block(n / 2);
add_bias_input_layernorm<__half><<<grid, block, 0, stream>>>(
out, input, bias, gamma, beta, m, n);
}
template void add_bias_input_layernorm_kernelLauncher<float>(
float* out, const float* input,
const float* bias, const float* gamma, const float* beta,
int m, int n, cudaStream_t stream);
template void add_bias_input_layernorm_kernelLauncher<__half>(
__half* out, const __half* input,
const __half* bias, const __half* gamma, const __half* beta,
int m, int n, cudaStream_t stream);
/// *********************************** fin ***********************************
/// *********************** compress transformer input ***********************
template <typename T>
__global__
void compress_bert_input(
const T* from_tensor, const int* mask, const int* prefix_sum,
T* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim)
{
int bid = blockIdx.y; // batch
int wid = blockIdx.x; // word
int tid = threadIdx.x; //
/// 1. count pos for from tensor
int mask_idx = bid * seq_len + wid;
if (mask[mask_idx] > 0.5) {
int valid_idx = prefix_sum[mask_idx];
/// 2. write batch id and word id for each word
if (tid == 0) {
batch_idx[valid_idx] = bid;
word_idx[valid_idx] = wid;
}
/// 3. copy src data
float* src_ptr = (float*)from_tensor;
float* dst_ptr = (float*)to_tensor;
int src_idx = mask_idx * hidden_dim + tid;
int dst_idx = valid_idx * hidden_dim + tid;
dst_ptr[dst_idx] = src_ptr[src_idx];
}
}
template <>
__global__
void compress_bert_input(
const __half* from_tensor, const int* mask, const int* prefix_sum,
__half* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim)
{
int bid = blockIdx.y; // batch
int wid = blockIdx.x; // word
int tid = threadIdx.x; //
/// 1. count pos for from tensor
int mask_idx = bid * seq_len + wid;
if (mask[mask_idx] > 0.5) {
int valid_idx = prefix_sum[mask_idx];
/// 2. write batch id and word id for each word
if (tid == 0) {
batch_idx[valid_idx] = bid;
word_idx[valid_idx] = wid;
}
/// 3. copy src data
half2* src_ptr = (half2*)from_tensor;
half2* dst_ptr = (half2*)to_tensor;
int src_idx = mask_idx * hidden_dim + tid;
int dst_idx = valid_idx * hidden_dim + tid;
dst_ptr[dst_idx] = src_ptr[src_idx];
}
}
template<typename T>
void compressBertInput_kernelLauncher(
const T* from_tensor, const int* mask, const int* prefix_sum,
T* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim, cudaStream_t stream)
{
/// TODO : fp32
dim3 grid(seq_len, batch_size);
dim3 block(hidden_dim);
// dim3 block(1);
assert(hidden_dim <= 1024);
compress_bert_input<<<grid, block, 0, stream>>>(
from_tensor, mask, prefix_sum,
to_tensor, batch_idx, word_idx,
batch_size , seq_len, hidden_dim);
return;
}
template<>
void compressBertInput_kernelLauncher(
const __half* from_tensor, const int* mask, const int* prefix_sum,
__half* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim, cudaStream_t stream)
{
dim3 grid(seq_len, batch_size);
dim3 block(hidden_dim / 2);
// dim3 block(1);
assert(hidden_dim <= 1024 / 2);
compress_bert_input<<<grid, block, 0, stream>>>(
from_tensor, mask, prefix_sum,
to_tensor, batch_idx, word_idx,
batch_size , seq_len, hidden_dim / 2);
}
template void compressBertInput_kernelLauncher<float>(
const float* from_tensor, const int* mask, const int* prefix_sum,
float* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim, cudaStream_t stream);
template void compressBertInput_kernelLauncher<__half>(
const __half* from_tensor, const int* mask, const int* prefix_sum,
__half* to_tensor, int* batch_idx, int* word_idx,
int batch_size , int seq_len, int hidden_dim, cudaStream_t stream);
/// *********************************** fin ***********************************
/// *********************** restore transformer output ************************
template<typename T>
__global__
void restore_bert_output(
T* to_tensor,
const T* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim)
{
int bid = batch_idx[blockIdx.x];
int wid = word_idx[blockIdx.x];
int tid = threadIdx.x;
int vid = blockIdx.x;
/// 3. copy src data
float* src_ptr = (float*)from_tensor;
float* dst_ptr = (float*)to_tensor;
int src_idx = vid * hidden_dim + tid;
int dst_idx = (bid * seq_len + wid) * hidden_dim + tid;
dst_ptr[dst_idx] = src_ptr[src_idx];
}
template <>
__global__
void restore_bert_output(
__half* to_tensor,
const __half* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim)
{
int bid = batch_idx[blockIdx.x];
int wid = word_idx[blockIdx.x];
int tid = threadIdx.x;
int vid = blockIdx.x;
/// 3. copy src data
half2* src_ptr = (half2*)from_tensor;
half2* dst_ptr = (half2*)to_tensor;
int src_idx = vid * hidden_dim + tid;
int dst_idx = (bid * seq_len + wid) * hidden_dim + tid;
dst_ptr[dst_idx] = src_ptr[src_idx];
}
template<typename T>
void restoreBertOutput_kernelLauncher(
T* to_tensor,
const T* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim, cudaStream_t stream)
{
// TODO : fp32
dim3 grid(valid_word_num);
dim3 block(hidden_dim);
assert(hidden_dim <= 1024);
restore_bert_output<<<grid, block, 0, stream>>>(
to_tensor,
from_tensor, batch_idx, word_idx,
valid_word_num, seq_len, hidden_dim);
}
template<>
void restoreBertOutput_kernelLauncher(
__half* to_tensor,
const __half* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim, cudaStream_t stream)
{
dim3 grid(valid_word_num);
dim3 block(hidden_dim / 2);
assert(hidden_dim <= 1024 / 2);
restore_bert_output<<<grid, block, 0, stream>>>(
to_tensor,
from_tensor, batch_idx, word_idx,
valid_word_num, seq_len, hidden_dim / 2);
}
template void restoreBertOutput_kernelLauncher<float>(
float* to_tensor,
const float* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim, cudaStream_t stream);
template void restoreBertOutput_kernelLauncher<__half>(
__half* to_tensor,
const __half* from_tensor, const int* batch_idx, const int* word_idx,
int valid_word_num, int seq_len, int hidden_dim, cudaStream_t stream);
/// *********************************** fin ***********************************
/// ***************************** exclusive scan ******************************
// The scan code is rewritten based on this repo :
// https://github.com/mattdean1/cuda/tree/master/parallel-scan
// I only rewrote the device memory allocation part.
int THREADS_PER_BLOCK = 512;
int ELEMENTS_PER_BLOCK = THREADS_PER_BLOCK * 2;
#define SHARED_MEMORY_BANKS 32
#define LOG_MEM_BANKS 5
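// Shift by LOG_MEM_BANKS to add one padding slot per 32 shared-memory entries and avoid bank conflicts.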
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_MEM_BANKS)
__global__ void prescan_large(int *output, const int *input, int n, int *sums)
{
extern __shared__ int temp[];
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int blockOffset = blockID * n;
int ai = threadID;
int bi = threadID + (n / 2);
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
temp[ai + bankOffsetA] = input[blockOffset + ai];
temp[bi + bankOffsetB] = input[blockOffset + bi];
int offset = 1;
for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
{
__syncthreads();
if (threadID < d)
{
int ai = offset * (2 * threadID + 1) - 1;
int bi = offset * (2 * threadID + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
__syncthreads();
if (threadID == 0) {
sums[blockID] = temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)];
temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (threadID < d)
{
int ai = offset * (2 * threadID + 1) - 1;
int bi = offset * (2 * threadID + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
output[blockOffset + ai] = temp[ai + bankOffsetA];
output[blockOffset + bi] = temp[bi + bankOffsetB];
}
__global__ void prescan_arbitrary(
int *output, const int *input, int n, int powerOfTwo)
{
extern __shared__ int temp[];// allocated on invocation
int threadID = threadIdx.x;
int ai = threadID;
int bi = threadID + (n / 2);
int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
if (threadID < n) {
temp[ai + bankOffsetA] = input[ai];
temp[bi + bankOffsetB] = input[bi];
}
else {
temp[ai + bankOffsetA] = 0;
temp[bi + bankOffsetB] = 0;
}
int offset = 1;
// build sum in place up the tree
for (int d = powerOfTwo >> 1; d > 0; d >>= 1)
{
__syncthreads();
if (threadID < d)
{
int ai = offset * (2 * threadID + 1) - 1;
int bi = offset * (2 * threadID + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
temp[bi] += temp[ai];
}
offset *= 2;
}
if (threadID == 0) {
// clear the last element
temp[powerOfTwo - 1 + CONFLICT_FREE_OFFSET(powerOfTwo - 1)] = 0;
}
for (int d = 1; d < powerOfTwo; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (threadID < d)
{
int ai = offset * (2 * threadID + 1) - 1;
int bi = offset * (2 * threadID + 2) - 1;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
int t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
if (threadID < n) {
output[ai] = temp[ai + bankOffsetA];
output[bi] = temp[bi + bankOffsetB];
}
}
__global__ void add(int *output, int length, int *n) {
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int blockOffset = blockID * length;
output[blockOffset + threadID] += n[blockID];
}
__global__ void add(int *output, int length, const int *n1, const int *n2) {
int blockID = blockIdx.x;
int threadID = threadIdx.x;
int blockOffset = blockID * length;
output[blockOffset + threadID] += n1[blockID] + n2[blockID];
}
// from https://stackoverflow.com/a/12506181
int nextPowerOfTwo(int x) {
int power = 1;
while (power < x) {
power *= 2;
}
return power;
}
void scanSmallDeviceArray(
int *d_out, const int* d_in, const int length, const cudaStream_t stream);
void scanLargeDeviceArray(
int *d_out, const int* d_in, const int length, int *d_buf,
const cudaStream_t stream);
void scanLargeEvenDeviceArray(
int *d_out, const int* d_in, const int length, int *d_buf,
const cudaStream_t stream);
void scanLargeEvenDeviceArray(
int *d_out, const int* d_in, const int length, int *d_buf,
const cudaStream_t stream)
{
const int blocks = length / ELEMENTS_PER_BLOCK;
const int sharedMemArraySize = ELEMENTS_PER_BLOCK * sizeof(int);
int *d_sums = d_buf;
int *d_incr = d_buf + blocks;
// cudaMalloc((void **)&d_sums, blocks * sizeof(int));
// cudaMalloc((void **)&d_incr, blocks * sizeof(int));
prescan_large<<<blocks, THREADS_PER_BLOCK, 2 * sharedMemArraySize, stream>>>(
d_out, d_in, ELEMENTS_PER_BLOCK, d_sums);
const int sumsArrThreadsNeeded = (blocks + 1) / 2;
if (sumsArrThreadsNeeded > THREADS_PER_BLOCK) {
// perform a large scan on the sums arr
scanLargeDeviceArray(d_incr, d_sums, blocks, d_buf, stream);
}
else {
// only need one block to scan sums arr so can use small scan
scanSmallDeviceArray(d_incr, d_sums, blocks, stream);
}
add<<<blocks, ELEMENTS_PER_BLOCK, 0, stream>>>(
d_out, ELEMENTS_PER_BLOCK, d_incr);
}
void scanSmallDeviceArray(
int *d_out, const int* d_in, const int length, const cudaStream_t stream)
{
int powerOfTwo = nextPowerOfTwo(length);
prescan_arbitrary
<<<1, (length + 1) / 2, 2 * powerOfTwo * sizeof(int), stream >>>(
d_out, d_in, length, powerOfTwo);
}
///
void scanLargeDeviceArray(
int *d_out, const int* d_in, const int length, int *d_buf,
const cudaStream_t stream)
{
int remainder = length % (ELEMENTS_PER_BLOCK);
if (remainder == 0) {
scanLargeEvenDeviceArray(d_out, d_in, length, d_buf, stream);
}
else {
// perform a large scan on a compatible multiple of elements
int lengthMultiple = length - remainder;
scanLargeEvenDeviceArray(d_out, d_in, lengthMultiple, d_buf, stream);
// scan the remaining elements and add the (inclusive)
// last element of the large scan to this
int *startOfOutputArray = &(d_out[lengthMultiple]);
scanSmallDeviceArray(
startOfOutputArray, &(d_in[lengthMultiple]), remainder, stream);
add<<<1, remainder, 0, stream>>>(
startOfOutputArray, remainder, &(d_in[lengthMultiple - 1]),
&(d_out[lengthMultiple - 1]));
}
}
void exclusiveScan_kernelLauncher(
int* d_out, const int* d_in, const int length, const cudaStream_t stream)
{
if (length > ELEMENTS_PER_BLOCK) {
scanLargeDeviceArray(d_out, d_in, length, d_out + length, stream);
}
else {
scanSmallDeviceArray(d_out, d_in, length, stream);
}
}
/// *********************************** fin ***********************************
}//namespace effectivetransformer
|
4dbc821828c99b24a3091e8999cb2d35b718c6f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2018 Wei Dai <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <include/bootstrap_gpu.cuh>
#include <include/gatebootstrapping_gpu.cuh>
#include <include/keyswitch_gpu.cuh>
#include <include/cufhe_gpu.cuh>
#include <include/details/error_gpu.cuh>
#include <include/ntt_gpu/ntt.cuh>
#include <limits>
#include <vector>
#include <algorithm>
namespace cufhe {
template<class P = TFHEpp::lvl1param>
constexpr uint MEM4HOMGATE =
((P::k+1) * P::l + 1 + P::k) * P::n * sizeof(FFP);
using namespace std;
using namespace TFHEpp;
vector<FFP*> bk_ntts;
vector<CuNTTHandler<>*> ntt_handlers;
template<class P = TFHEpp::lvl1param>
__global__ void __TRGSW2NTT__(FFP* const bk_ntt, const typename P::T* const bk,
CuNTTHandler<> ntt)
{
__shared__ FFP sh_temp[P::n];
const int index = blockIdx.z * ((P::k+1) * P::l * (P::k+1) * P::n) +
blockIdx.y * (P::k+1) * P::n + blockIdx.x * P::n;
ntt.NTT<typename P::T>(&bk_ntt[index], &bk[index], sh_temp, 0);
}
void TRGSW2NTT(cuFHETRGSWNTTlvl1& trgswntt,
const TFHEpp::TRGSW<TFHEpp::lvl1param>& trgsw, Stream& st)
{
hipSetDevice(st.device_id());
TFHEpp::lvl1param::T* d_trgsw;
hipMalloc((void**)&d_trgsw, sizeof(trgsw));
hipMemcpyAsync(d_trgsw, trgsw.data(), sizeof(trgsw),
hipMemcpyHostToDevice, st.st());
dim3 grid(lvl1param::k+1, (lvl1param::k+1) * lvl1param::l, 1);
dim3 block(lvl1param::n >> NTT_THREAD_UNITBIT);
hipLaunchKernelGGL(( __TRGSW2NTT__), dim3(grid), dim3(block), 0, st.st(),
trgswntt.trgswdevices[st.device_id()], d_trgsw,
*ntt_handlers[st.device_id()]);
CuCheckError();
hipMemcpyAsync(
trgswntt.trgswhost.data(), trgswntt.trgswdevices[st.device_id()],
sizeof(trgswntt.trgswhost), hipMemcpyDeviceToHost, st.st());
hipFree(d_trgsw);
}
void InitializeNTThandlers(const int gpuNum)
{
for (int i = 0; i < gpuNum; i++) {
hipSetDevice(i);
ntt_handlers.push_back(new CuNTTHandler<>());
ntt_handlers[i]->Create();
ntt_handlers[i]->CreateConstant();
hipDeviceSynchronize();
CuCheckError();
}
}
template<class P>
void BootstrappingKeyToNTT(const BootstrappingKey<P>& bk,
const int gpuNum)
{
bk_ntts.resize(gpuNum);
for (int i = 0; i < gpuNum; i++) {
hipSetDevice(i);
hipMalloc((void**)&bk_ntts[i], sizeof(FFP) * P::domainP::n * 2 *
P::targetP::l * 2 * P::targetP::n);
typename P::targetP::T* d_bk;
hipMalloc((void**)&d_bk, sizeof(bk));
hipMemcpy(d_bk, bk.data(), sizeof(bk), hipMemcpyHostToDevice);
hipDeviceSynchronize();
CuCheckError();
dim3 grid(P::targetP::k+1, (P::targetP::k+1) * P::targetP::l, P::domainP::n);
dim3 block(P::targetP::n >> NTT_THREAD_UNITBIT);
hipLaunchKernelGGL(( __TRGSW2NTT__), dim3(grid), dim3(block), 0, 0, bk_ntts[i], d_bk, *ntt_handlers[i]);
hipDeviceSynchronize();
CuCheckError();
hipFree(d_bk);
}
}
#define INST(P) \
template void BootstrappingKeyToNTT<P>(const BootstrappingKey<P>& bk, \
const int gpuNum)
INST(TFHEpp::lvl01param);
#undef INST
void DeleteBootstrappingKeyNTT(const int gpuNum)
{
for (int i = 0; i < bk_ntts.size(); i++) {
hipSetDevice(i);
hipFree(bk_ntts[i]);
ntt_handlers[i]->Destroy();
delete ntt_handlers[i];
}
ntt_handlers.clear();
}
__device__ inline void TRLWESubAndDecomposition(
FFP* const dectrlwe, const TFHEpp::lvl1param::T* const trlwe1,
const TFHEpp::lvl1param::T* const trlwe0)
{
const uint32_t tid = ThisThreadRankInBlock();
const uint32_t bdim = ThisBlockSize();
constexpr uint32_t decomp_mask = (1 << lvl1param::Bgbit) - 1;
constexpr int32_t decomp_half = 1 << (lvl1param::Bgbit - 1);
constexpr uint32_t decomp_offset = offsetgen<lvl1param>();
constexpr typename lvl1param::T roundoffset =
1ULL << (std::numeric_limits<typename lvl1param::T>::digits -
lvl1param::l * lvl1param::Bgbit - 1);
#pragma unroll
for (int i = tid; i < lvl1param::n; i += bdim) {
#pragma unroll
for (int j = 0; j < (lvl1param::k+1); j++) {
// decomp temp
lvl1param::T temp = trlwe1[j * lvl1param::n + i] -
trlwe0[j * lvl1param::n + i] + decomp_offset +
roundoffset;
#pragma unroll
for (int digit = 0; digit < lvl1param::l; digit += 1)
dectrlwe[j * lvl1param::l * lvl1param::n +
digit * lvl1param::n + i] =
FFP(lvl1param::T(
((temp >>
(std::numeric_limits<typename lvl1param::T>::digits -
(digit + 1) * lvl1param::Bgbit)) &
decomp_mask) -
decomp_half));
}
}
__syncthreads(); // must
}
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>) void __CMUXNTT__(
TFHEpp::lvl1param::T* out, const FFP* const tgsw_ntt,
const TFHEpp::lvl1param::T* const trlwe1,
const TFHEpp::lvl1param::T* const trlwe0, const CuNTTHandler<> ntt)
{
const uint32_t tid = ThisThreadRankInBlock();
const uint32_t bdim = ThisBlockSize();
extern __shared__ FFP sh[];
// To hold the data after Decomposition and NTT
FFP* sh_acc_ntt = &sh[0];
// To hold sum
FFP* sh_res_ntt = &sh[(lvl1param::k+1) * lvl1param::l * lvl1param::n];
TFHEpp::lvl1param::T* outtemp = (TFHEpp::lvl1param::T*)&sh[0];
TRLWESubAndDecomposition(sh_acc_ntt, trlwe1, trlwe0);
// (k+1)*l NTTs
// Input/output/buffer use the same shared memory location.
if (tid < (lvl1param::k+1) * lvl1param::l * (lvl1param::n >> NTT_THREAD_UNITBIT)) {
FFP* tar = &sh_acc_ntt[tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< lvl1param::nbit];
ntt.NTT<FFP>(tar, tar, tar,
tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< (lvl1param::nbit - NTT_THREAD_UNITBIT));
}
else { // must meet 5 sync made by NTTInv
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
}
__syncthreads();
// Multiply with bootstrapping key in global memory.
#pragma unroll
for (int i = tid; i < lvl1param::n; i += bdim) {
#pragma unroll
for(int k = 0; k < lvl1param::k+1; k++)
sh_res_ntt[i + k*lvl1param::n] = sh_acc_ntt[0 * lvl1param::n + i] *
tgsw_ntt[(((lvl1param::k+1) * 0 + k) << lvl1param::nbit) + i];
#pragma unroll
for (int digit = 1; digit < 2 * lvl1param::l; digit += 1) {
#pragma unroll
for(int k = 0; k < lvl1param::k+1; k++)
sh_res_ntt[i + k*lvl1param::n] += sh_acc_ntt[digit * lvl1param::n + i] *
tgsw_ntt[(((lvl1param::k+1) * digit + k) << lvl1param::nbit) + i];
}
}
__syncthreads();
#pragma unroll
for (int i = tid; i < (lvl1param::k+1) * lvl1param::n; i += bdim) outtemp[i] = trlwe0[i];
// k+1 NTTInvs and add acc
if (tid < (lvl1param::k+1) * (lvl1param::n >> NTT_THREAD_UNITBIT)) {
FFP* src = &sh_res_ntt[tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< lvl1param::nbit];
ntt.NTTInvAdd<typename lvl1param::T>(
&outtemp[tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< lvl1param::nbit],
src, src,
tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< (lvl1param::nbit - NTT_THREAD_UNITBIT));
}
else { // must meet 5 sync made by NTTInv
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
}
__syncthreads(); // must
for (int i = 0; i < (lvl1param::k+1) * lvl1param::n; i++) out[i] = outtemp[i];
__syncthreads();
}
template <class bkP, class iksP>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename bkP::targetP>) void __Bootstrap__(
typename iksP::domainP::T* const out, const typename iksP::domainP::T* const in,
const typename bkP::targetP::T mu, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename bkP::targetP::T tlwe[(bkP::targetP::k+1)*bkP::targetP::n];
__BlindRotate__<bkP>(tlwe,in,mu,bk,ntt);
KeySwitch<iksP>(out, tlwe, ksk);
__threadfence();
}
// template <class iksP, class bkP>
// __global__ __launch_bounds__(NUM_THREAD4HOMGATE) void __Bootstrap__(
// typename iksP::domainP::T* const out, const typename iksP::domainP::T* const in,
// const typename bkP::targetP::T mu, const FFP* const bk,
// const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
// {
// __shared__ typename bkP::targetP::T tlwe[iksP::targetP::k*iksP::targetP::n+1];
// KeySwitch<iksP>(tlwe, in, ksk);
// __threadfence();
// __BlindRotate__<bkP>(out,tlwe,mu,bk,ntt);
// __threadfence();
// }
template<class P>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename P::targetP>) void __BlindRotateGlobal__(
TFHEpp::lvl1param::T* const out, const TFHEpp::lvl0param::T* const in,
const TFHEpp::lvl1param::T mu, const FFP* const bk, const CuNTTHandler<> ntt)
{
__BlindRotate__<P>(out, in, mu, bk, ntt);
}
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>) void __SEIandBootstrap2TRLWE__(
TFHEpp::lvl1param::T* const out, const TFHEpp::lvl1param::T* const in,
const TFHEpp::lvl1param::T mu, const FFP* const bk, const TFHEpp::lvl0param::T* const ksk,
const CuNTTHandler<> ntt)
{
// Assert(bk.k() == 1);
// Assert(bk.l() == 2);
// Assert(bk.n() == lvl1param::n);
extern __shared__ FFP sh[];
FFP* sh_acc_ntt = &sh[0];
// Use the last section of shared memory to hold tlwe; this may help keep these data serial.
TFHEpp::lvl1param::T* tlwe =
(TFHEpp::lvl1param::T*)&sh[((lvl1param::k+1) * lvl1param::l + 2) * lvl1param::n];
lvl0param::T* tlwelvl0 =
(lvl0param::T*)&sh[((lvl1param::k+1) * lvl1param::l + 2 + lvl1param::k) * lvl1param::n];
KeySwitch<lvl10param>(tlwelvl0, in, ksk);
__syncthreads();
// test vector
// acc.a = 0; acc.b = vec(mu) * x ^ (in.b()/2048)
register uint32_t bar = 2 * lvl1param::n - modSwitchFromTorus<lvl1param>(
tlwelvl0[lvl0param::n]);
RotatedTestVector<lvl1param>(tlwe, bar, mu);
// accumulate
for (int i = 0; i < lvl0param::n; i++) { // n iterations
bar = modSwitchFromTorus<lvl1param>(tlwelvl0[i]);
Accumulate<lvl01param>(tlwe, sh_acc_ntt, bar,
bk + (i << lvl1param::nbit) * (lvl1param::k+1) * (lvl1param::k+1) * lvl1param::l, ntt);
}
__syncthreads();
for (int i = 0; i < (lvl1param::k+1) * lvl1param::n; i++) {
out[i] = tlwe[i];
}
__threadfence();
}
template<class P, uint index>
__device__ inline void __SampleExtractIndex__(typename P::T* const res, const typename P::T* const in){
const uint32_t tid = ThisThreadRankInBlock();
const uint32_t bdim = ThisBlockSize();
constexpr uint nmask = P::n-1;
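// Negacyclic sample extraction at position index: coefficients at offsets <= index are copied directly, the rest wrap around with a sign flip.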
for (uint i = tid; i <= P::k*P::n; i += bdim) {
if (i == P::k*P::n){
res[P::k*P::n] = in[P::k*P::n+index];
}else {
const uint k = i >> P::nbit;
const uint n = i & nmask;
if (n <= index) res[i] = in[k*P::n + index - n];
else res[i] = -in[k*P::n + P::n + index-n];
}
}
}
template <class iksP, class brP, typename brP::targetP::T μ, int casign, int cbsign, typename brP::domainP::T offset>
__device__ inline void __HomGate__(typename brP::targetP::T* const out,
const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const FFP* const bk,
const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__shared__ typename iksP::targetP::T tlwe[iksP::targetP::k*iksP::targetP::n+1];
IdentityKeySwitchPreAdd<iksP, casign, cbsign, offset>(tlwe, in0, in1, ksk);
__syncthreads();
__shared__ typename brP::targetP::T trlwe[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotate__<brP>(trlwe, tlwe, μ, bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(out,trlwe);
__threadfence();
}
template <class brP, typename brP::targetP::T μ, class iksP, int casign, int cbsign, typename brP::domainP::T offset>
__device__ inline void __HomGate__(typename iksP::targetP::T* const out,
const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const FFP* const bk,
const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__shared__ typename brP::targetP::T tlwe[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotatePreAdd__<brP, casign,cbsign,offset>(tlwe,in0,in1,bk,ntt);
KeySwitch<iksP>(out, tlwe, ksk);
__threadfence();
}
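// casign/cbsign give the signs applied to in0 and in1; offset is the gate constant folded into the b term before the key switch / blind rotation.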
// br iks ver.
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NandBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -1, -1, brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NorBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -1, -1, -brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __XnorBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -2, -2, -2 * brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 1, 1, -brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 1, 1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __XorBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 2, 2, 2 * brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndNYBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -1, 1, -brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndYNBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 1, -1, -brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrNYBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -1, 1, brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrYNBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 1, -1, brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
// Mux(inc,in1,in0) = inc?in1:in0 = inc&in1 + (!inc)&in0
template<class brP, typename brP::targetP::T μ, class iksP>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __MuxBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc,
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename brP::targetP::T tlwe1[(brP::targetP::k+1)*brP::targetP::n];
__shared__ typename brP::targetP::T tlwe0[(brP::targetP::k+1)*brP::targetP::n];
    __BlindRotatePreAdd__<brP, 1, 1, -brP::domainP::μ>(tlwe1,inc,in1,bk,ntt);
    __BlindRotatePreAdd__<brP, -1, 1, -brP::domainP::μ>(tlwe0,inc,in0,bk,ntt);
volatile const uint32_t tid = ThisThreadRankInBlock();
volatile const uint32_t bdim = ThisBlockSize();
#pragma unroll
for (int i = tid; i <= brP::targetP::n; i += bdim) {
tlwe1[i] += tlwe0[i];
if (i == brP::targetP::n) {
            tlwe1[brP::targetP::n] += μ;
}
}
__syncthreads();
KeySwitch<iksP>(out, tlwe1, ksk);
__threadfence();
}
// NMux(inc,in1,in0) = !(inc?in1:in0) = !(inc&in1 + (!inc)&in0)
template<class brP, typename brP::targetP::T μ, class iksP>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NMuxBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc,
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename brP::targetP::T tlwe1[(brP::targetP::k+1)*brP::targetP::n];
__shared__ typename brP::targetP::T tlwe0[(brP::targetP::k+1)*brP::targetP::n];
    __BlindRotatePreAdd__<brP, 1, 1, -brP::domainP::μ>(tlwe1,inc,in1,bk,ntt);
    __BlindRotatePreAdd__<brP, -1, 1, -brP::domainP::μ>(tlwe0,inc,in0,bk,ntt);
volatile const uint32_t tid = ThisThreadRankInBlock();
volatile const uint32_t bdim = ThisBlockSize();
#pragma unroll
for (int i = tid; i <= brP::targetP::n; i += bdim) {
tlwe1[i] = -tlwe1[i] - tlwe0[i];
if (i == brP::targetP::n) {
            tlwe1[brP::targetP::n] -= μ;
}
}
__syncthreads();
KeySwitch<iksP>(out, tlwe1, ksk);
__threadfence();
}
// iks br ver.
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NandBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, -1, -1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NorBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, -1, -1, -iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __XnorBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, -2, -2, -2 * iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, 1, 1, -iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, 1, 1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __XorBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, 2, 2, 2 * iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndNYBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, -1, 1, -iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndYNBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, 1, -1, -iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrNYBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, -1, 1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrYNBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
    __HomGate__<iksP, brP, μ, 1, -1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class P>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>) void __CopyBootstrap__(
typename P::T* const out, const typename P::T* const in)
{
const uint tid = ThisThreadRankInBlock();
const uint bdim = ThisBlockSize();
for (int i = tid; i <= P::k*P::n; i += bdim)
out[i] = in[i];
__syncthreads();
__threadfence();
}
template<class P>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>) void __NotBootstrap__(
typename P::T* const out, const typename P::T* const in)
{
const uint tid = ThisThreadRankInBlock();
const uint bdim = ThisBlockSize();
for (int i = tid; i <= P::k*P::n; i += bdim)
out[i] = -in[i];
__syncthreads();
__threadfence();
}
// Mux(inc,in1,in0) = inc?in1:in0 = inc&in1 + (!inc)&in0
template<class iksP, class brP, typename brP::targetP::T μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __MuxBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc,
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename iksP::targetP::T tlwelvl0[iksP::targetP::k*iksP::targetP::n+1];
    IdentityKeySwitchPreAdd<iksP, 1, 1, -iksP::domainP::μ>(tlwelvl0, inc, in1, ksk);
__syncthreads();
__shared__ typename brP::targetP::T tlwe1[(brP::targetP::k+1)*brP::targetP::n];
    __BlindRotate__<brP>(tlwe1,tlwelvl0,μ,bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(out, tlwe1);
    IdentityKeySwitchPreAdd<iksP, -1, 1, -iksP::domainP::μ>(tlwelvl0, inc, in0, ksk);
__syncthreads();
__shared__ typename brP::targetP::T tlwe0[(brP::targetP::k+1)*brP::targetP::n];
    __BlindRotate__<brP>(tlwe0,tlwelvl0,μ,bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(tlwe1, tlwe0);
__syncthreads();
volatile const uint32_t tid = ThisThreadRankInBlock();
volatile const uint32_t bdim = ThisBlockSize();
#pragma unroll
for (int i = tid; i <= brP::targetP::n; i += bdim) {
out[i] += tlwe1[i];
if (i == brP::targetP::n) {
            out[brP::targetP::n] += μ;
}
}
__threadfence();
}
// NMux(inc,in1,in0) = !(inc?in1:in0) = !(inc&in1 + (!inc)&in0)
template<class iksP, class brP, typename brP::targetP::T μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NMuxBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc,
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename iksP::targetP::T tlwelvl0[iksP::targetP::k*iksP::targetP::n+1];
    IdentityKeySwitchPreAdd<iksP, 1, 1, -iksP::domainP::μ>(tlwelvl0, inc, in1, ksk);
__syncthreads();
__shared__ typename brP::targetP::T tlwe1[(brP::targetP::k+1)*brP::targetP::n];
    __BlindRotate__<brP>(tlwe1,tlwelvl0,μ,bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(out, tlwe1);
    IdentityKeySwitchPreAdd<iksP, -1, 1, -iksP::domainP::μ>(tlwelvl0, inc, in0, ksk);
__syncthreads();
__shared__ typename brP::targetP::T tlwe0[(brP::targetP::k+1)*brP::targetP::n];
    __BlindRotate__<brP>(tlwe0,tlwelvl0,μ,bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(tlwe1, tlwe0);
__syncthreads();
volatile const uint32_t tid = ThisThreadRankInBlock();
volatile const uint32_t bdim = ThisBlockSize();
#pragma unroll
for (int i = tid; i <= brP::targetP::n; i += bdim) {
out[i] = -out[i] - tlwe1[i];
if (i == brP::targetP::n) {
            out[brP::targetP::n] -= μ;
}
}
__threadfence();
}
void Bootstrap(TFHEpp::lvl0param::T* const out, const TFHEpp::lvl0param::T* const in,
const lvl1param::T mu, const hipStream_t st, const int gpuNum)
{
hipLaunchKernelGGL(( __Bootstrap__<lvl01param,lvl10param>), dim3(1), dim3(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>), 0, st,
out, in, mu, bk_ntts[gpuNum], ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
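// Illustrative host-side use of Bootstrap() (a hedged, commented-out sketch;
// the device buffers d_out/d_in, the Stream object and the key transfers are
// assumed to have been prepared by the caller beforehand):
//
//   // TFHEpp::lvl0param::T *d_out, *d_in;  // device TLWE ciphertexts (assumed)
//   // Stream st;                           // cuFHE stream wrapper (assumed ready)
//   // Bootstrap(d_out, d_in, TFHEpp::lvl1param::μ, st.st(), st.device_id());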
void CMUXNTTkernel(TFHEpp::lvl1param::T* const res, const FFP* const cs,
TFHEpp::lvl1param::T* const c1,
TFHEpp::lvl1param::T* const c0, hipStream_t st,
const int gpuNum)
{
hipFuncSetAttribute(__CMUXNTT__,
hipFuncAttributeMaxDynamicSharedMemorySize,
(2 * TFHEpp::lvl1param::l + 2) * TFHEpp::lvl1param::n * sizeof(FFP));
hipLaunchKernelGGL(( __CMUXNTT__), dim3(1), dim3(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>),
((TFHEpp::lvl1param::k+1) * TFHEpp::lvl1param::l + 2) * TFHEpp::lvl1param::n * sizeof(FFP), st,
res, cs, c1, c0, *ntt_handlers[gpuNum]);
CuCheckError();
}
void BootstrapTLWE2TRLWE(TFHEpp::lvl1param::T* const out, const TFHEpp::lvl0param::T* const in,
const lvl1param::T mu, const hipStream_t st, const int gpuNum)
{
hipFuncSetAttribute(__BlindRotate__<TFHEpp::lvl01param>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<TFHEpp::lvl1param>);
hipLaunchKernelGGL(( __BlindRotateGlobal__<TFHEpp::lvl01param>), dim3(1), dim3(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>), MEM4HOMGATE<TFHEpp::lvl1param>, st,
out, in, mu, bk_ntts[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
void SEIandBootstrap2TRLWE(TFHEpp::lvl1param::T* const out, const TFHEpp::lvl1param::T* const in,
const lvl1param::T mu, const hipStream_t st, const int gpuNum)
{
hipFuncSetAttribute(
__SEIandBootstrap2TRLWE__, hipFuncAttributeMaxDynamicSharedMemorySize,
(((lvl1param::k+1) * lvl1param::l + 3) * lvl1param::n + (lvl0param::n + 1) / 2 + 1) *
sizeof(FFP));
    hipLaunchKernelGGL(( __SEIandBootstrap2TRLWE__), dim3(1), dim3(lvl1param::l * lvl1param::n >> NTT_THREAD_UNITBIT),
((2 * lvl1param::l + 3) * lvl1param::n +
(lvl0param::n + 1) / 2 + 1) *
sizeof(FFP),
st,
out, in, mu, bk_ntts[gpuNum], ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
template<class brP, typename brP::targetP::T μ, class iksP>
void NandBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__NandBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __NandBootstrap__<brP, μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void NandBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
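// Hedged usage sketch for the wrapper instantiated above (commented out; the
// device pointers and stream below are placeholders assumed to exist in the caller):
//
//   // TFHEpp::lvl0param::T *d_res, *d_a, *d_b;  // lvl0 TLWEs on the device (assumed)
//   // NandBootstrap<TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param>(
//   //     d_res, d_a, d_b, st.st(), st.device_id());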
template<class iksP, class brP, typename brP::targetP::T μ>
void NandBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__NandBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __NandBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void NandBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void OrBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__OrBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __OrBootstrap__<brP, brP::targetP::μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void OrBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void OrBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__OrBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __OrBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void OrBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void OrYNBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__OrYNBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __OrYNBootstrap__<brP, brP::targetP::μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void OrYNBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void OrYNBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__OrYNBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __OrYNBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void OrYNBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void OrNYBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__OrNYBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __OrNYBootstrap__<brP, brP::targetP::μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void OrNYBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void OrNYBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__OrNYBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __OrNYBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void OrNYBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void AndBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__AndBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __AndBootstrap__<brP, brP::targetP::μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void AndBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void AndBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__AndBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __AndBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void AndBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void AndYNBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__AndYNBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __AndYNBootstrap__<brP, brP::targetP::μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void AndYNBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void AndYNBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__AndYNBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __AndYNBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void AndYNBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void AndNYBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__AndNYBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __AndNYBootstrap__<brP, brP::targetP::μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void AndNYBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void AndNYBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__AndNYBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __AndNYBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void AndNYBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void NorBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__NorBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __NorBootstrap__<brP, brP::targetP::μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void NorBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void NorBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__NorBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __NorBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void NorBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void XorBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__XorBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __XorBootstrap__<brP, brP::targetP::μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void XorBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void XorBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__XorBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __XorBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void XorBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void XnorBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__XnorBootstrap__<brP, brP::targetP::μ, iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __XnorBootstrap__<brP, brP::targetP::μ, iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void XnorBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void XnorBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__XnorBootstrap__<iksP, brP, brP::targetP::μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
    hipLaunchKernelGGL(( __XnorBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), MEM4HOMGATE<typename brP::targetP>, st, 
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void XnorBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class P>
void CopyBootstrap(typename P::T* const out, const typename P::T* const in,
const hipStream_t st, const int gpuNum)
{
hipLaunchKernelGGL(( __CopyBootstrap__<P>), dim3(1), dim3(::min(P::n + 1,NUM_THREAD4HOMGATE<TFHEpp::lvl1param>)), 0, st, out, in);
CuCheckError();
}
#define INST(P) \
template void CopyBootstrap<P>(typename P::T* const out, const typename P::T* const in, \
const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl0param);
INST(TFHEpp::lvl1param);
#undef INST
template<class P>
void NotBootstrap(typename P::T* const out, const typename P::T* const in,
const hipStream_t st, const int gpuNum)
{
hipLaunchKernelGGL(( __NotBootstrap__<P>), dim3(1), dim3(::min(P::n + 1,NUM_THREAD4HOMGATE<TFHEpp::lvl1param>)), 0, st, out, in);
CuCheckError();
}
#define INST(P) \
template void NotBootstrap<P>(typename P::T* const out, const typename P::T* const in, \
const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl0param);
INST(TFHEpp::lvl1param);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void MuxBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc,
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0,
const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__MuxBootstrap__<brP,μ,iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP));
    hipLaunchKernelGGL(( __MuxBootstrap__<brP,μ,iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>), 
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP),
st, out, inc, in1, in0, bk_ntts[gpuNum],
ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void MuxBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc, \
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0, \
const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void MuxBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc,
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0,
const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__MuxBootstrap__<iksP,brP,μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP));
    hipLaunchKernelGGL(( __MuxBootstrap__<iksP,brP,μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>),
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP),
st, out, inc, in1, in0, bk_ntts[gpuNum],
ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void MuxBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc, \
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0, \
const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
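// Hedged usage sketch for the iks-first Mux instantiated above (commented out;
// in this parameterization the ciphertexts live at lvl1, and the pointers and
// stream below are placeholders assumed to exist in the caller):
//
//   // TFHEpp::lvl1param::T *d_out, *d_inc, *d_in1, *d_in0;  // device TLWEs (assumed)
//   // MuxBootstrap<TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ>(
//   //     d_out, d_inc, d_in1, d_in0, st.st(), st.device_id());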
template<class iksP, class brP, typename brP::targetP::T μ>
void NMuxBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc,
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0,
const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__NMuxBootstrap__<iksP,brP,μ>,
hipFuncAttributeMaxDynamicSharedMemorySize,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP));
    hipLaunchKernelGGL(( __NMuxBootstrap__<iksP, brP, μ>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>),
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP),
st, out, inc, in1, in0, bk_ntts[gpuNum],
ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
    template void NMuxBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc, \
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0, \
const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void NMuxBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc,
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0,
const hipStream_t st, const int gpuNum)
{
    hipFuncSetAttribute(__NMuxBootstrap__<brP,μ,iksP>,
hipFuncAttributeMaxDynamicSharedMemorySize,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP));
    hipLaunchKernelGGL(( __NMuxBootstrap__<brP,μ,iksP>), dim3(1), dim3(NUM_THREAD4HOMGATE<typename brP::targetP>),
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP),
st, out, inc, in1, in0, bk_ntts[gpuNum],
ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
    template void NMuxBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc, \
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0, \
const hipStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
} // namespace cufhe
| 4dbc821828c99b24a3091e8999cb2d35b718c6f0.cu | /**
* Copyright 2018 Wei Dai <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <include/bootstrap_gpu.cuh>
#include <include/gatebootstrapping_gpu.cuh>
#include <include/keyswitch_gpu.cuh>
#include <include/cufhe_gpu.cuh>
#include <include/details/error_gpu.cuh>
#include <include/ntt_gpu/ntt.cuh>
#include <limits>
#include <vector>
#include <algorithm>
namespace cufhe {
template<class P = TFHEpp::lvl1param>
constexpr uint MEM4HOMGATE =
((P::k+1) * P::l + 1 + P::k) * P::n * sizeof(FFP);
using namespace std;
using namespace TFHEpp;
vector<FFP*> bk_ntts;
vector<CuNTTHandler<>*> ntt_handlers;
template<class P = TFHEpp::lvl1param>
__global__ void __TRGSW2NTT__(FFP* const bk_ntt, const typename P::T* const bk,
CuNTTHandler<> ntt)
{
__shared__ FFP sh_temp[P::n];
const int index = blockIdx.z * ((P::k+1) * P::l * (P::k+1) * P::n) +
blockIdx.y * (P::k+1) * P::n + blockIdx.x * P::n;
ntt.NTT<typename P::T>(&bk_ntt[index], &bk[index], sh_temp, 0);
}
void TRGSW2NTT(cuFHETRGSWNTTlvl1& trgswntt,
const TFHEpp::TRGSW<TFHEpp::lvl1param>& trgsw, Stream& st)
{
cudaSetDevice(st.device_id());
TFHEpp::lvl1param::T* d_trgsw;
cudaMalloc((void**)&d_trgsw, sizeof(trgsw));
cudaMemcpyAsync(d_trgsw, trgsw.data(), sizeof(trgsw),
cudaMemcpyHostToDevice, st.st());
dim3 grid(lvl1param::k+1, (lvl1param::k+1) * lvl1param::l, 1);
dim3 block(lvl1param::n >> NTT_THREAD_UNITBIT);
__TRGSW2NTT__<<<grid, block, 0, st.st()>>>(
trgswntt.trgswdevices[st.device_id()], d_trgsw,
*ntt_handlers[st.device_id()]);
CuCheckError();
cudaMemcpyAsync(
trgswntt.trgswhost.data(), trgswntt.trgswdevices[st.device_id()],
sizeof(trgswntt.trgswhost), cudaMemcpyDeviceToHost, st.st());
cudaFree(d_trgsw);
}
void InitializeNTThandlers(const int gpuNum)
{
for (int i = 0; i < gpuNum; i++) {
cudaSetDevice(i);
ntt_handlers.push_back(new CuNTTHandler<>());
ntt_handlers[i]->Create();
ntt_handlers[i]->CreateConstant();
cudaDeviceSynchronize();
CuCheckError();
}
}
template<class P>
void BootstrappingKeyToNTT(const BootstrappingKey<P>& bk,
const int gpuNum)
{
bk_ntts.resize(gpuNum);
for (int i = 0; i < gpuNum; i++) {
cudaSetDevice(i);
cudaMalloc((void**)&bk_ntts[i], sizeof(FFP) * P::domainP::n * 2 *
P::targetP::l * 2 * P::targetP::n);
typename P::targetP::T* d_bk;
cudaMalloc((void**)&d_bk, sizeof(bk));
cudaMemcpy(d_bk, bk.data(), sizeof(bk), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
CuCheckError();
dim3 grid(P::targetP::k+1, (P::targetP::k+1) * P::targetP::l, P::domainP::n);
dim3 block(P::targetP::n >> NTT_THREAD_UNITBIT);
__TRGSW2NTT__<<<grid, block>>>(bk_ntts[i], d_bk, *ntt_handlers[i]);
cudaDeviceSynchronize();
CuCheckError();
cudaFree(d_bk);
}
}
#define INST(P) \
template void BootstrappingKeyToNTT<P>(const BootstrappingKey<P>& bk, \
const int gpuNum)
INST(TFHEpp::lvl01param);
#undef INST
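// Typical one-time setup order on the host (a hedged sketch; bk comes from
// TFHEpp key generation and gpuNum from the application, and the key-switching
// key transfer handled in keyswitch_gpu.cuh is assumed to be done separately):
//
//   // InitializeNTThandlers(gpuNum);                          // per-GPU NTT tables
//   // BootstrappingKeyToNTT<TFHEpp::lvl01param>(bk, gpuNum);  // upload + NTT the BK
//   // ... run gate bootstrapping kernels ...
//   // DeleteBootstrappingKeyNTT(gpuNum);                      // release key material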
void DeleteBootstrappingKeyNTT(const int gpuNum)
{
for (int i = 0; i < bk_ntts.size(); i++) {
cudaSetDevice(i);
cudaFree(bk_ntts[i]);
ntt_handlers[i]->Destroy();
delete ntt_handlers[i];
}
ntt_handlers.clear();
}
__device__ inline void TRLWESubAndDecomposition(
FFP* const dectrlwe, const TFHEpp::lvl1param::T* const trlwe1,
const TFHEpp::lvl1param::T* const trlwe0)
{
const uint32_t tid = ThisThreadRankInBlock();
const uint32_t bdim = ThisBlockSize();
constexpr uint32_t decomp_mask = (1 << lvl1param::Bgbit) - 1;
constexpr int32_t decomp_half = 1 << (lvl1param::Bgbit - 1);
constexpr uint32_t decomp_offset = offsetgen<lvl1param>();
constexpr typename lvl1param::T roundoffset =
1ULL << (std::numeric_limits<typename lvl1param::T>::digits -
lvl1param::l * lvl1param::Bgbit - 1);
#pragma unroll
for (int i = tid; i < lvl1param::n; i += bdim) {
#pragma unroll
for (int j = 0; j < (lvl1param::k+1); j++) {
// decomp temp
lvl1param::T temp = trlwe1[j * lvl1param::n + i] -
trlwe0[j * lvl1param::n + i] + decomp_offset +
roundoffset;
#pragma unroll
for (int digit = 0; digit < lvl1param::l; digit += 1)
dectrlwe[j * lvl1param::l * lvl1param::n +
digit * lvl1param::n + i] =
FFP(lvl1param::T(
((temp >>
(std::numeric_limits<typename lvl1param::T>::digits -
(digit + 1) * lvl1param::Bgbit)) &
decomp_mask) -
decomp_half));
}
}
__syncthreads(); // must
}
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>) void __CMUXNTT__(
TFHEpp::lvl1param::T* out, const FFP* const tgsw_ntt,
const TFHEpp::lvl1param::T* const trlwe1,
const TFHEpp::lvl1param::T* const trlwe0, const CuNTTHandler<> ntt)
{
const uint32_t tid = ThisThreadRankInBlock();
const uint32_t bdim = ThisBlockSize();
extern __shared__ FFP sh[];
// To hold the data after Decomposition and NTT
FFP* sh_acc_ntt = &sh[0];
// To hold sum
FFP* sh_res_ntt = &sh[(lvl1param::k+1) * lvl1param::l * lvl1param::n];
TFHEpp::lvl1param::T* outtemp = (TFHEpp::lvl1param::T*)&sh[0];
TRLWESubAndDecomposition(sh_acc_ntt, trlwe1, trlwe0);
// (k+1)*l NTTs
// Input/output/buffer use the same shared memory location.
if (tid < (lvl1param::k+1) * lvl1param::l * (lvl1param::n >> NTT_THREAD_UNITBIT)) {
FFP* tar = &sh_acc_ntt[tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< lvl1param::nbit];
ntt.NTT<FFP>(tar, tar, tar,
tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< (lvl1param::nbit - NTT_THREAD_UNITBIT));
}
else { // must meet 5 sync made by NTTInv
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
}
__syncthreads();
// Multiply with bootstrapping key in global memory.
#pragma unroll
for (int i = tid; i < lvl1param::n; i += bdim) {
#pragma unroll
for(int k = 0; k < lvl1param::k+1; k++)
sh_res_ntt[i + k*lvl1param::n] = sh_acc_ntt[0 * lvl1param::n + i] *
tgsw_ntt[(((lvl1param::k+1) * 0 + k) << lvl1param::nbit) + i];
#pragma unroll
for (int digit = 1; digit < 2 * lvl1param::l; digit += 1) {
#pragma unroll
for(int k = 0; k < lvl1param::k+1; k++)
sh_res_ntt[i + k*lvl1param::n] = sh_acc_ntt[digit * lvl1param::n + i] *
tgsw_ntt[(((lvl1param::k+1) * digit + k) << lvl1param::nbit) + i];
}
}
__syncthreads();
#pragma unroll
for (int i = tid; i < (lvl1param::k+1) * lvl1param::n; i += bdim) outtemp[i] = trlwe0[i];
// k+1 NTTInvs and add acc
if (tid < (lvl1param::k+1) * (lvl1param::n >> NTT_THREAD_UNITBIT)) {
FFP* src = &sh_res_ntt[tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< lvl1param::nbit];
ntt.NTTInvAdd<typename lvl1param::T>(
&outtemp[tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< lvl1param::nbit],
src, src,
tid >> (lvl1param::nbit - NTT_THREAD_UNITBIT)
<< (lvl1param::nbit - NTT_THREAD_UNITBIT));
}
else { // must meet 5 sync made by NTTInv
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
__syncthreads();
}
__syncthreads(); // must
for (int i = 0; i < (lvl1param::k+1) * lvl1param::n; i++) out[i] = outtemp[i];
__syncthreads();
}
template <class bkP, class iksP>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename bkP::targetP>) void __Bootstrap__(
typename iksP::domainP::T* const out, const typename iksP::domainP::T* const in,
const typename bkP::targetP::T mu, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename bkP::targetP::T tlwe[(bkP::targetP::k+1)*bkP::targetP::n];
__BlindRotate__<bkP>(tlwe,in,mu,bk,ntt);
KeySwitch<iksP>(out, tlwe, ksk);
__threadfence();
}
// template <class iksP, class bkP>
// __global__ __launch_bounds__(NUM_THREAD4HOMGATE) void __Bootstrap__(
// typename iksP::domainP::T* const out, const typename iksP::domainP::T* const in,
// const typename bkP::targetP::T mu, const FFP* const bk,
// const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
// {
// __shared__ typename bkP::targetP::T tlwe[iksP::targetP::k*iksP::targetP::n+1];
// KeySwitch<iksP>(tlwe, in, ksk);
// __threadfence();
// __BlindRotate__<bkP>(out,tlwe,mu,bk,ntt);
// __threadfence();
// }
template<class P>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename P::targetP>) void __BlindRotateGlobal__(
TFHEpp::lvl1param::T* const out, const TFHEpp::lvl0param::T* const in,
const TFHEpp::lvl1param::T mu, const FFP* const bk, const CuNTTHandler<> ntt)
{
__BlindRotate__<P>(out, in, mu, bk, ntt);
}
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>) void __SEIandBootstrap2TRLWE__(
TFHEpp::lvl1param::T* const out, const TFHEpp::lvl1param::T* const in,
const TFHEpp::lvl1param::T mu, const FFP* const bk, const TFHEpp::lvl0param::T* const ksk,
const CuNTTHandler<> ntt)
{
// Assert(bk.k() == 1);
// Assert(bk.l() == 2);
// Assert(bk.n() == lvl1param::n);
extern __shared__ FFP sh[];
FFP* sh_acc_ntt = &sh[0];
// Use Last section to hold tlwe. This may to make these data in serial
TFHEpp::lvl1param::T* tlwe =
(TFHEpp::lvl1param::T*)&sh[((lvl1param::k+1) * lvl1param::l + 2) * lvl1param::n];
lvl0param::T* tlwelvl0 =
(lvl0param::T*)&sh[((lvl1param::k+1) * lvl1param::l + 2 + lvl1param::k) * lvl1param::n];
KeySwitch<lvl10param>(tlwelvl0, in, ksk);
__syncthreads();
// test vector
// acc.a = 0; acc.b = vec(mu) * x ^ (in.b()/2048)
register uint32_t bar = 2 * lvl1param::n - modSwitchFromTorus<lvl1param>(
tlwelvl0[lvl0param::n]);
RotatedTestVector<lvl1param>(tlwe, bar, mu);
// accumulate
for (int i = 0; i < lvl0param::n; i++) { // n iterations
bar = modSwitchFromTorus<lvl1param>(tlwelvl0[i]);
Accumulate<lvl01param>(tlwe, sh_acc_ntt, bar,
bk + (i << lvl1param::nbit) * (lvl1param::k+1) * (lvl1param::k+1) * lvl1param::l, ntt);
}
__syncthreads();
for (int i = 0; i < (lvl1param::k+1) * lvl1param::n; i++) {
out[i] = tlwe[i];
}
__threadfence();
}
template<class P, uint index>
__device__ inline void __SampleExtractIndex__(typename P::T* const res, const typename P::T* const in){
const uint32_t tid = ThisThreadRankInBlock();
const uint32_t bdim = ThisBlockSize();
constexpr uint nmask = P::n-1;
for (uint i = tid; i <= P::k*P::n; i += bdim) {
if (i == P::k*P::n){
res[P::k*P::n] = in[P::k*P::n+index];
}else {
const uint k = i >> P::nbit;
const uint n = i & nmask;
if (n <= index) res[i] = in[k*P::n + index - n];
else res[i] = -in[k*P::n + P::n + index-n];
}
}
}
template <class iksP, class brP, typename brP::targetP::T μ, int casign, int cbsign, typename brP::domainP::T offset>
__device__ inline void __HomGate__(typename brP::targetP::T* const out,
const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const FFP* const bk,
const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__shared__ typename iksP::targetP::T tlwe[iksP::targetP::k*iksP::targetP::n+1];
IdentityKeySwitchPreAdd<iksP, casign, cbsign, offset>(tlwe, in0, in1, ksk);
__syncthreads();
__shared__ typename brP::targetP::T trlwe[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotate__<brP>(trlwe, tlwe, μ, bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(out,trlwe);
__threadfence();
}
template <class brP, typename brP::targetP::T μ, class iksP, int casign, int cbsign, typename brP::domainP::T offset>
__device__ inline void __HomGate__(typename iksP::targetP::T* const out,
const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const FFP* const bk,
const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__shared__ typename brP::targetP::T tlwe[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotatePreAdd__<brP, casign,cbsign,offset>(tlwe,in0,in1,bk,ntt);
KeySwitch<iksP>(out, tlwe, ksk);
__threadfence();
}
// br iks ver.
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NandBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -1, -1, brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NorBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -1, -1, -brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __XnorBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -2, -2, -2 * brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 1, 1, -brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 1, 1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __XorBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 2, 2, 2 * brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndNYBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -1, 1, -brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndYNBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 1, -1, -brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrNYBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, -1, 1, brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ, class iksP = TFHEpp::lvl10param>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrYNBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<brP, μ, iksP, 1, -1, brP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
// Mux(inc,in1,in0) = inc?in1:in0 = inc&in1 + (!inc)&in0
template<class brP, typename brP::targetP::T μ, class iksP>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __MuxBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc,
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename brP::targetP::T tlwe1[(brP::targetP::k+1)*brP::targetP::n];
__shared__ typename brP::targetP::T tlwe0[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotatePreAdd__<brP, 1, 1, -brP::domainP::μ>(tlwe1,inc,in1,bk,ntt);
__BlindRotatePreAdd__<brP, -1, 1, -brP::domainP::μ>(tlwe0,inc,in0,bk,ntt);
volatile const uint32_t tid = ThisThreadRankInBlock();
volatile const uint32_t bdim = ThisBlockSize();
#pragma unroll
for (int i = tid; i <= brP::targetP::n; i += bdim) {
tlwe1[i] += tlwe0[i];
if (i == brP::targetP::n) {
tlwe1[brP::targetP::n] += μ;
}
}
__syncthreads();
KeySwitch<iksP>(out, tlwe1, ksk);
__threadfence();
}
// NMux(inc,in1,in0) = !(inc?in1:in0) = !(inc&in1 + (!inc)&in0)
template<class brP, typename brP::targetP::T μ, class iksP>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NMuxBootstrap__(
typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc,
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename brP::targetP::T tlwe1[(brP::targetP::k+1)*brP::targetP::n];
__shared__ typename brP::targetP::T tlwe0[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotatePreAdd__<brP, 1, 1, -brP::domainP::μ>(tlwe1,inc,in1,bk,ntt);
__BlindRotatePreAdd__<brP, -1, 1, -brP::domainP::μ>(tlwe0,inc,in0,bk,ntt);
volatile const uint32_t tid = ThisThreadRankInBlock();
volatile const uint32_t bdim = ThisBlockSize();
#pragma unroll
for (int i = tid; i <= brP::targetP::n; i += bdim) {
tlwe1[i] = -tlwe1[i] - tlwe0[i];
if (i == brP::targetP::n) {
tlwe1[brP::targetP::n] -= μ;
}
}
__syncthreads();
KeySwitch<iksP>(out, tlwe1, ksk);
__threadfence();
}
// iks br ver.
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NandBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, -1, -1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NorBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, -1, -1, -iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __XnorBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, -2, -2, -2 * iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, 1, 1, -iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, 1, 1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __XorBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, 2, 2, 2 * iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndNYBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, -1, 1, -iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __AndYNBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, 1, -1, -iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrNYBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, -1, 1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class iksP = TFHEpp::lvl10param, class brP = TFHEpp::lvl01param, typename brP::targetP::T μ = TFHEpp::lvl1param::μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __OrYNBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, FFP* bk, const typename iksP::targetP::T* const ksk,
const CuNTTHandler<> ntt)
{
__HomGate__<iksP, brP, μ, 1, -1, iksP::domainP::μ>(out, in0, in1, bk, ksk, ntt);
}
template<class P>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>) void __CopyBootstrap__(
typename P::T* const out, const typename P::T* const in)
{
const uint tid = ThisThreadRankInBlock();
const uint bdim = ThisBlockSize();
for (int i = tid; i <= P::k*P::n; i += bdim)
out[i] = in[i];
__syncthreads();
__threadfence();
}
template<class P>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<TFHEpp::lvl1param>) void __NotBootstrap__(
typename P::T* const out, const typename P::T* const in)
{
const uint tid = ThisThreadRankInBlock();
const uint bdim = ThisBlockSize();
for (int i = tid; i <= P::k*P::n; i += bdim)
out[i] = -in[i];
__syncthreads();
__threadfence();
}
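// Note: COPY and NOT need no bootstrapping or key-switching material; they act coefficient-wise
// on the k*n+1 TLWE coefficients, and negating every coefficient flips the encrypted bit under
// the ±μ gate encoding used here.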
// Mux(inc,in1,in0) = inc?in1:in0 = inc&in1 + (!inc)&in0
template<class iksP, class brP, typename brP::targetP::T μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __MuxBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc,
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename iksP::targetP::T tlwelvl0[iksP::targetP::k*iksP::targetP::n+1];
IdentityKeySwitchPreAdd<iksP, 1, 1, -iksP::domainP::μ>(tlwelvl0, inc, in1, ksk);
__syncthreads();
__shared__ typename brP::targetP::T tlwe1[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotate__<brP>(tlwe1,tlwelvl0,μ,bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(out, tlwe1);
IdentityKeySwitchPreAdd<iksP, -1, 1, -iksP::domainP::μ>(tlwelvl0, inc, in0, ksk);
__syncthreads();
__shared__ typename brP::targetP::T tlwe0[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotate__<brP>(tlwe0,tlwelvl0,μ,bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(tlwe1, tlwe0);
__syncthreads();
volatile const uint32_t tid = ThisThreadRankInBlock();
volatile const uint32_t bdim = ThisBlockSize();
#pragma unroll
for (int i = tid; i <= brP::targetP::n; i += bdim) {
out[i] += tlwe1[i];
if (i == brP::targetP::n) {
out[brP::targetP::n] += μ;
}
}
__threadfence();
}
// NMux(inc,in1,in0) = !(inc?in1:in0) = !(inc&in1 + (!inc)&in0)
template<class iksP, class brP, typename brP::targetP::T μ>
__global__ __launch_bounds__(NUM_THREAD4HOMGATE<typename brP::targetP>) void __NMuxBootstrap__(
typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc,
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0, const FFP* const bk,
const typename iksP::targetP::T* const ksk, const CuNTTHandler<> ntt)
{
__shared__ typename iksP::targetP::T tlwelvl0[iksP::targetP::k*iksP::targetP::n+1];
IdentityKeySwitchPreAdd<iksP, 1, 1, -iksP::domainP::μ>(tlwelvl0, inc, in1, ksk);
__syncthreads();
__shared__ typename brP::targetP::T tlwe1[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotate__<brP>(tlwe1,tlwelvl0,μ,bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(out, tlwe1);
IdentityKeySwitchPreAdd<iksP, -1, 1, -iksP::domainP::μ>(tlwelvl0, inc, in0, ksk);
__syncthreads();
__shared__ typename brP::targetP::T tlwe0[(brP::targetP::k+1)*brP::targetP::n];
__BlindRotate__<brP>(tlwe0,tlwelvl0,μ,bk,ntt);
__SampleExtractIndex__<typename brP::targetP,0>(tlwe1, tlwe0);
__syncthreads();
volatile const uint32_t tid = ThisThreadRankInBlock();
volatile const uint32_t bdim = ThisBlockSize();
#pragma unroll
for (int i = tid; i <= brP::targetP::n; i += bdim) {
out[i] = -out[i] - tlwe1[i];
if (i == brP::targetP::n) {
out[brP::targetP::n] -= μ;
}
}
__threadfence();
}
void Bootstrap(TFHEpp::lvl0param::T* const out, const TFHEpp::lvl0param::T* const in,
const lvl1param::T mu, const cudaStream_t st, const int gpuNum)
{
__Bootstrap__<lvl01param,lvl10param><<<1, NUM_THREAD4HOMGATE<TFHEpp::lvl1param>, 0, st>>>(
out, in, mu, bk_ntts[gpuNum], ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
void CMUXNTTkernel(TFHEpp::lvl1param::T* const res, const FFP* const cs,
TFHEpp::lvl1param::T* const c1,
TFHEpp::lvl1param::T* const c0, cudaStream_t st,
const int gpuNum)
{
cudaFuncSetAttribute(__CMUXNTT__,
cudaFuncAttributeMaxDynamicSharedMemorySize,
(2 * TFHEpp::lvl1param::l + 2) * TFHEpp::lvl1param::n * sizeof(FFP));
__CMUXNTT__<<<1, NUM_THREAD4HOMGATE<TFHEpp::lvl1param>,
((TFHEpp::lvl1param::k+1) * TFHEpp::lvl1param::l + 2) * TFHEpp::lvl1param::n * sizeof(FFP), st>>>(
res, cs, c1, c0, *ntt_handlers[gpuNum]);
CuCheckError();
}
void BootstrapTLWE2TRLWE(TFHEpp::lvl1param::T* const out, const TFHEpp::lvl0param::T* const in,
const lvl1param::T mu, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__BlindRotate__<TFHEpp::lvl01param>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<TFHEpp::lvl1param>);
__BlindRotateGlobal__<TFHEpp::lvl01param><<<1, NUM_THREAD4HOMGATE<TFHEpp::lvl1param>, MEM4HOMGATE<TFHEpp::lvl1param>, st>>>(
out, in, mu, bk_ntts[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
void SEIandBootstrap2TRLWE(TFHEpp::lvl1param::T* const out, const TFHEpp::lvl1param::T* const in,
const lvl1param::T mu, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(
__SEIandBootstrap2TRLWE__, cudaFuncAttributeMaxDynamicSharedMemorySize,
(((lvl1param::k+1) * lvl1param::l + 3) * lvl1param::n + (lvl0param::n + 1) / 2 + 1) *
sizeof(FFP));
    __SEIandBootstrap2TRLWE__<<<1,
                                (lvl1param::l * lvl1param::n) >> NTT_THREAD_UNITBIT,
                                ((2 * lvl1param::l + 3) * lvl1param::n +
                                 (lvl0param::n + 1) / 2 + 1) * sizeof(FFP),
                                st>>>
(out, in, mu, bk_ntts[gpuNum], ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
template<class brP, typename brP::targetP::T μ, class iksP>
void NandBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__NandBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__NandBootstrap__<brP, μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
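// The host-side gate wrappers in this file all follow the same pattern as NandBootstrap above:
// raise the kernel's dynamic shared-memory limit with cudaFuncSetAttribute, then launch a single
// block of NUM_THREAD4HOMGATE threads on stream `st`, using the per-GPU bootstrapping key
// (bk_ntts), key-switching key (ksk_devs) and NTT handler selected by `gpuNum`. Only the
// gate-specific template constants differ.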
#define INST(brP, μ, iksP) \
template void NandBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void NandBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__NandBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__NandBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void NandBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void OrBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__OrBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__OrBootstrap__<brP, brP::targetP::μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void OrBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void OrBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__OrBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__OrBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void OrBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void OrYNBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__OrYNBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__OrYNBootstrap__<brP, brP::targetP::μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void OrYNBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void OrYNBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__OrYNBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__OrYNBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void OrYNBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void OrNYBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__OrNYBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__OrNYBootstrap__<brP, brP::targetP::μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void OrNYBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void OrNYBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__OrNYBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__OrNYBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void OrNYBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void AndBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__AndBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__AndBootstrap__<brP, brP::targetP::μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void AndBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void AndBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__AndBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__AndBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void AndBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void AndYNBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__AndYNBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__AndYNBootstrap__<brP, brP::targetP::μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void AndYNBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void AndYNBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__AndYNBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__AndYNBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void AndYNBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void AndNYBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__AndNYBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__AndNYBootstrap__<brP, brP::targetP::μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void AndNYBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void AndNYBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__AndNYBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__AndNYBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void AndNYBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void NorBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__NorBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__NorBootstrap__<brP, brP::targetP::μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void NorBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void NorBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__NorBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__NorBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void NorBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void XorBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__XorBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__XorBootstrap__<brP, brP::targetP::μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void XorBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void XorBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__XorBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__XorBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void XorBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void XnorBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0,
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__XnorBootstrap__<brP, brP::targetP::μ, iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__XnorBootstrap__<brP, brP::targetP::μ, iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void XnorBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const in0, \
const typename brP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void XnorBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0,
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__XnorBootstrap__<iksP, brP, brP::targetP::μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
MEM4HOMGATE<typename brP::targetP>);
__XnorBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>, MEM4HOMGATE<typename brP::targetP>, st>>>(
out, in0, in1, bk_ntts[gpuNum], ksk_devs[gpuNum],
*ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void XnorBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const in0, \
const typename iksP::domainP::T* const in1, const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class P>
void CopyBootstrap(typename P::T* const out, const typename P::T* const in,
const cudaStream_t st, const int gpuNum)
{
__CopyBootstrap__<P><<<1, std::min(P::n + 1,NUM_THREAD4HOMGATE<TFHEpp::lvl1param>), 0, st>>>(out, in);
CuCheckError();
}
#define INST(P) \
template void CopyBootstrap<P>(typename P::T* const out, const typename P::T* const in, \
const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl0param);
INST(TFHEpp::lvl1param);
#undef INST
template<class P>
void NotBootstrap(typename P::T* const out, const typename P::T* const in,
const cudaStream_t st, const int gpuNum)
{
__NotBootstrap__<P><<<1, std::min(P::n + 1,NUM_THREAD4HOMGATE<TFHEpp::lvl1param>), 0, st>>>(out, in);
CuCheckError();
}
#define INST(P) \
template void NotBootstrap<P>(typename P::T* const out, const typename P::T* const in, \
const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl0param);
INST(TFHEpp::lvl1param);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void MuxBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc,
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0,
const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__MuxBootstrap__<brP,μ,iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP));
__MuxBootstrap__<brP,μ,iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP),
st>>>(out, inc, in1, in0, bk_ntts[gpuNum],
ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void MuxBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc, \
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0, \
const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void MuxBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc,
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0,
const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__MuxBootstrap__<iksP,brP,μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP));
__MuxBootstrap__<iksP,brP,μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP),
st>>>(out, inc, in1, in0, bk_ntts[gpuNum],
ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void MuxBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc, \
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0, \
const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class iksP, class brP, typename brP::targetP::T μ>
void NMuxBootstrap(typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc,
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0,
const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__NMuxBootstrap__<iksP,brP,μ>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP));
__NMuxBootstrap__<iksP, brP, μ><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP),
st>>>(out, inc, in1, in0, bk_ntts[gpuNum],
ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(iksP, brP, μ) \
template void NMuxBootstrap<iksP, brP,μ>(typename brP::targetP::T* const out, const typename iksP::domainP::T* const inc, \
const typename iksP::domainP::T* const in1, const typename iksP::domainP::T* const in0, \
const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl10param, TFHEpp::lvl01param, TFHEpp::lvl1param::μ);
#undef INST
template<class brP, typename brP::targetP::T μ, class iksP>
void NMuxBootstrap(typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc,
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0,
const cudaStream_t st, const int gpuNum)
{
cudaFuncSetAttribute(__NMuxBootstrap__<brP,μ,iksP>,
cudaFuncAttributeMaxDynamicSharedMemorySize,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP));
__NMuxBootstrap__<brP,μ,iksP><<<1, NUM_THREAD4HOMGATE<typename brP::targetP>,
((brP::targetP::k+1) * brP::targetP::l + 3) * brP::targetP::n * sizeof(FFP),
st>>>(out, inc, in1, in0, bk_ntts[gpuNum],
ksk_devs[gpuNum], *ntt_handlers[gpuNum]);
CuCheckError();
}
#define INST(brP, μ, iksP) \
template void NMuxBootstrap<brP,μ,iksP>(typename iksP::targetP::T* const out, const typename brP::domainP::T* const inc, \
const typename brP::domainP::T* const in1, const typename brP::domainP::T* const in0, \
const cudaStream_t st, const int gpuNum)
INST(TFHEpp::lvl01param, TFHEpp::lvl1param::μ, TFHEpp::lvl10param);
#undef INST
} // namespace cufhe
|
19798554fa0df20cabd0ee5e43fd1d0eb5d26206.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/mismatch.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h> // thrust::inclusive_scan
#include <cmath> // sqrtf
#include <algorithm> // std::random_shuffle
#include <cstdlib> // std::rand, std::srand
#include <ctime> // time(), used to seed std::rand
#include <iostream> // std::cout, used in clusterVertices
#include <vector> // std::vector, host-side buffers
#include <cassert>
#define myEPS 2.220446049250313e-16F
#define rcondEPS 1e-6F
#define minSinTheta 0.001F
#define areaMAGNITUDE 100.0F // assume input coordinates in metres; scale areas from m^2 to dm^2 (factor 100)
// reference: https://stackoverflow.com/questions/34697937/unique-rows-from-linearized-matrix-cuda
// edge_sort_func<T> sorts edge columns in ascending lexicographic order
template <typename T>
struct edge_sort_func
{
int cols;
T* data;
edge_sort_func(int _cols, T* _data) : cols(_cols),data(_data) {};
__host__ __device__
bool operator()(int c1, int c2){
for (int i = 0; i < 2; i++){
if (data[c1+i*cols] < data[c2+i*cols])
return true;
else if (data[c1+i*cols] > data[c2+i*cols])
return false;}
return false;
}
};
// reference: https://stackoverflow.com/questions/34697937/unique-rows-from-linearized-matrix-cuda
// edge_unique_func<T> compares edge columns for equality, used to deduplicate the edge array
template <typename T>
struct edge_unique_func
{
int cols;
T* data;
edge_unique_func(int _cols, T* _data) : cols(_cols),data(_data) {};
__device__
bool operator()(int c1, int c2){
thrust::pair<T*, T*> res1 = thrust::mismatch(thrust::seq, data+c1, data+c1+1, data+c2);
if (res1.first!=data+c1+1)
return false;
else
{
thrust::pair<T*, T*> res2 = thrust::mismatch(thrust::seq, data+cols+c1, data+cols+c1+1, data+cols+c2);
return (res2.first==data+cols+c1+1);
}
}
};
__device__ void NormalizeCrossProductDim3(const float* vecA, const float* vecB, float* vecCross)
{
vecCross[0] = vecA[1]*vecB[2] - vecA[2]*vecB[1];
    vecCross[1] = vecA[2]*vecB[0] - vecA[0]*vecB[2];
vecCross[2] = vecA[0]*vecB[1] - vecA[1]*vecB[0];
float length = std::sqrt(vecCross[0]*vecCross[0] + vecCross[1]*vecCross[1] + vecCross[2]*vecCross[2]);
length = length>1e-20f?length:1e-20f;
vecCross[0] /= length; vecCross[1] /= length; vecCross[2] /= length;
}
__device__ void CrossProductDim3(const float* vecA, const float* vecB, float* vecCross)
{
vecCross[0] = vecA[1]*vecB[2] - vecA[2]*vecB[1];
    vecCross[1] = vecA[2]*vecB[0] - vecA[0]*vecB[2];
vecCross[2] = vecA[0]*vecB[1] - vecA[1]*vecB[0];
}
__device__ float DotProductDim3(const float* vecA, const float* vecB)
{
return (vecA[0]*vecB[0] + vecA[1]*vecB[1] + vecA[2]*vecB[2]);
}
__device__ float RrefDim3(const float A[3][3], const float b[3], float invA[3][3], float xyz[3])
{
float Mat[3][7];
for(int row=0;row<3;row++)
{
for (int col=0;col<3;col++)
{
Mat[row][col] = A[row][col];
if (row==col)
Mat[row][col+4] = 1;
else
Mat[row][col+4] = 0;
}
}
Mat[0][3] = b[0]; Mat[1][3] = b[1]; Mat[2][3] = b[2];
// compute infinity norm of A
float normA=0;
for(int row=0;row<3;row++)
{
float rowSum = 0;
for(int col=0;col<3;col++)
rowSum += std::abs(Mat[row][col]);
if (normA<rowSum)
normA = rowSum;
}
// matlab EPS of 'single' datatype is 1.1920929e-07, of 'double' is 2.220446049250313e-16
const int m=3, n=7;
float tol = myEPS*n*normA;
int i=0, j=0;
while ((i<m) && (j<n))
{
// Find value and index of largest element in the remainder of column j.
float p=std::abs(Mat[i][j]);
int k=i;
for(int row=i+1;row<m;row++)
{
if (p<std::abs(Mat[row][j]))
{
p=std::abs(Mat[row][j]);
k = row;
}
}
if (p <= tol)
{
// The column is negligible, zero it out.
for(int row=i;row<m;row++)
Mat[row][j] = 0;
j = j + 1;
}
else
{
// Swap i-th and k-th rows.
float temp[n] = {0};
for(int col=j;col<n;col++)
temp[col] = Mat[i][col];
for(int col=j;col<n;col++)
{
Mat[i][col] = Mat[k][col];
Mat[k][col] = temp[col];
}
// Divide the pivot row by the pivot element.
float pivot = Mat[i][j];
for(int col=j;col<n;col++)
{
Mat[i][col] = Mat[i][col]/pivot;
}
// Subtract multiples of the pivot row from all the other rows.
for(int row=0;row<i;row++)
{
const float value = Mat[row][j];
for(int col=j;col<n;col++)
Mat[row][col] -= (value*Mat[i][col]);
}
for(int row=i+1;row<m;row++)
{
const float value = Mat[row][j];
for(int col=j;col<n;col++)
Mat[row][col] -= (value*Mat[i][col]);
}
i++;
j++;
}
}
xyz[0] = -Mat[0][3]; xyz[1] = -Mat[1][3]; xyz[2] = -Mat[2][3];
for(int row=0;row<3;row++)
for(int col=0;col<3;col++)
invA[row][col] = Mat[row][col+4];
// infinity norm of the inverse of A
float normInvA=0;
for(int row=0;row<3;row++)
{
float rowSum = 0;
for(int col=0;col<3;col++)
rowSum += std::abs(invA[row][col]);
if (normInvA<rowSum)
normInvA = rowSum;
}
float rcond = 1/(normA*normInvA);
return rcond;
}
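// Note on RrefDim3 (as used above): it performs Gauss-Jordan elimination with partial pivoting on
// the augmented matrix [A | b | I], so on exit invA holds A^-1 and column 3 holds the solution of
// A x = b. The minimiser of E(v) = v^T A v + 2 b.v + c satisfies 2 A v + 2 b = 0, i.e. v = -A^-1 b,
// which is why xyz is the negated solution. The returned rcond = 1 / (||A||_inf * ||A^-1||_inf) is a
// reciprocal-condition-number estimate the caller uses to decide whether A is safely invertible.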
__device__ float ComputeError(const float xyz[3], const float A[3][3], const float b[3], const float& c)
{
float cost = ((xyz[0]*A[0][0] + xyz[1]*A[1][0] + xyz[2]*A[2][0])*xyz[0]
+ (xyz[0]*A[0][1] + xyz[1]*A[1][1] + xyz[2]*A[2][1])*xyz[1]
+ (xyz[0]*A[0][2] + xyz[1]*A[1][2] + xyz[2]*A[2][2])*xyz[2])
+ (xyz[0]*b[0] + xyz[1]*b[1] + xyz[2]*b[2])*2
+ c;
return cost;
}
// compute Quadric = (A, b, c) = [ q11, q12, q13, q14;
// q12, q22, q23, q24;
// q13, q23, q33, q34;
// q14, q24, q34, q44; ]
__device__ void GenerateQuadrics(const float* normal, const float& d, float* Q)
{
Q[0] = normal[0]*normal[0]; Q[1] = normal[0]*normal[1]; Q[2] = normal[0]*normal[2]; // q11, q12, q13
Q[3] = normal[1]*normal[1]; Q[4] = normal[1]*normal[2]; Q[5] = normal[2]*normal[2]; // q22, q23, q33
Q[6] = normal[0]*d; Q[7] = normal[1]*d; Q[8] = normal[2]*d; // q14, q24, q34
Q[9] = d*d; // q44
}
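// Illustrative sketch (not used elsewhere; the name is chosen here): evaluating the 10-float quadric
// written by GenerateQuadrics at a point reproduces the squared point-plane distance
//   E(v) = (n.v + d)^2 = v^T A v + 2 b.v + c,
// with A = n n^T stored as Q[0..5] (q11,q12,q13,q22,q23,q33), b = d*n as Q[6..8] and c = d^2 as Q[9].
__device__ inline float EvalQuadricSketch(const float* Q, const float* xyz)
{
    const float x = xyz[0], y = xyz[1], z = xyz[2];
    return Q[0]*x*x + Q[3]*y*y + Q[5]*z*z               // diagonal terms q11,q22,q33
         + 2.0f*(Q[1]*x*y + Q[2]*x*z + Q[4]*y*z)        // off-diagonal terms q12,q13,q23
         + 2.0f*(Q[6]*x + Q[7]*y + Q[8]*z)              // linear terms q14,q24,q34
         + Q[9];                                        // constant term q44
}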
__device__ void AddQuadrics(const float* Q1, const float* Q2, float* Q)
{
for(int i=0;i<10;i++)
Q[i] = Q1[i] + Q2[i];
}
__global__ void initVertexQuadrics(int Nf, const bool useArea, const int* faceIn,
const float* planeIn, float* vertexQuadric)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x; // global face index in the batch
if (fcIdx<Nf) // index must be in the legal range
{
// geometric information of the triangular face
const float* normal = &planeIn[5*fcIdx]; // [0,1,2]
float d = planeIn[5*fcIdx+3];
float area = planeIn[5*fcIdx+4]*areaMAGNITUDE;
float Q[10];
GenerateQuadrics(normal, d, Q);
// weighting value for the Quadric computation
float wgtArea = 1.0f;
if (useArea) {wgtArea = area/3;}
// accumulate Quadrics for each vertex
for (int k=0; k<3; k++)
{
int vtIdx = faceIn[3*fcIdx+k]; // assume: already global vertex index in the batch
for(int it=0;it<10;it++)
atomicAdd(&vertexQuadric[10*vtIdx+it], Q[it]*wgtArea);
}
}
}
// one column for one edge
__global__ void extractEdges(int Nf, const int* faceIn, int* edgeOut)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x; // global face index in the batch
if (fcIdx<Nf) // index must be in the legal range
{
        // We store global vertex indices for each edge so that thrust can process all edges
        // of the batch at once. Each face contributes 3 edges, so the number of edges is
        // 3x the number of faces; per face, 9 integers (3 edges x (v_min, v_max, faceIdx)) are stored.
int Ne = 3*Nf; // the total number of edges in the batch
int v1 = faceIn[3*fcIdx];
int v2 = faceIn[3*fcIdx+1];
int v3 = faceIn[3*fcIdx+2];
if (v1<=v2)
{
edgeOut[3*fcIdx] = v1; // in each edge, the vertex has its global index in the batch
edgeOut[3*fcIdx+Ne] = v2;
            edgeOut[3*fcIdx+Ne*2] = fcIdx; // record the source face index in the batch
} else{
edgeOut[3*fcIdx] = v2;
edgeOut[3*fcIdx+Ne] = v1;
edgeOut[3*fcIdx+Ne*2] = fcIdx;
}
if (v1<=v3)
{
edgeOut[3*fcIdx+1] = v1;
edgeOut[3*fcIdx+1+Ne] = v3;
edgeOut[3*fcIdx+1+Ne*2] = fcIdx;
} else{
edgeOut[3*fcIdx+1] = v3;
edgeOut[3*fcIdx+1+Ne] = v1;
edgeOut[3*fcIdx+1+Ne*2] = fcIdx;
}
if (v2<=v3)
{
edgeOut[3*fcIdx+2] = v2;
edgeOut[3*fcIdx+2+Ne] = v3;
edgeOut[3*fcIdx+2+Ne*2] = fcIdx;
} else{
edgeOut[3*fcIdx+2] = v3;
edgeOut[3*fcIdx+2+Ne] = v2;
edgeOut[3*fcIdx+2+Ne*2] = fcIdx;
}
}
}
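// Layout sketch: edgeOut is a row-major (3, Ne) array read column-wise, i.e. column e holds
// (min(vi,vj), max(vi,vj), sourceFaceIdx) at edgeOut[e], edgeOut[e+Ne], edgeOut[e+2*Ne].
// Storing the smaller vertex index first lets the later lexicographic sort and unique treat
// (vi,vj) and (vj,vi) as the same undirected edge.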
// Ne is the total number of edges in the batch
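// Sketch of the boundary handling below: after sorting, an interior edge occurs in two (or more)
// consecutive columns, so an edge whose sorted neighbours both differ from it is treated as a
// boundary edge. Its extra quadric comes from the plane that contains the edge and is perpendicular
// to the incident face (normal = face normal x edge direction), weighted by the squared edge length
// (times wgtBnd), which penalises moving boundary vertices away from the boundary.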
__global__ void addBoundaryQuadrics(const int Ne, const bool useArea, const float wgtBnd,
const float* vertexIn, const float* planeIn, const int* edgeOut,
const int* sortedEdgeIdx, float* vertexQuadric)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x; // global index of the current thread
if (idx<Ne) // edge index must be within [0,Ne-1]
{
int curr_vi = edgeOut[sortedEdgeIdx[idx]];
int curr_vj = edgeOut[sortedEdgeIdx[idx]+Ne];
        // Assume the edge is a boundary edge; if the same edge appears at a neighbouring
        // position in the sorted order, it is shared by two faces and is marked as interior.
bool isBndEdge = true;
if (idx>0 && isBndEdge)
{
int prev_vi = edgeOut[sortedEdgeIdx[idx-1]];
int prev_vj = edgeOut[sortedEdgeIdx[idx-1]+Ne];
if (curr_vi==prev_vi && curr_vj==prev_vj)
{
isBndEdge = false; // not boundary edge
}
}
if (idx<(Ne-1) && isBndEdge)
{
int next_vi = edgeOut[sortedEdgeIdx[idx+1]];
int next_vj = edgeOut[sortedEdgeIdx[idx+1]+Ne];
if (curr_vi==next_vi && curr_vj==next_vj)
{
isBndEdge = false; // not boundary edge
}
}
if (isBndEdge) // if the current edge is boundary edge
{
int fcIdx = edgeOut[sortedEdgeIdx[idx]+Ne*2]; // get the corresponding global face index
// geometric information of the triangular face
const float* normal = &planeIn[5*fcIdx]; //[0,1,2]=[nx,ny,nz]
const float* xyz_vi = &vertexIn[curr_vi*3]; //[0,1,2]=[ x, y, z]
const float* xyz_vj = &vertexIn[curr_vj*3]; //[0,1,2]=[ x, y, z]
float Dxyz[3] = {xyz_vj[0]-xyz_vi[0], xyz_vj[1]-xyz_vi[1], xyz_vj[2]-xyz_vi[2]};
float bndNormal[3];
NormalizeCrossProductDim3(normal, Dxyz, bndNormal);
float d = -DotProductDim3(bndNormal, xyz_vi);
float bndArea = DotProductDim3(Dxyz, Dxyz)*areaMAGNITUDE;
if (useArea)
bndArea *= wgtBnd;
else
bndArea = wgtBnd;
float Q[10];
GenerateQuadrics(bndNormal, d, Q);
// accumulate Quadrics for edge vertices vi, vj
for(int it=0;it<10;it++)
{
atomicAdd(&vertexQuadric[10*curr_vi+it], Q[it]*bndArea);
atomicAdd(&vertexQuadric[10*curr_vj+it], Q[it]*bndArea);
}
}
}
}
// note the candidate edges are unique: arrangement of `edgeCost' corresponds to `uniqueEdgeIdx'
__global__ void computeEdgeCost(int uniqueNe, int Ne, const float wgtConsist, const float* vertexIn, const int* edgeOut,
const int* uniqueEdgeIdx, const float* vertexQuadric, float* edgeCost)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x; // global edge (cost) index in the batch
if (idx<uniqueNe) // in the range [0,numUniqueEdges-1]
{
int vi = edgeOut[uniqueEdgeIdx[idx]];
int vj = edgeOut[uniqueEdgeIdx[idx]+Ne];
// add Quadrics of vi and vj
float Q[10]; //(A,b,c)
AddQuadrics(&vertexQuadric[10*vi], &vertexQuadric[10*vj], Q);
        // redundant copy of the quadric into (A, b, c) form, kept for explicitness
        float A[3][3] = {{Q[0], Q[1], Q[2]}, {Q[1], Q[3], Q[4]}, {Q[2], Q[4], Q[5]}};
float b[3] = {Q[6], Q[7], Q[8]};
float c = Q[9];
float invA[3][3];
float opt_xyz[3];
float rcondA = RrefDim3(A, b, invA, opt_xyz);
if (rcondA>rcondEPS) // A is well invertible
{
edgeCost[idx] = ComputeError(opt_xyz, A, b, c);
}
else // A is a singular matrix
{
float errI = ComputeError(&vertexIn[vi*3], A, b, c);
float errJ = ComputeError(&vertexIn[vj*3], A, b, c);
edgeCost[idx] = errI<errJ?errI:errJ;
}
}
}
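// Cost sketch: for a candidate pair (vi, vj) the summed quadric (A, b, c) = Q(vi) + Q(vj) gives the
// QEM contraction cost. When A is well conditioned the cost is evaluated at the optimal point
// v = -A^-1 b; when A is near-singular, the cheaper of the two endpoints is used as a fallback.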
// accumulate per-cluster quadrics and summed XYZ at each cluster representative
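// Encoding of vtReplace (filled per sample by clusterVertices below): vtReplace[v] < 0 means v is
// contracted into the representative vertex -vtReplace[v]; vtReplace[v] >= 0 means v is itself a
// representative (or untouched) and stores how many other vertices were merged into it.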
__global__ void VertexClusterQuadrics(int B, const int* nvIn, const int* vtReplace, const float* vertexIn,
float* vertexQuadric, float* vertexOut)
{
int vj = blockIdx.x*blockDim.x + threadIdx.x; // old (global) vertex index in the batch
int Nv = nvIn[B-1];
if (vj<Nv && vtReplace[vj]<0) // in the legal vertex range
{
int vi = -vtReplace[vj]; // new-replaced (global) vertex index in the batch
for(int it=0;it<10;it++) // accumulate vertex Quadrics
atomicAdd(&vertexQuadric[10*vi+it], vertexQuadric[10*vj+it]);
for(int it=0;it<3;it++) // accumulate vertex XYZ
atomicAdd(&vertexOut[3*vi+it], vertexIn[3*vj+it]);
}
}
// compute the contracted location of the output vertex in each cluster (average of member XYZ)
__global__ void VertexClusterContraction(int B, const int* nvIn, const int* vtReplace, const float* vertexIn,
float* vertexQuadric, float* vertexOut)
{
int vj = blockIdx.x*blockDim.x + threadIdx.x; // old (global) vertex index in the batch
int Nv = nvIn[B-1];
if (vj<Nv) // in the legal vertex range
{
        // for numerical stability, use the average of member XYZ instead of the quadric-optimal location
if (vtReplace[vj]>0) // vertex to be contracted to
{
            for(int it=0;it<3;it++) // average the accumulated XYZ over the cluster (size = vtReplace[vj]+1)
                vertexOut[3*vj+it] = (vertexOut[3*vj+it]+vertexIn[3*vj+it])/(vtReplace[vj]+1);
}
if (vtReplace[vj]==0) // left out vertex, forms singular cluster, copy from original
{
            for(int it=0;it<3;it++) // copy the original coordinates
                vertexOut[3*vj+it] = vertexIn[3*vj+it];
}
}
}
__global__ void labelDegenerateTriangles(int B, const int* mfIn, const int* vtReplace,
const float* vertexOut, const int* faceIn,
int* faceOut, bool* isDegenerate, int* isKept, int* mfOut)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x; // global face index in the batch
int Nf = mfIn[B-1];
if (fcIdx<Nf) // index must be in the legal range
{
// get sample index in the batch accessing by the current thread
int batIdx;
for(int it=0;it<B;it++)
{
if (fcIdx < mfIn[it])
{
batIdx = it;
break;
}
}
// old v1, v2, v3
int v1 = faceIn[3*fcIdx];
int v2 = faceIn[3*fcIdx+1];
int v3 = faceIn[3*fcIdx+2];
// new v1, v2, v3
if (vtReplace[v1]<0) v1 = -vtReplace[v1];
if (vtReplace[v2]<0) v2 = -vtReplace[v2];
if (vtReplace[v3]<0) v3 = -vtReplace[v3];
// update face list: vtReplace[.] is global vertex index in the batch BEFORE decimation
// vtMap[.] is global vertex index in the batch AFTER decimation
faceOut[3*fcIdx] = v1; //vtMap[v1];
faceOut[3*fcIdx+1] = v2; //vtMap[v2];
faceOut[3*fcIdx+2] = v3; //vtMap[v3];
if (v1==v2 || v1==v3 || v2==v3)
{
isDegenerate[fcIdx] = true;
//isKept[v1] = 0; isKept[v2] = 0; isKept[v3] = 0;
}
else
{
atomicAdd(&mfOut[batIdx],1);
isKept[v1] = 1; isKept[v2] = 1; isKept[v3] = 1;
}
// const float* xyz_v1 = &vertexOut[3*v1];
// const float* xyz_v2 = &vertexOut[3*v2];
// const float* xyz_v3 = &vertexOut[3*v3];
// float D21[3] = {xyz_v2[0]-xyz_v1[0], xyz_v2[1]-xyz_v1[1], xyz_v2[2]-xyz_v1[2]};
// float D31[3] = {xyz_v3[0]-xyz_v1[0], xyz_v3[1]-xyz_v1[1], xyz_v3[2]-xyz_v1[2]};
// float D32[3] = {xyz_v3[0]-xyz_v2[0], xyz_v3[1]-xyz_v2[1], xyz_v3[2]-xyz_v2[2]};
//
// float new_raw_normal[3]; // un-normalized normal
// CrossProductDim3(D21, D31, new_raw_normal);
// float Ln = sqrt(DotProductDim3(new_raw_normal, new_raw_normal)); // new_area = Ln/2;
// float L[3] = { sqrt(DotProductDim3(D21, D21)),
// sqrt(DotProductDim3(D31, D31)),
// sqrt(DotProductDim3(D32, D32)) };
//
// float temp = max(max(L[0]*L[1], max(L[0]*L[2], L[1]*L[2])), 1e-20f);
// float min_sin_theta = Ln/temp;
//
// if (min_sin_theta < minSinTheta) // minSinTheta=0.1392, e.g. 8 degree as threshold
// isDegenerate[fcIdx] = true;
// else
// atomicAdd(&mfOut[batIdx],1);
}
}
__global__ void sizeofOutput(const int B, const int* nvIn, const int* vtMap, int* nvOut)
{
for(int it=0;it<B;it++)
{
if (it>0)
nvOut[it] = vtMap[nvIn[it]-1] - vtMap[nvIn[it-1]-1];
else
nvOut[it] = vtMap[nvIn[it]-1];
}
}
__global__ void getIOmap(int Nv, const int* isKept, int* vtMap)
{
int vi = blockIdx.x*blockDim.x + threadIdx.x;
if (vi < Nv)
{
if(isKept[vi]==1)
vtMap[vi] -= 1; // index start from 0
else
            vtMap[vi] = -1; // NOTE: a vertex with vtReplace[vi]>=0 may also get vtMap[vi]=-1 when no surviving (non-degenerate) face references it
}
}
__global__ void updateFaces(int Nf, const int* vtMap, const bool* isDegenerate, int* faceOut)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x; // global face index in the batch
if (fcIdx<Nf && !isDegenerate[fcIdx]) // index must be in the legal range
{
// old (v1, v2, v3): indices in the input
int v1 = faceOut[3*fcIdx];
int v2 = faceOut[3*fcIdx+1];
int v3 = faceOut[3*fcIdx+2];
// new (v1, v2, v3): indices in the output
faceOut[3*fcIdx] = vtMap[v1];
faceOut[3*fcIdx+1] = vtMap[v2];
faceOut[3*fcIdx+2] = vtMap[v3];
}
}
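// shuffleEdgeIndices: the cost-sorted edge indices are split into `shuffleBins` bins of width
// `bandWidth`; within every full bin the same random permutation (seedIdx) is applied, which
// randomises the greedy pairing order while roughly preserving the coarse cost ordering. A trailing
// partial bin, if any, is left in its original order.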
__global__ void shuffleEdgeIndices(const int uniqueNe, const int bandWidth, const int shuffleBins,
const int* seedIdx, const int* edgeIdx, int* shufIdx)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<uniqueNe)
{
int binIdx = int(idx/bandWidth);
int eid = idx;
if (binIdx<shuffleBins)
{
int remainder = idx%bandWidth;
eid = seedIdx[remainder] + binIdx*bandWidth;
}
shufIdx[idx] = edgeIdx[eid];
}
}
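// clusterVertices (host, per sample): greedy vertex clustering over the (shuffled) cost-ordered
// unique edges in two passes. Pass 1 pairs edges whose endpoints are both still uncovered (seed
// pairs); pass 2 attaches a remaining uncovered endpoint to the cluster of its already-covered
// neighbour. Contraction stops once nv2Remove vertices have been removed; startIdx offsets the
// batch-global vertex indices into the per-sample Covered array.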
__host__ void clusterVertices(const int Ne, const int uniqueNe, const int vtNum, const int startIdx,
const int* edgeOut, const int* edgeIdx, const int nv2Remove, int* vtReplace)
{
int vtRemoved = 0;
std::vector<char> Covered(vtNum,0);
// vertex clustering of time complexity O(n)+O(n)
for(int i=0;i<uniqueNe;i++)
{
int vi = edgeOut[edgeIdx[i]];
int vj = edgeOut[edgeIdx[i]+Ne];
if (vtRemoved>=nv2Remove)
continue;
if(Covered[vi-startIdx]==0 && Covered[vj-startIdx]==0) // both vertices of the edge are not covered
{
vtReplace[vi] = -vj; // negative contracted vertex index
vtReplace[vj]++; // cluster size except the point itself
Covered[vi-startIdx] = 1; // 1 for seed vertex
Covered[vj-startIdx] = 1; // 1 for seed vertex
vtRemoved++;
}
}
for(int i=0;i<uniqueNe;i++)
{
int vk1 = edgeOut[edgeIdx[i]];
int vk2 = edgeOut[edgeIdx[i]+Ne];
if (vtRemoved>=nv2Remove)
continue;
if(Covered[vk1-startIdx]>0 && Covered[vk2-startIdx]>0) // both vertices of the edge are covered
continue;
if (Covered[vk2-startIdx]==1) // only `vk2' is in seed vertex pair
{
// swap `vk1' and `vk2'
int temp = vk1;
vk1 = vk2;
vk2 = temp;
}
if (vtReplace[vk1]<0)
vk1 = -vtReplace[vk1];
vtReplace[vk2] = -vk1;
vtReplace[vk1]++;
if (vtReplace[vk1]<0)
std::cout<<"error clustering.\n";
Covered[vk2-startIdx] = 2; // 2 for non-seed vertex
vtRemoved++;
}
}
// NOTE: each row of array 'planeIn' is composed of [normal=[nx,ny,nz], intercept=d, area]
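// Input layout as read by the code below: nvIn/mfIn hold per-sample cumulative (inclusive
// prefix-sum) vertex/face counts, so Nv = nvIn[B-1] and Nf = mfIn[B-1]; vertexIn is (Nv,3) xyz;
// faceIn is (Nf,3) with batch-global vertex indices; nv2Remove is the per-sample number of
// vertices to remove.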
void meshDecimationLauncher(const int shuffleBins, const bool useArea, const float wgtBnd, const float wgtConsist, //hyperparams
const int B, const int Nv, const int Nf, const int* nvIn, const int* mfIn, //inputs
const int* nv2Remove, const float* vertexIn, const int* faceIn, const float* planeIn, //inputs
int* nvOut, int* mfOut, float* vertexOut, int* faceOut, int* vtReplace, int* vtMap, //ouputs
bool* isDegenerate)
{
    // copy data from device to host
int* h_nvIn = new int[B];
int* h_mfIn = new int[B];
int* h_nv2Remove = new int[B];
hipMemcpy(h_nvIn, nvIn, B*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_mfIn, mfIn, B*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(h_nv2Remove, nv2Remove, B*sizeof(int), hipMemcpyDeviceToHost);
// Initialize per-vertex Quadric on GPU in parallel
float* vertexQuadric;
int numGrid = int(Nf/1024) + 1; // Nf is the total number of faces in the batch
hipMalloc(&vertexQuadric, (10*Nv)*sizeof(float));
hipMemset(vertexQuadric, 0, (10*Nv)*sizeof(float)); // initialize all to zeros
hipLaunchKernelGGL(( initVertexQuadrics), dim3(numGrid),dim3(1024), 0, 0, Nf, useArea, faceIn, planeIn, vertexQuadric);
//hipDeviceSynchronize();
    // Extract edges from the face list: resulting shape (3,Ne)
int nRows = 3; // edge(v1,v2) + faceIdx: 3 integers
int Ne = 3*Nf; // total number of edges in the batch
int* edgeOut;
hipMalloc(&edgeOut, (nRows*Ne)*sizeof(int));
hipLaunchKernelGGL(( extractEdges), dim3(numGrid),dim3(1024), 0, 0, Nf, faceIn, edgeOut);
//hipDeviceSynchronize();
// sort the edges
thrust::device_vector<int> edgeIdx(Ne);
thrust::sequence(edgeIdx.begin(), edgeIdx.end());
thrust::sort(edgeIdx.begin(), edgeIdx.end(), edge_sort_func<int>(Ne, edgeOut));
//add additional boundary Quadric
if (wgtBnd>0)
{
numGrid = int(Ne/1024) + 1;
hipLaunchKernelGGL(( addBoundaryQuadrics), dim3(numGrid),dim3(1024), 0, 0, Ne, useArea, wgtBnd, vertexIn, planeIn, edgeOut,
thrust::raw_pointer_cast(edgeIdx.data()), vertexQuadric);
//hipDeviceSynchronize();
}
    std::vector<int> h_vtReplace(Nv, 0); // heap-allocated; avoids a non-standard variable-length stack array
int* h_edgeOut = new int[2*Ne];
hipMemcpy(h_edgeOut, edgeOut, 2*Ne*sizeof(int), hipMemcpyDeviceToHost);
    srand(time(0)); // seed the random source used by std::random_shuffle with the current time
int beginIdx=0, endIdx;
for(int b=0;b<B;b++)
{
endIdx = 3*h_mfIn[b];
        // get unique edges with thrust::unique on GPU; these form the candidate vertex pairs
        // IMPORTANT: edgeOut keeps its (3,Ne) shape; only the first uniqueNe entries of the
        // sorted index range for this sample refer to unique edges
int uniqueNe = thrust::unique(thrust::device, edgeIdx.data()+beginIdx, edgeIdx.data()+endIdx,
edge_unique_func<int>(Ne, edgeOut)) - (edgeIdx.data()+beginIdx);
// Compute Quadric cost of each unique edge
float* edgeCost;
numGrid = int(uniqueNe/1024) + 1;
hipMalloc(&edgeCost, uniqueNe*sizeof(float));
hipLaunchKernelGGL(( computeEdgeCost), dim3(numGrid),dim3(1024), 0, 0, uniqueNe, Ne, wgtConsist, vertexIn, edgeOut,
thrust::raw_pointer_cast(edgeIdx.data()+beginIdx),
vertexQuadric, edgeCost);
//hipDeviceSynchronize();
        // sort this sample's unique edges by quadric cost (ascending) using thrust::sort_by_key;
        // only this sample's segment of edgeIdx is reordered
thrust::sort_by_key(thrust::device, edgeCost, edgeCost+uniqueNe, edgeIdx.data()+beginIdx);
        // partition the cost-sorted edges into `shuffleBins` bins and shuffle the edges within
        // each bin according to a shared random permutation of seed indices
int* h_edgeIdx = new int[uniqueNe];
if (uniqueNe>shuffleBins)
{
int bandWidth = int(uniqueNe/shuffleBins);
thrust::device_vector<int> seedIdx(bandWidth);
thrust::sequence(seedIdx.begin(), seedIdx.end());
thrust::host_vector<int> h_seedIdx = seedIdx;
std::random_shuffle(h_seedIdx.begin(), h_seedIdx.end());
seedIdx = h_seedIdx;
thrust::device_vector<int> shufIdx(uniqueNe);
hipLaunchKernelGGL(( shuffleEdgeIndices), dim3(numGrid),dim3(1024), 0, 0, uniqueNe, bandWidth, shuffleBins,
thrust::raw_pointer_cast(seedIdx.data()),
thrust::raw_pointer_cast(edgeIdx.data()+beginIdx),
thrust::raw_pointer_cast(shufIdx.data()));
hipMemcpy(h_edgeIdx, thrust::raw_pointer_cast(shufIdx.data()),
uniqueNe*sizeof(int), hipMemcpyDeviceToHost);
}
else
{
hipMemcpy(h_edgeIdx, thrust::raw_pointer_cast(edgeIdx.data()+beginIdx),
uniqueNe*sizeof(int), hipMemcpyDeviceToHost);
}
// Vertex cluster: seed/disjoint pair generation on CPU with conditional-loops
int prevNum = 0;
if(b>0) prevNum = h_nvIn[b-1];
const int vtNum = h_nvIn[b] - prevNum;
clusterVertices(Ne, uniqueNe, vtNum, prevNum, h_edgeOut, &h_edgeIdx[0],
                        h_nv2Remove[b], h_vtReplace.data());
beginIdx = endIdx;
hipFree(edgeCost);
delete[] h_edgeIdx;
}
    hipMemcpy(vtReplace, h_vtReplace.data(), Nv*sizeof(int), hipMemcpyHostToDevice);
    // Vertex cluster contraction on GPU in parallel (per-sample nvOut is derived later from vtMap)
numGrid = int(Nv/1024) + 1;
hipLaunchKernelGGL(( VertexClusterQuadrics), dim3(numGrid),dim3(1024), 0, 0, B, nvIn, vtReplace, vertexIn, vertexQuadric, vertexOut);
hipLaunchKernelGGL(( VertexClusterContraction), dim3(numGrid),dim3(1024), 0, 0, B, nvIn, vtReplace, vertexIn, vertexQuadric, vertexOut);
//hipDeviceSynchronize();
    // Label degenerate faces (the sliver-triangle test is left commented out) and compute mfOut
int* isKept;
hipMalloc(&isKept, Nv*sizeof(int));
hipMemset(isKept,0,Nv*sizeof(int));
numGrid = int(Nf/1024) + 1;
hipLaunchKernelGGL(( labelDegenerateTriangles), dim3(numGrid),dim3(1024), 0, 0, B, mfIn, vtReplace, vertexOut, faceIn, faceOut,
isDegenerate, isKept, mfOut);
//hipDeviceSynchronize();
    // update vertex indices (i.e. vtMap); some vertices no longer exist because of degenerate faces
hipMemcpy(vtMap, isKept, Nv*sizeof(int), hipMemcpyDeviceToDevice);
thrust::inclusive_scan(thrust::device, vtMap, vtMap+Nv, vtMap);
hipLaunchKernelGGL(( sizeofOutput), dim3(1),dim3(1), 0, 0, B, nvIn, vtMap, nvOut);
numGrid = int(Nv/1024) + 1;
hipLaunchKernelGGL(( getIOmap), dim3(numGrid),dim3(1024), 0, 0, Nv, isKept, vtMap);
//hipDeviceSynchronize();
// update vertex indices of the triangles
numGrid = int(Nf/1024) + 1;
hipLaunchKernelGGL(( updateFaces), dim3(numGrid),dim3(1024), 0, 0, Nf, vtMap, isDegenerate, faceOut);
//hipDeviceSynchronize();
// free the cpu and gpu memory
hipFree(vertexQuadric);
hipFree(edgeOut);
hipFree(isKept);
delete[] h_nvIn;
delete[] h_mfIn;
delete[] h_nv2Remove;
    delete[] h_edgeOut;
    delete[] h_vtReplace;
}
__global__ void update_repB(const int nvA, int* repA, int* mapA, int* repB)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<nvA)
{
int vi = idx;
int vo = mapA[vi];
if (vo>=0 && repB[vo]>=0)
repB[vo] = vi;
}
}
__global__ void update_repA(const int nvA, int* repA, int* mapA, int* repB)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<nvA)
{
int vi = idx;
if (repA[vi]<0)
vi = -repA[vi];
int vo = mapA[vi];
if (vo>=0 && repB[vo]<0)
{
vo = -repB[vo];
repA[idx] = -repB[vo];
if (repB[vo]<0)
printf("update_repA Error: repB[vo]=%d<0!\n", repB[vo]);
atomicAdd(&repA[repB[vo]],1);
}
}
}
__global__ void update_mapA(const int nvA, int* repA, int* mapA, int* repB, const int* mapB)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<nvA)
{
int vi = idx;
int vo = mapA[vi];
if (vo>=0)
mapA[vi] = mapB[vo];
else
mapA[vi] = -1;
}
}
void combineClustersLauncher(const int nvA, const int nvB, const int* repA, const int* mapA,
const int* repB, const int* mapB, int* repOut, int* mapOut)
{
hipMemcpy(repOut, repA, nvA*sizeof(int), hipMemcpyDeviceToDevice);
hipMemcpy(mapOut, mapA, nvA*sizeof(int), hipMemcpyDeviceToDevice);
int* repBcopy;
hipMalloc(&repBcopy, nvB*sizeof(int));
hipMemcpy(repBcopy, repB, nvB*sizeof(int), hipMemcpyDeviceToDevice);
int numGrid = int(nvA/1024) + 1;
hipLaunchKernelGGL(( update_repB), dim3(numGrid),dim3(1024), 0, 0, nvA, repOut, mapOut, repBcopy);
hipLaunchKernelGGL(( update_repA), dim3(numGrid),dim3(1024), 0, 0, nvA, repOut, mapOut, repBcopy);
hipLaunchKernelGGL(( update_mapA), dim3(numGrid),dim3(1024), 0, 0, nvA, repOut, mapOut, repBcopy, mapB);
hipFree(repBcopy);
}
__global__ void count_vertex_adjface(int NfIn, const int* face, const int* vtMap, int* nfCount)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (fcIdx < NfIn)
{
int v[3] = {face[fcIdx*3], face[fcIdx*3+1], face[fcIdx*3+2]};
for(int k=0;k<3;k++)
{
int vi = v[k];
int vo = vtMap[vi];
if (vo>=0)
atomicAdd(&nfCount[vo],1);
}
}
}
void countVertexAdjfaceLauncher(int NfIn, const int* face, const int* vtMap, int* nfCount)
{
int numGrid = int(NfIn/1024) + 1;
hipLaunchKernelGGL(( count_vertex_adjface), dim3(numGrid),dim3(1024), 0, 0, NfIn, face, vtMap, nfCount);
}
__global__ void compute_vertex_geometry(int Nf, const int* face, const float* faceGeo, float* vertexGeo)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (fcIdx < Nf)
{
int v[3] = {face[fcIdx*3], face[fcIdx*3+1], face[fcIdx*3+2]};
float face_nx = faceGeo[fcIdx*4];
float face_ny = faceGeo[fcIdx*4+1];
float face_nz = faceGeo[fcIdx*4+2];
float face_A = faceGeo[fcIdx*4+3]; // area
for(int k=0;k<3;k++)
{
int vk = v[k];
atomicAdd(&vertexGeo[vk*4], face_A*face_nx);
atomicAdd(&vertexGeo[vk*4+1],face_A*face_ny);
atomicAdd(&vertexGeo[vk*4+2],face_A*face_nz);
atomicAdd(&vertexGeo[vk*4+3],face_A/3);
}
}
}
void computeVertexGeometryLauncher(int Nf, const int* face, const float* faceGeo, float* vertexGeo)
{
int numGrid = int(Nf/1024) + 1;
hipLaunchKernelGGL(( compute_vertex_geometry), dim3(numGrid),dim3(1024), 0, 0, Nf, face, faceGeo, vertexGeo);
}
| 19798554fa0df20cabd0ee5e43fd1d0eb5d26206.cu | #include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
#include <thrust/mismatch.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cmath> // sqrtf
#include <algorithm> // std::random_shuffle
#include <cstdlib> // std::rand, std::srand
#include <cassert>
#include <vector>       // std::vector, used in clusterVertices
#include <iostream>     // std::cout
#include <ctime>        // time(), used to seed std::rand
#define myEPS 2.220446049250313e-16F
#define rcondEPS 1e-6F
#define minSinTheta 0.001F
#define areaMAGNITUDE 100.0F // assume input in metre, change Area scale to decimetre
// reference (credit): https://stackoverflow.com/questions/34697937/unique-rows-from-linearized-matrix-cuda
// edge_sort_func<T> sort the edge array in ascending order
template <typename T>
struct edge_sort_func
{
int cols;
T* data;
edge_sort_func(int _cols, T* _data) : cols(_cols),data(_data) {};
__host__ __device__
bool operator()(int c1, int c2){
for (int i = 0; i < 2; i++){
if (data[c1+i*cols] < data[c2+i*cols])
return true;
else if (data[c1+i*cols] > data[c2+i*cols])
return false;}
return false;
}
};
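// Usage (see meshDecimationLauncher below): the column indices of edgeOut are sorted with
//   thrust::sort(edgeIdx.begin(), edgeIdx.end(), edge_sort_func<int>(Ne, edgeOut));
// which orders edges lexicographically by (smaller vertex id, larger vertex id).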
// reference (credit): https://stackoverflow.com/questions/34697937/unique-rows-from-linearized-matrix-cuda
// edge_unique_func<T> unique the edge array
template <typename T>
struct edge_unique_func
{
int cols;
T* data;
edge_unique_func(int _cols, T* _data) : cols(_cols),data(_data) {};
__device__
bool operator()(int c1, int c2){
thrust::pair<T*, T*> res1 = thrust::mismatch(thrust::seq, data+c1, data+c1+1, data+c2);
if (res1.first!=data+c1+1)
return false;
else
{
thrust::pair<T*, T*> res2 = thrust::mismatch(thrust::seq, data+cols+c1, data+cols+c1+1, data+cols+c2);
return (res2.first==data+cols+c1+1);
}
}
};
__device__ void NormalizeCrossProductDim3(const float* vecA, const float* vecB, float* vecCross)
{
vecCross[0] = vecA[1]*vecB[2] - vecA[2]*vecB[1];
vecCross[1] = vecA[2]*vecB[0] - vecA[0]*vecB[2],
vecCross[2] = vecA[0]*vecB[1] - vecA[1]*vecB[0];
float length = std::sqrt(vecCross[0]*vecCross[0] + vecCross[1]*vecCross[1] + vecCross[2]*vecCross[2]);
length = length>1e-20f?length:1e-20f;
vecCross[0] /= length; vecCross[1] /= length; vecCross[2] /= length;
}
__device__ void CrossProductDim3(const float* vecA, const float* vecB, float* vecCross)
{
vecCross[0] = vecA[1]*vecB[2] - vecA[2]*vecB[1];
vecCross[1] = vecA[2]*vecB[0] - vecA[0]*vecB[2],
vecCross[2] = vecA[0]*vecB[1] - vecA[1]*vecB[0];
}
__device__ float DotProductDim3(const float* vecA, const float* vecB)
{
return (vecA[0]*vecB[0] + vecA[1]*vecB[1] + vecA[2]*vecB[2]);
}
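// RrefDim3 runs Gauss-Jordan elimination with partial pivoting on the augmented matrix [A | b | I]:
// on return, xyz = -inv(A)*b (the quadric minimizer when A is invertible), invA = inv(A), and the
// return value estimates the reciprocal condition number 1/(norm_inf(A)*norm_inf(invA));
// callers treat values below rcondEPS as "A is numerically singular".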
__device__ float RrefDim3(const float A[3][3], const float b[3], float invA[3][3], float xyz[3])
{
float Mat[3][7];
for(int row=0;row<3;row++)
{
for (int col=0;col<3;col++)
{
Mat[row][col] = A[row][col];
if (row==col)
Mat[row][col+4] = 1;
else
Mat[row][col+4] = 0;
}
}
Mat[0][3] = b[0]; Mat[1][3] = b[1]; Mat[2][3] = b[2];
// compute infinity norm of A
float normA=0;
for(int row=0;row<3;row++)
{
float rowSum = 0;
for(int col=0;col<3;col++)
rowSum += std::abs(Mat[row][col]);
if (normA<rowSum)
normA = rowSum;
}
// matlab EPS of 'single' datatype is 1.1920929e-07, of 'double' is 2.220446049250313e-16
const int m=3, n=7;
float tol = myEPS*n*normA;
int i=0, j=0;
while ((i<m) && (j<n))
{
// Find value and index of largest element in the remainder of column j.
float p=std::abs(Mat[i][j]);
int k=i;
for(int row=i+1;row<m;row++)
{
if (p<std::abs(Mat[row][j]))
{
p=std::abs(Mat[row][j]);
k = row;
}
}
if (p <= tol)
{
// The column is negligible, zero it out.
for(int row=i;row<m;row++)
Mat[row][j] = 0;
j = j + 1;
}
else
{
// Swap i-th and k-th rows.
float temp[n] = {0};
for(int col=j;col<n;col++)
temp[col] = Mat[i][col];
for(int col=j;col<n;col++)
{
Mat[i][col] = Mat[k][col];
Mat[k][col] = temp[col];
}
// Divide the pivot row by the pivot element.
float pivot = Mat[i][j];
for(int col=j;col<n;col++)
{
Mat[i][col] = Mat[i][col]/pivot;
}
// Subtract multiples of the pivot row from all the other rows.
for(int row=0;row<i;row++)
{
const float value = Mat[row][j];
for(int col=j;col<n;col++)
Mat[row][col] -= (value*Mat[i][col]);
}
for(int row=i+1;row<m;row++)
{
const float value = Mat[row][j];
for(int col=j;col<n;col++)
Mat[row][col] -= (value*Mat[i][col]);
}
i++;
j++;
}
}
xyz[0] = -Mat[0][3]; xyz[1] = -Mat[1][3]; xyz[2] = -Mat[2][3];
for(int row=0;row<3;row++)
for(int col=0;col<3;col++)
invA[row][col] = Mat[row][col+4];
// infinity norm of the inverse of A
float normInvA=0;
for(int row=0;row<3;row++)
{
float rowSum = 0;
for(int col=0;col<3;col++)
rowSum += std::abs(invA[row][col]);
if (normInvA<rowSum)
normInvA = rowSum;
}
float rcond = 1/(normA*normInvA);
return rcond;
}
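// ComputeError evaluates the quadric error Q(x) = x^T*A*x + 2*b^T*x + c of placing a vertex at x,
// where (A, b, c) is the packed symmetric quadric described below in GenerateQuadrics.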
__device__ float ComputeError(const float xyz[3], const float A[3][3], const float b[3], const float& c)
{
float cost = ((xyz[0]*A[0][0] + xyz[1]*A[1][0] + xyz[2]*A[2][0])*xyz[0]
+ (xyz[0]*A[0][1] + xyz[1]*A[1][1] + xyz[2]*A[2][1])*xyz[1]
+ (xyz[0]*A[0][2] + xyz[1]*A[1][2] + xyz[2]*A[2][2])*xyz[2])
+ (xyz[0]*b[0] + xyz[1]*b[1] + xyz[2]*b[2])*2
+ c;
return cost;
}
// compute Quadric = (A, b, c) = [ q11, q12, q13, q14;
// q12, q22, q23, q24;
// q13, q23, q33, q34;
// q14, q24, q34, q44; ]
__device__ void GenerateQuadrics(const float* normal, const float& d, float* Q)
{
Q[0] = normal[0]*normal[0]; Q[1] = normal[0]*normal[1]; Q[2] = normal[0]*normal[2]; // q11, q12, q13
Q[3] = normal[1]*normal[1]; Q[4] = normal[1]*normal[2]; Q[5] = normal[2]*normal[2]; // q22, q23, q33
Q[6] = normal[0]*d; Q[7] = normal[1]*d; Q[8] = normal[2]*d; // q14, q24, q34
Q[9] = d*d; // q44
}
__device__ void AddQuadrics(const float* Q1, const float* Q2, float* Q)
{
for(int i=0;i<10;i++)
Q[i] = Q1[i] + Q2[i];
}
__global__ void initVertexQuadrics(int Nf, const bool useArea, const int* faceIn,
const float* planeIn, float* vertexQuadric)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x; // global face index in the batch
if (fcIdx<Nf) // index must be in the legal range
{
// geometric information of the triangular face
const float* normal = &planeIn[5*fcIdx]; // [0,1,2]
float d = planeIn[5*fcIdx+3];
float area = planeIn[5*fcIdx+4]*areaMAGNITUDE;
float Q[10];
GenerateQuadrics(normal, d, Q);
// weighting value for the Quadric computation
float wgtArea = 1.0f;
if (useArea) {wgtArea = area/3;}
// accumulate Quadrics for each vertex
for (int k=0; k<3; k++)
{
int vtIdx = faceIn[3*fcIdx+k]; // assume: already global vertex index in the batch
for(int it=0;it<10;it++)
atomicAdd(&vertexQuadric[10*vtIdx+it], Q[it]*wgtArea);
}
}
}
// one column for one edge
__global__ void extractEdges(int Nf, const int* faceIn, int* edgeOut)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x; // global face index in the batch
if (fcIdx<Nf) // index must be in the legal range
{
        // we store the global vertex indices of each edge so that thrust can be applied to all edges at once.
        // Each face yields 3 edges, so the edge list is 3 times as long as the face list.
        // 3 edges => 9 integers to store, including the owning face index
int Ne = 3*Nf; // the total number of edges in the batch
int v1 = faceIn[3*fcIdx];
int v2 = faceIn[3*fcIdx+1];
int v3 = faceIn[3*fcIdx+2];
if (v1<=v2)
{
edgeOut[3*fcIdx] = v1; // in each edge, the vertex has its global index in the batch
edgeOut[3*fcIdx+Ne] = v2;
edgeOut[3*fcIdx+Ne*2] = fcIdx; // record mapping between face index in the batch
} else{
edgeOut[3*fcIdx] = v2;
edgeOut[3*fcIdx+Ne] = v1;
edgeOut[3*fcIdx+Ne*2] = fcIdx;
}
if (v1<=v3)
{
edgeOut[3*fcIdx+1] = v1;
edgeOut[3*fcIdx+1+Ne] = v3;
edgeOut[3*fcIdx+1+Ne*2] = fcIdx;
} else{
edgeOut[3*fcIdx+1] = v3;
edgeOut[3*fcIdx+1+Ne] = v1;
edgeOut[3*fcIdx+1+Ne*2] = fcIdx;
}
if (v2<=v3)
{
edgeOut[3*fcIdx+2] = v2;
edgeOut[3*fcIdx+2+Ne] = v3;
edgeOut[3*fcIdx+2+Ne*2] = fcIdx;
} else{
edgeOut[3*fcIdx+2] = v3;
edgeOut[3*fcIdx+2+Ne] = v2;
edgeOut[3*fcIdx+2+Ne*2] = fcIdx;
}
}
}
// Ne is the total number of edges in the batch
__global__ void addBoundaryQuadrics(const int Ne, const bool useArea, const float wgtBnd,
const float* vertexIn, const float* planeIn, const int* edgeOut,
const int* sortedEdgeIdx, float* vertexQuadric)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x; // global index of the current thread
if (idx<Ne) // edge index must be within [0,Ne-1]
{
int curr_vi = edgeOut[sortedEdgeIdx[idx]];
int curr_vj = edgeOut[sortedEdgeIdx[idx]+Ne];
        // assume the edge is a boundary edge; if the same edge appears in a neighboring
        // (sorted) position, update the flag to false
bool isBndEdge = true;
if (idx>0 && isBndEdge)
{
int prev_vi = edgeOut[sortedEdgeIdx[idx-1]];
int prev_vj = edgeOut[sortedEdgeIdx[idx-1]+Ne];
if (curr_vi==prev_vi && curr_vj==prev_vj)
{
isBndEdge = false; // not boundary edge
}
}
if (idx<(Ne-1) && isBndEdge)
{
int next_vi = edgeOut[sortedEdgeIdx[idx+1]];
int next_vj = edgeOut[sortedEdgeIdx[idx+1]+Ne];
if (curr_vi==next_vi && curr_vj==next_vj)
{
isBndEdge = false; // not boundary edge
}
}
if (isBndEdge) // if the current edge is boundary edge
{
int fcIdx = edgeOut[sortedEdgeIdx[idx]+Ne*2]; // get the corresponding global face index
// geometric information of the triangular face
const float* normal = &planeIn[5*fcIdx]; //[0,1,2]=[nx,ny,nz]
const float* xyz_vi = &vertexIn[curr_vi*3]; //[0,1,2]=[ x, y, z]
const float* xyz_vj = &vertexIn[curr_vj*3]; //[0,1,2]=[ x, y, z]
float Dxyz[3] = {xyz_vj[0]-xyz_vi[0], xyz_vj[1]-xyz_vi[1], xyz_vj[2]-xyz_vi[2]};
float bndNormal[3];
NormalizeCrossProductDim3(normal, Dxyz, bndNormal);
float d = -DotProductDim3(bndNormal, xyz_vi);
float bndArea = DotProductDim3(Dxyz, Dxyz)*areaMAGNITUDE;
if (useArea)
bndArea *= wgtBnd;
else
bndArea = wgtBnd;
float Q[10];
GenerateQuadrics(bndNormal, d, Q);
// accumulate Quadrics for edge vertices vi, vj
for(int it=0;it<10;it++)
{
atomicAdd(&vertexQuadric[10*curr_vi+it], Q[it]*bndArea);
atomicAdd(&vertexQuadric[10*curr_vj+it], Q[it]*bndArea);
}
}
}
}
// note the candidate edges are unique: arrangement of `edgeCost' corresponds to `uniqueEdgeIdx'
__global__ void computeEdgeCost(int uniqueNe, int Ne, const float wgtConsist, const float* vertexIn, const int* edgeOut,
const int* uniqueEdgeIdx, const float* vertexQuadric, float* edgeCost)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x; // global edge (cost) index in the batch
if (idx<uniqueNe) // in the range [0,numUniqueEdges-1]
{
int vi = edgeOut[uniqueEdgeIdx[idx]];
int vj = edgeOut[uniqueEdgeIdx[idx]+Ne];
// add Quadrics of vi and vj
float Q[10]; //(A,b,c)
AddQuadrics(&vertexQuadric[10*vi], &vertexQuadric[10*vj], Q);
        // redundant copying, kept for explicitness
        float A[3][3] = {{Q[0], Q[1], Q[2]}, {Q[1], Q[3], Q[4]}, {Q[2], Q[4], Q[5]}};
float b[3] = {Q[6], Q[7], Q[8]};
float c = Q[9];
float invA[3][3];
float opt_xyz[3];
float rcondA = RrefDim3(A, b, invA, opt_xyz);
if (rcondA>rcondEPS) // A is well invertible
{
edgeCost[idx] = ComputeError(opt_xyz, A, b, c);
}
else // A is a singular matrix
{
float errI = ComputeError(&vertexIn[vi*3], A, b, c);
float errJ = ComputeError(&vertexIn[vj*3], A, b, c);
edgeCost[idx] = errI<errJ?errI:errJ;
}
}
}
// accumulate cluster Quadrics,
__global__ void VertexClusterQuadrics(int B, const int* nvIn, const int* vtReplace, const float* vertexIn,
float* vertexQuadric, float* vertexOut)
{
int vj = blockIdx.x*blockDim.x + threadIdx.x; // old (global) vertex index in the batch
int Nv = nvIn[B-1];
if (vj<Nv && vtReplace[vj]<0) // in the legal vertex range
{
int vi = -vtReplace[vj]; // new-replaced (global) vertex index in the batch
for(int it=0;it<10;it++) // accumulate vertex Quadrics
atomicAdd(&vertexQuadric[10*vi+it], vertexQuadric[10*vj+it]);
for(int it=0;it<3;it++) // accumulate vertex XYZ
atomicAdd(&vertexOut[3*vi+it], vertexIn[3*vj+it]);
}
}
// compute optimal contracted location of output vertex in each cluster
__global__ void VertexClusterContraction(int B, const int* nvIn, const int* vtReplace, const float* vertexIn,
float* vertexQuadric, float* vertexOut)
{
int vj = blockIdx.x*blockDim.x + threadIdx.x; // old (global) vertex index in the batch
int Nv = nvIn[B-1];
if (vj<Nv) // in the legal vertex range
{
        // for numerical stability of the optimal location, we use the average of xyz
if (vtReplace[vj]>0) // vertex to be contracted to
{
            for(int it=0;it<3;it++) // average the accumulated XYZ over the cluster (including this vertex)
vertexOut[3*vj+it] = (vertexOut[3*vj+it]+vertexIn[3*vj+it])/(vtReplace[vj]+1);
}
if (vtReplace[vj]==0) // left out vertex, forms singular cluster, copy from original
{
            for(int it=0;it<3;it++) // copy the original XYZ unchanged
vertexOut[3*vj+it] = vertexIn[3*vj+it];
}
}
}
__global__ void labelDegenerateTriangles(int B, const int* mfIn, const int* vtReplace,
const float* vertexOut, const int* faceIn,
int* faceOut, bool* isDegenerate, int* isKept, int* mfOut)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x; // global face index in the batch
int Nf = mfIn[B-1];
if (fcIdx<Nf) // index must be in the legal range
{
// get sample index in the batch accessing by the current thread
int batIdx;
for(int it=0;it<B;it++)
{
if (fcIdx < mfIn[it])
{
batIdx = it;
break;
}
}
// old v1, v2, v3
int v1 = faceIn[3*fcIdx];
int v2 = faceIn[3*fcIdx+1];
int v3 = faceIn[3*fcIdx+2];
// new v1, v2, v3
if (vtReplace[v1]<0) v1 = -vtReplace[v1];
if (vtReplace[v2]<0) v2 = -vtReplace[v2];
if (vtReplace[v3]<0) v3 = -vtReplace[v3];
// update face list: vtReplace[.] is global vertex index in the batch BEFORE decimation
// vtMap[.] is global vertex index in the batch AFTER decimation
faceOut[3*fcIdx] = v1; //vtMap[v1];
faceOut[3*fcIdx+1] = v2; //vtMap[v2];
faceOut[3*fcIdx+2] = v3; //vtMap[v3];
if (v1==v2 || v1==v3 || v2==v3)
{
isDegenerate[fcIdx] = true;
//isKept[v1] = 0; isKept[v2] = 0; isKept[v3] = 0;
}
else
{
atomicAdd(&mfOut[batIdx],1);
isKept[v1] = 1; isKept[v2] = 1; isKept[v3] = 1;
}
// const float* xyz_v1 = &vertexOut[3*v1];
// const float* xyz_v2 = &vertexOut[3*v2];
// const float* xyz_v3 = &vertexOut[3*v3];
// float D21[3] = {xyz_v2[0]-xyz_v1[0], xyz_v2[1]-xyz_v1[1], xyz_v2[2]-xyz_v1[2]};
// float D31[3] = {xyz_v3[0]-xyz_v1[0], xyz_v3[1]-xyz_v1[1], xyz_v3[2]-xyz_v1[2]};
// float D32[3] = {xyz_v3[0]-xyz_v2[0], xyz_v3[1]-xyz_v2[1], xyz_v3[2]-xyz_v2[2]};
//
// float new_raw_normal[3]; // un-normalized normal
// CrossProductDim3(D21, D31, new_raw_normal);
// float Ln = sqrt(DotProductDim3(new_raw_normal, new_raw_normal)); // new_area = Ln/2;
// float L[3] = { sqrt(DotProductDim3(D21, D21)),
// sqrt(DotProductDim3(D31, D31)),
// sqrt(DotProductDim3(D32, D32)) };
//
// float temp = max(max(L[0]*L[1], max(L[0]*L[2], L[1]*L[2])), 1e-20f);
// float min_sin_theta = Ln/temp;
//
// if (min_sin_theta < minSinTheta) // minSinTheta=0.1392, e.g. 8 degree as threshold
// isDegenerate[fcIdx] = true;
// else
// atomicAdd(&mfOut[batIdx],1);
}
}
__global__ void sizeofOutput(const int B, const int* nvIn, const int* vtMap, int* nvOut)
{
for(int it=0;it<B;it++)
{
if (it>0)
nvOut[it] = vtMap[nvIn[it]-1] - vtMap[nvIn[it-1]-1];
else
nvOut[it] = vtMap[nvIn[it]-1];
}
}
__global__ void getIOmap(int Nv, const int* isKept, int* vtMap)
{
int vi = blockIdx.x*blockDim.x + threadIdx.x;
if (vi < Nv)
{
if(isKept[vi]==1)
vtMap[vi] -= 1; // index start from 0
else
vtMap[vi] = -1; // NOTE: vtReplace[vi]>=0 may also get vtMap[vi]=-1, because they are deleted in the vertex contraction
}
}
__global__ void updateFaces(int Nf, const int* vtMap, const bool* isDegenerate, int* faceOut)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x; // global face index in the batch
if (fcIdx<Nf && !isDegenerate[fcIdx]) // index must be in the legal range
{
// old (v1, v2, v3): indices in the input
int v1 = faceOut[3*fcIdx];
int v2 = faceOut[3*fcIdx+1];
int v3 = faceOut[3*fcIdx+2];
// new (v1, v2, v3): indices in the output
faceOut[3*fcIdx] = vtMap[v1];
faceOut[3*fcIdx+1] = vtMap[v2];
faceOut[3*fcIdx+2] = vtMap[v3];
}
}
__global__ void shuffleEdgeIndices(const int uniqueNe, const int bandWidth, const int shuffleBins,
const int* seedIdx, const int* edgeIdx, int* shufIdx)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<uniqueNe)
{
int binIdx = int(idx/bandWidth);
int eid = idx;
if (binIdx<shuffleBins)
{
int remainder = idx%bandWidth;
eid = seedIdx[remainder] + binIdx*bandWidth;
}
shufIdx[idx] = edgeIdx[eid];
}
}
__host__ void clusterVertices(const int Ne, const int uniqueNe, const int vtNum, const int startIdx,
const int* edgeOut, const int* edgeIdx, const int nv2Remove, int* vtReplace)
{
int vtRemoved = 0;
std::vector<char> Covered(vtNum,0);
// vertex clustering of time complexity O(n)+O(n)
for(int i=0;i<uniqueNe;i++)
{
int vi = edgeOut[edgeIdx[i]];
int vj = edgeOut[edgeIdx[i]+Ne];
if (vtRemoved>=nv2Remove)
continue;
if(Covered[vi-startIdx]==0 && Covered[vj-startIdx]==0) // both vertices of the edge are not covered
{
vtReplace[vi] = -vj; // negative contracted vertex index
vtReplace[vj]++; // cluster size except the point itself
Covered[vi-startIdx] = 1; // 1 for seed vertex
Covered[vj-startIdx] = 1; // 1 for seed vertex
vtRemoved++;
}
}
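    // second pass: attach vertices that are still uncovered to the cluster of a covered neighbor
    // (following one level of vtReplace indirection), until nv2Remove vertices have been removed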
for(int i=0;i<uniqueNe;i++)
{
int vk1 = edgeOut[edgeIdx[i]];
int vk2 = edgeOut[edgeIdx[i]+Ne];
if (vtRemoved>=nv2Remove)
continue;
if(Covered[vk1-startIdx]>0 && Covered[vk2-startIdx]>0) // both vertices of the edge are covered
continue;
if (Covered[vk2-startIdx]==1) // only `vk2' is in seed vertex pair
{
// swap `vk1' and `vk2'
int temp = vk1;
vk1 = vk2;
vk2 = temp;
}
if (vtReplace[vk1]<0)
vk1 = -vtReplace[vk1];
vtReplace[vk2] = -vk1;
vtReplace[vk1]++;
if (vtReplace[vk1]<0)
std::cout<<"error clustering.\n";
Covered[vk2-startIdx] = 2; // 2 for non-seed vertex
vtRemoved++;
}
}
// NOTE: each row of array 'planeIn' is composed of [normal=[nx,ny,nz], intercept=d, area]
void meshDecimationLauncher(const int shuffleBins, const bool useArea, const float wgtBnd, const float wgtConsist, //hyperparams
const int B, const int Nv, const int Nf, const int* nvIn, const int* mfIn, //inputs
const int* nv2Remove, const float* vertexIn, const int* faceIn, const float* planeIn, //inputs
                            int* nvOut, int* mfOut, float* vertexOut, int* faceOut, int* vtReplace, int* vtMap,   //outputs
bool* isDegenerate)
{
    // copy data from device to host
int* h_nvIn = new int[B];
int* h_mfIn = new int[B];
int* h_nv2Remove = new int[B];
cudaMemcpy(h_nvIn, nvIn, B*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_mfIn, mfIn, B*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_nv2Remove, nv2Remove, B*sizeof(int), cudaMemcpyDeviceToHost);
// Initialize per-vertex Quadric on GPU in parallel
float* vertexQuadric;
int numGrid = int(Nf/1024) + 1; // Nf is the total number of faces in the batch
cudaMalloc(&vertexQuadric, (10*Nv)*sizeof(float));
cudaMemset(vertexQuadric, 0, (10*Nv)*sizeof(float)); // initialize all to zeros
initVertexQuadrics<<<numGrid,1024>>>(Nf, useArea, faceIn, planeIn, vertexQuadric);
//cudaDeviceSynchronize();
// Extract edges from face list: resulted shape (3,Ne)
int nRows = 3; // edge(v1,v2) + faceIdx: 3 integers
int Ne = 3*Nf; // total number of edges in the batch
int* edgeOut;
cudaMalloc(&edgeOut, (nRows*Ne)*sizeof(int));
extractEdges<<<numGrid,1024>>>(Nf, faceIn, edgeOut);
//cudaDeviceSynchronize();
// sort the edges
thrust::device_vector<int> edgeIdx(Ne);
thrust::sequence(edgeIdx.begin(), edgeIdx.end());
thrust::sort(edgeIdx.begin(), edgeIdx.end(), edge_sort_func<int>(Ne, edgeOut));
//add additional boundary Quadric
if (wgtBnd>0)
{
numGrid = int(Ne/1024) + 1;
addBoundaryQuadrics<<<numGrid,1024>>>(Ne, useArea, wgtBnd, vertexIn, planeIn, edgeOut,
thrust::raw_pointer_cast(edgeIdx.data()), vertexQuadric);
//cudaDeviceSynchronize();
}
    int* h_vtReplace = new int[Nv]();  // zero-initialized; Nv is a runtime value, so a VLA with an initializer is not valid C++
int* h_edgeOut = new int[2*Ne];
cudaMemcpy(h_edgeOut, edgeOut, 2*Ne*sizeof(int), cudaMemcpyDeviceToHost);
srand(time(0)); // setting seed of random_shuffle as current time
int beginIdx=0, endIdx;
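    // Per-sample decimation loop; for each sample b in the batch:
    //   (1) thrust::unique collapses duplicate columns of edgeOut into uniqueNe candidate vertex pairs,
    //   (2) computeEdgeCost evaluates the quadric (QEM) cost of contracting each candidate pair,
    //   (3) thrust::sort_by_key orders the candidates by increasing cost,
    //   (4) the sorted candidates are shuffled within shuffleBins bins (when uniqueNe > shuffleBins),
    //   (5) clusterVertices greedily forms vertex clusters on the CPU until nv2Remove[b] vertices are removed.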
for(int b=0;b<B;b++)
{
endIdx = 3*h_mfIn[b];
        // get unique edges using thrust::unique on GPU, which forms the candidate vertex pairs
        // IMPORTANT: edgeOut still has size (3,Ne); it is just the first uniqueNe columns
        //            that form the unique edges
int uniqueNe = thrust::unique(thrust::device, edgeIdx.data()+beginIdx, edgeIdx.data()+endIdx,
edge_unique_func<int>(Ne, edgeOut)) - (edgeIdx.data()+beginIdx);
// Compute Quadric cost of each unique edge
float* edgeCost;
numGrid = int(uniqueNe/1024) + 1;
cudaMalloc(&edgeCost, uniqueNe*sizeof(float));
computeEdgeCost<<<numGrid,1024>>>(uniqueNe, Ne, wgtConsist, vertexIn, edgeOut,
thrust::raw_pointer_cast(edgeIdx.data()+beginIdx),
vertexQuadric, edgeCost);
//cudaDeviceSynchronize();
// sorting unique edges based on their cost using thrust::sort_by_key
        // edges of different samples will be mixed in the resulting order
thrust::sort_by_key(thrust::device, edgeCost, edgeCost+uniqueNe, edgeIdx.data()+beginIdx);
        // partition the sorted edges into shuffleBins bins and shuffle the edges within each bin,
        // reusing the same random seed permutation for every bin
int* h_edgeIdx = new int[uniqueNe];
if (uniqueNe>shuffleBins)
{
int bandWidth = int(uniqueNe/shuffleBins);
thrust::device_vector<int> seedIdx(bandWidth);
thrust::sequence(seedIdx.begin(), seedIdx.end());
thrust::host_vector<int> h_seedIdx = seedIdx;
std::random_shuffle(h_seedIdx.begin(), h_seedIdx.end());
seedIdx = h_seedIdx;
thrust::device_vector<int> shufIdx(uniqueNe);
shuffleEdgeIndices<<<numGrid,1024>>>(uniqueNe, bandWidth, shuffleBins,
thrust::raw_pointer_cast(seedIdx.data()),
thrust::raw_pointer_cast(edgeIdx.data()+beginIdx),
thrust::raw_pointer_cast(shufIdx.data()));
cudaMemcpy(h_edgeIdx, thrust::raw_pointer_cast(shufIdx.data()),
uniqueNe*sizeof(int), cudaMemcpyDeviceToHost);
}
else
{
cudaMemcpy(h_edgeIdx, thrust::raw_pointer_cast(edgeIdx.data()+beginIdx),
uniqueNe*sizeof(int), cudaMemcpyDeviceToHost);
}
// Vertex cluster: seed/disjoint pair generation on CPU with conditional-loops
int prevNum = 0;
if(b>0) prevNum = h_nvIn[b-1];
const int vtNum = h_nvIn[b] - prevNum;
clusterVertices(Ne, uniqueNe, vtNum, prevNum, h_edgeOut, &h_edgeIdx[0],
h_nv2Remove[b], h_vtReplace);
beginIdx = endIdx;
cudaFree(edgeCost);
delete[] h_edgeIdx;
}
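    // vtReplace encoding produced by clusterVertices:
    //   vtReplace[v] <  0 : v is contracted into the representative vertex -vtReplace[v]
    //   vtReplace[v] >= 0 : v is a representative (or untouched); the value counts the vertices
    //                       merged into it, excluding v itself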
cudaMemcpy(vtReplace, h_vtReplace, Nv*sizeof(int), cudaMemcpyHostToDevice);
// Vertex cluster contraction on GPU in parallel, and compute nvOut
numGrid = int(Nv/1024) + 1;
VertexClusterQuadrics<<<numGrid,1024>>>(B, nvIn, vtReplace, vertexIn, vertexQuadric, vertexOut);
VertexClusterContraction<<<numGrid,1024>>>(B, nvIn, vtReplace, vertexIn, vertexQuadric, vertexOut);
//cudaDeviceSynchronize();
    // Label degenerate faces (w/o sliver triangles), and compute mfOut
int* isKept;
cudaMalloc(&isKept, Nv*sizeof(int));
cudaMemset(isKept,0,Nv*sizeof(int));
numGrid = int(Nf/1024) + 1;
labelDegenerateTriangles<<<numGrid,1024>>>(B, mfIn, vtReplace, vertexOut, faceIn, faceOut,
isDegenerate, isKept, mfOut);
//cudaDeviceSynchronize();
    // update vertex indices (i.e. vtMap); some vertices no longer exist because of degenerate faces
cudaMemcpy(vtMap, isKept, Nv*sizeof(int), cudaMemcpyDeviceToDevice);
thrust::inclusive_scan(thrust::device, vtMap, vtMap+Nv, vtMap);
sizeofOutput<<<1,1>>>(B, nvIn, vtMap, nvOut);
numGrid = int(Nv/1024) + 1;
getIOmap<<<numGrid,1024>>>(Nv, isKept, vtMap);
//cudaDeviceSynchronize();
// update vertex indices of the triangles
numGrid = int(Nf/1024) + 1;
updateFaces<<<numGrid,1024>>>(Nf, vtMap, isDegenerate, faceOut);
//cudaDeviceSynchronize();
// free the cpu and gpu memory
cudaFree(vertexQuadric);
cudaFree(edgeOut);
cudaFree(isKept);
delete[] h_nvIn;
delete[] h_mfIn;
delete[] h_nv2Remove;
    delete[] h_edgeOut;
    delete[] h_vtReplace;
}
__global__ void update_repB(const int nvA, int* repA, int* mapA, int* repB)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<nvA)
{
int vi = idx;
int vo = mapA[vi];
if (vo>=0 && repB[vo]>=0)
repB[vo] = vi;
}
}
__global__ void update_repA(const int nvA, int* repA, int* mapA, int* repB)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<nvA)
{
int vi = idx;
if (repA[vi]<0)
vi = -repA[vi];
int vo = mapA[vi];
if (vo>=0 && repB[vo]<0)
{
vo = -repB[vo];
repA[idx] = -repB[vo];
if (repB[vo]<0)
printf("update_repA Error: repB[vo]=%d<0!\n", repB[vo]);
atomicAdd(&repA[repB[vo]],1);
}
}
}
__global__ void update_mapA(const int nvA, int* repA, int* mapA, int* repB, const int* mapB)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx<nvA)
{
int vi = idx;
int vo = mapA[vi];
if (vo>=0)
mapA[vi] = mapB[vo];
else
mapA[vi] = -1;
}
}
void combineClustersLauncher(const int nvA, const int nvB, const int* repA, const int* mapA,
const int* repB, const int* mapB, int* repOut, int* mapOut)
{
cudaMemcpy(repOut, repA, nvA*sizeof(int), cudaMemcpyDeviceToDevice);
cudaMemcpy(mapOut, mapA, nvA*sizeof(int), cudaMemcpyDeviceToDevice);
int* repBcopy;
cudaMalloc(&repBcopy, nvB*sizeof(int));
cudaMemcpy(repBcopy, repB, nvB*sizeof(int), cudaMemcpyDeviceToDevice);
int numGrid = int(nvA/1024) + 1;
update_repB<<<numGrid,1024>>>(nvA, repOut, mapOut, repBcopy);
update_repA<<<numGrid,1024>>>(nvA, repOut, mapOut, repBcopy);
update_mapA<<<numGrid,1024>>>(nvA, repOut, mapOut, repBcopy, mapB);
cudaFree(repBcopy);
}
__global__ void count_vertex_adjface(int NfIn, const int* face, const int* vtMap, int* nfCount)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (fcIdx < NfIn)
{
int v[3] = {face[fcIdx*3], face[fcIdx*3+1], face[fcIdx*3+2]};
for(int k=0;k<3;k++)
{
int vi = v[k];
int vo = vtMap[vi];
if (vo>=0)
atomicAdd(&nfCount[vo],1);
}
}
}
void countVertexAdjfaceLauncher(int NfIn, const int* face, const int* vtMap, int* nfCount)
{
int numGrid = int(NfIn/1024) + 1;
count_vertex_adjface<<<numGrid,1024>>>(NfIn, face, vtMap, nfCount);
}
__global__ void compute_vertex_geometry(int Nf, const int* face, const float* faceGeo, float* vertexGeo)
{
int fcIdx = blockIdx.x*blockDim.x + threadIdx.x;
if (fcIdx < Nf)
{
int v[3] = {face[fcIdx*3], face[fcIdx*3+1], face[fcIdx*3+2]};
float face_nx = faceGeo[fcIdx*4];
float face_ny = faceGeo[fcIdx*4+1];
float face_nz = faceGeo[fcIdx*4+2];
float face_A = faceGeo[fcIdx*4+3]; // area
for(int k=0;k<3;k++)
{
int vk = v[k];
atomicAdd(&vertexGeo[vk*4], face_A*face_nx);
atomicAdd(&vertexGeo[vk*4+1],face_A*face_ny);
atomicAdd(&vertexGeo[vk*4+2],face_A*face_nz);
atomicAdd(&vertexGeo[vk*4+3],face_A/3);
}
}
}
void computeVertexGeometryLauncher(int Nf, const int* face, const float* faceGeo, float* vertexGeo)
{
int numGrid = int(Nf/1024) + 1;
compute_vertex_geometry<<<numGrid,1024>>>(Nf, face, faceGeo, vertexGeo);
}
|
27c8aaa3af9e38a0f0a931dc437a2d16d5e8b4a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
A GPU implementation of Andersen's analysis
Copyright (c) 2012 The University of Texas at Austin
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA, or see <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html>.
Author: Mario Mendez-Lojo
*/
#include "andersen.h"
#include <thrust/adjacent_difference.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/unique.h>
#include <thrust/functional.h>
#include <thrust/remove.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
using namespace thrust;
__constant__ uint __storeStart__;
__constant__ uint __loadInvStart__;
/**
* number of variables of the input program.
*/
__constant__ uint __numVars__;
__constant__ uint* __ptsConstraints__;
__constant__ uint __numPtsConstraints__;
__constant__ uint* __copyConstraints__;
__constant__ uint __numCopyConstraints__;
__constant__ uint* __loadConstraints__;
__constant__ uint __numLoadConstraints__;
__constant__ uint* __storeConstraints__;
__constant__ uint __numStoreConstraints__;
__device__ uint __numStore__ = 0;
__constant__ uint* __gepInv__;
__constant__ uint __numGepInv__;
__constant__ uint* __size__;
__constant__ uint* __initialRep__;
__constant__ uint* __initialNonRep__;
__constant__ uint __numInitialRep__;
__constant__ uint* __nextVar__;
/**
* Table of indexes to the information inferred by HCD.
* Each entry is a pair (index, index + delta) that refers to __hcdTable__
*/
__constant__ uint* __hcdIndex__;
__constant__ uint __numHcdIndex__;
/**
 * List of tuples (y, x_0, ..., x_(delta-2)) where pts(*y) = pts(x_0) = ... = pts(x_(delta-2))
* The equivalences have been detected during the offline phase of HCD, executed in the CPU
*/
__constant__ uint* __hcdTable__;
__constant__ uint __numHcdTable__;
/**
* Representative array
*/
__constant__ volatile uint* __rep__; // HAS to be volatile
/**
* array of elements containing all the edges in the graph.
*/
__constant__ volatile uint* __edges__; // HAS to be volatile
__constant__ uint* __graph__;
__constant__ uint* __lock__;
__constant__ uint* __key__;
__constant__ uint* __val__;
__constant__ uint* __keyAux__;
__device__ uint __numKeysCounter__ = 0;
__device__ uint __numKeys__;
__constant__ uint* __currPtsHead__;
__device__ uint __counter__ = 0;
__device__ uint __max__ = 0;
__device__ uint __min__ = 0;
__device__ bool __done__ = true;
__device__ uint __error__;
__device__ uint __worklistIndex0__ = 0;
__device__ uint __worklistIndex1__ = 1;
uint createTime = 0;
//////////// utility functions for the GPU /////////
__device__ uint __errorCode__ = 0;
__device__ uint __errorLine__ = 0;
__device__ char* __errorMsg__;
__device__ inline uint nextPowerOfTwo(uint v) {
return 1U << (uintSize * 8 - __clz(v - 1));
}
__device__ inline uint __count(int predicate) {
const uint ballot = __ballot(predicate);
return __popc(ballot);
}
__device__ inline uint isFirstThreadOfWarp(){
return !threadIdx.x;
}
__device__ inline uint getWarpIdInGrid(){
return (blockIdx.x * (blockDim.x * blockDim.y / WARP_SIZE) + threadIdx.y);
}
__device__ inline uint isFirstWarpOfGrid(){
return !(blockIdx.x || threadIdx.y);
}
__device__ inline uint isFirstWarpOfBlock(){
return !threadIdx.y;
}
__device__ inline uint getThreadIdInBlock(){
return mul32(threadIdx.y) + threadIdx.x;
}
__device__ inline uint isFirstThreadOfBlock(){
return !getThreadIdInBlock();
}
__device__ inline uint getThreadIdInGrid(){
return mul32(getWarpIdInGrid()) + threadIdx.x;
}
__device__ inline uint getThreadsPerBlock() {
return blockDim.x * blockDim.y;
}
__device__ inline uint isLastThreadOfBlock(){
return getThreadIdInBlock() == getThreadsPerBlock() - 1;
}
__device__ inline uint getWarpsPerBlock() {
return blockDim.y;
}
__device__ inline uint getWarpsPerGrid() {
return blockDim.y * gridDim.x;
}
__device__ inline uint getThreadsPerGrid() {
return mul32(getWarpsPerGrid());
}
__device__ inline uint getBlockIdInGrid(){
return blockIdx.x;
}
__device__ inline uint getBlocksPerGrid(){
return gridDim.x;
}
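/**
 * Software grid-wide barrier built on an atomic counter: the first thread of each block bumps the
 * counter and spins until the last arriving block wraps it back to zero. This pattern is only safe
 * when every block of the kernel is resident on the GPU at the same time, which the launch
 * configuration must guarantee.
 */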
__device__ void syncAllThreads() {
__syncthreads();
uint to = getBlocksPerGrid() - 1;
if (isFirstThreadOfBlock()) {
volatile uint* counter = &__counter__;
if (atomicInc((uint*) counter, to) < to) {
while (*counter); // spinning...
}
}
__syncthreads();
}
__device__ uint getValAtThread(volatile uint* const _shared_, const uint myVal, const uint i) {
if (threadIdx.x == i) {
_shared_[threadIdx.y] = myVal;
}
return _shared_[threadIdx.y];
}
__device__ uint getValAtThread(const uint myVal, const uint i) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (threadIdx.x == i) {
_shared_[threadIdx.y] = myVal;
}
return _shared_[threadIdx.y];
}
/*
* Forward declarations
*/
__device__ void insertAll(const uint storeIndex, uint* _shared_, uint numFrom, bool sort = true);
template<uint toRel, uint fromRel>
__device__ void unionAll(const uint to, uint* _shared_, uint numFrom, bool sort = true);
template<uint toRel, uint fromRel>
__device__ void map(const uint to, const uint base, const uint myBits, uint* _shared_,
uint& numFrom);
__device__ inline uint mul960(uint num) {
// 960 = 1024 - 64
return (num << 10) - (num << 6);
}
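/**
 * Sparse bit-vector element layout (inferred from the accesses below and in decodeWord/accumulate;
 * BASE and NEXT come from andersen.h): each element is ELEMENT_WIDTH consecutive words, where
 * words [0, BASE) form a bitmap over the 960 variables [960*base, 960*(base+1)), word BASE stores
 * 'base', and word NEXT stores the index of the next element in the list (NIL terminates it).
 */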
__device__ inline uint __graphGet__(const uint row, const uint col) {
return __edges__[row + col];
}
__device__ inline uint __graphGet__(const uint pos) {
return __graph__[pos];
}
__device__ inline void __graphSet__(const uint row, const uint col, const uint val) {
__edges__[row + col] = val;
}
__device__ inline void __graphSet__(const uint pos, const uint val) {
__graph__[pos] = val;
}
__device__ inline uint _sharedGet_(volatile uint* _shared_, uint index, uint offset) {
return _shared_[index + offset];
}
__device__ inline void _sharedSet_(volatile uint* _shared_, uint index, uint offset, uint val) {
_shared_[index + offset] = val;
}
__device__ inline uint getHeadIndex(uint var, uint rel){
if (rel == NEXT_DIFF_PTS) {
return NEXT_DIFF_PTS_START - mul32(var);
}
if (rel == COPY_INV) {
return COPY_INV_START + mul32(var);
}
if (rel == CURR_DIFF_PTS) {
return CURR_DIFF_PTS_START - mul32(var);
}
if (rel == PTS) {
return mul32(var);
}
if (rel == STORE) {
return __storeStart__ + mul32(var);
}
// it has to be LOAD_INV, right?
return __loadInvStart__ + mul32(var);
}
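// Head-index layout implied above: PTS heads start at offset 0, STORE at __storeStart__, LOAD_INV
// at __loadInvStart__ and COPY_INV at COPY_INV_START, all growing upward by mul32(var); the
// CURR_DIFF_PTS and NEXT_DIFF_PTS heads grow downward from their *_START offsets (note the subtraction).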
__device__ inline uint getNextDiffPtsHeadIndex(uint var){
return NEXT_DIFF_PTS_START - mul32(var);
}
__device__ inline uint getCopyInvHeadIndex(uint var){
return COPY_INV_START + mul32(var);
}
__device__ inline uint getCurrDiffPtsHeadIndex(uint var){
return CURR_DIFF_PTS_START - mul32(var);
}
__device__ inline uint getPtsHeadIndex(uint var){
return mul32(var);
}
__device__ inline uint getStoreHeadIndex(uint var){
return __storeStart__ + mul32(var);
}
__device__ inline uint getLoadInvHeadIndex(uint var){
return __loadInvStart__ + mul32(var);
}
__device__ inline int isEmpty(uint var, uint rel) {
const uint headIndex = getHeadIndex(var, rel);
return __graphGet__(headIndex, BASE) == NIL;
}
/**
* Mask that tells whether the variables contained in an element have size > offset
* There is one such mask per offset.
* stored in compressed format
*/
__constant__ uint* __offsetMask__;
/**
* Number of rows needed to represent the mask of ONE offset.
* = ceil(numObjectVars / DST_PER_ELEMENT), since non-object pointers have size 1.
*/
__constant__ uint __offsetMaskRowsPerOffset__;
__device__ inline uint __offsetMaskGet__(const uint base, const uint col, const uint offset) {
return __offsetMask__[mul32((offset - 1) * __offsetMaskRowsPerOffset__ + base) + col];
}
__device__ inline void __offsetMaskSet__(const uint base, const uint col, const uint offset,
const uint val) {
__offsetMask__[mul32((offset - 1) * __offsetMaskRowsPerOffset__ + base) + col] = val;
}
/**
* Mask that tells whether the pts-to of an element changed.
* the BASE and NEXT words are always equal to 0
* stored in compressed format
*/
__constant__ uint* __diffPtsMask__;
__device__ inline uint __diffPtsMaskGet__(const uint base, const uint col) {
return __diffPtsMask__[mul32(base) + col];
}
__device__ inline void __diffPtsMaskSet__(const uint base, const uint col, const uint val) {
__diffPtsMask__[mul32(base) + col] = val;
}
/**
* Index of the next free element in the corresponding free list.
* The index is given in words, not bytes or number of elements.
*/
__device__ uint __ptsFreeList__,__nextDiffPtsFreeList__, __currDiffPtsFreeList__, __otherFreeList__;
__device__ inline uint mallocPts(uint size = ELEMENT_WIDTH) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicAdd(&__ptsFreeList__, size);
}
return _shared_[threadIdx.y];
}
__device__ inline uint mallocNextDiffPts() {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicSub(&__nextDiffPtsFreeList__, ELEMENT_WIDTH);
}
return _shared_[threadIdx.y];
}
__device__ inline uint mallocCurrDiffPts() {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicSub(&__currDiffPtsFreeList__, ELEMENT_WIDTH);
}
return _shared_[threadIdx.y];
}
__device__ inline uint mallocOther() {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicAdd(&__otherFreeList__, ELEMENT_WIDTH);
}
return _shared_[threadIdx.y];
}
__device__ inline uint mallocIn(uint rel) {
if (rel == NEXT_DIFF_PTS) {
return mallocNextDiffPts();
}
if (rel >= COPY_INV) {
return mallocOther();
}
if (rel == PTS) {
return mallocPts();
}
if (rel == CURR_DIFF_PTS) {
return mallocCurrDiffPts();
}
//printf("WTF! (%u)", rel);
return 0;
}
/**
* Get and increment the current worklist index
* Granularity: warp
* @param delta Number of elements to be retrieved at once
* @return Worklist index 'i'. All the work items in the [i, i + delta) interval are guaranteed
* to be assigned to the current warp.
*/
__device__ inline uint getAndIncrement(const uint delta) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicAdd(&__worklistIndex0__, delta);
}
return _shared_[threadIdx.y];
}
__device__ inline uint getAndIncrement(uint* counter, uint delta) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicAdd(counter, delta);
}
return _shared_[threadIdx.y];
}
/**
* Lock a given variable
* Granularity: warp
* @param var Id of the variable
* @return A non-zero value if the operation succeeded
*/
__device__ inline uint lock(const uint var) {
return __any(isFirstThreadOfWarp() && (atomicCAS(__lock__ + var, UNLOCKED, LOCKED)
== UNLOCKED));
}
/**
* Unlock a variable
* Granularity: warp or thread
* @param var Id of the variable
*/
__device__ inline void unlock(const uint var) {
__lock__[var] = UNLOCKED;
}
__device__ inline int isRep(const uint var) {
return __rep__[var] == var;
}
__device__ inline void setRep(const uint var, const uint rep) {
__rep__[var] = rep;
}
__device__ inline uint getRep(const uint var) {
return __rep__[var];
}
__device__ inline uint getRepRec(const uint var) {
uint rep = var;
uint repRep = __rep__[rep];
while (repRep != rep) {
rep = repRep;
repRep = __rep__[rep];
}
return rep;
}
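// getRepRec is the 'find' operation of a union-find over __rep__ (no path compression is applied
// here); getRep/setRep read and write a single link.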
__device__ ulongint recordStartTime() {
__shared__ volatile ulongint _ret_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_ret_[threadIdx.y] = clock();
}
return _ret_[threadIdx.y];
}
__device__ void recordElapsedTime(ulongint start){
if (isFirstThreadOfWarp()) {
ulongint delta;
ulongint end = clock();
if (end > start) {
delta = end - start;
} else {
delta = end + (0xffffffff - start);
}
double time = TICKS_TO_MS(delta);
printf("Block %u, Warp: %u: %8.2f ms.\n", blockIdx.x, threadIdx.y, time);
}
}
__device__ inline uint decodeWord(const uint base, const uint word, const uint bits) {
uint ret = mul960(base) + mul32(word);
return (isBitActive(bits, threadIdx.x)) ? __rep__[ret + threadIdx.x] : NIL;
}
__device__ inline void swap(volatile uint* const keyA, volatile uint* const keyB, const uint dir) {
uint n1 = *keyA;
uint n2 = *keyB;
if ((n1 < n2) != dir) {
*keyA = n2;
*keyB = n1;
}
}
// Bitonic Sort, in ascending order using one WARP
// precondition: size of _shared_ has to be a power of 2
__device__ inline void bitonicSort(volatile uint* const _shared_, const uint to) {
for (int size = 2; size <= to; size <<= 1) {
for (int stride = size / 2; stride > 0; stride >>= 1) {
for (int id = threadIdx.x; id < (to / 2); id += WARP_SIZE) {
const uint myDir = ((id & (size / 2)) == 0);
uint pos = 2 * id - mod(id, stride);
volatile uint* start = _shared_ + pos;
swap(start, start + stride, myDir);
}
}
}
}
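// Expected usage (see removeDuplicates below): pad the buffer with NIL up to a power of two of at
// least 32 entries, bitonicSort it in ascending order, then call unique() to compact it.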
__device__ void blockBitonicSort(volatile uint* _shared_, uint to) {
uint idInBlock = getThreadIdInBlock();
for (int size = 2; size <= to; size <<= 1) {
for (int stride = size / 2; stride > 0; stride >>= 1) {
__syncthreads();
for (int id = idInBlock; id < (to / 2); id += getThreadsPerBlock()) {
const uint myDir = ((id & (size / 2)) == 0);
uint pos = 2 * id - mod(id, stride);
volatile uint* start = _shared_ + pos;
swap(start, start + stride, myDir);
}
}
}
}
/**
* Sort an array in ascending order.
* Granularity: block
* @param _shared_ list of integers
* @param to size of the sublist we want to process
*/
__device__ void blockSort(volatile uint* _shared_, uint to) {
uint size = max(nextPowerOfTwo(to), 32);
uint id = getThreadIdInBlock();
for (int i = to + id; i < size; i += getThreadsPerBlock()) {
_shared_[i] = NIL;
}
blockBitonicSort(_shared_, size);
__syncthreads();
}
/**
* Remove duplicates on a sorted sequence, equivalent to Thrust 'unique' function but uses one warp.
* If there are NILS, they are treated like any other number
* precondition: the input list is sorted
* precondition: to >= 32
* precondition: shared_[-1] exists and is equal to NIL
* Granularity: warp
*
* @param _shared_ list of integers
* @param to size of the sublist we want to process
* @return number of unique elements in the input.
*/
__device__ inline uint unique(volatile uint* const _shared_, uint to) {
uint startPos = 0;
uint myMask = (1 << (threadIdx.x + 1)) - 1;
for (int id = threadIdx.x; id < to; id += WARP_SIZE) {
uint myVal = _shared_[id];
uint fresh = __ballot(myVal != _shared_[id - 1]);
// pos = starting position + number of 1's to my right (incl. myself) minus one
uint pos = startPos + __popc(fresh & myMask) - 1;
_shared_[pos] = myVal;
startPos += __popc(fresh);
}
return startPos;
}
__device__ uint removeDuplicates(volatile uint* const _shared_, const uint to) {
const uint size = max(nextPowerOfTwo(to), 32);
for (int i = to + threadIdx.x; i < size; i += WARP_SIZE) {
_shared_[i] = NIL;
}
bitonicSort(_shared_, size);
uint ret = unique(_shared_, size);
return (size > to) ? ret - 1 : ret;
}
__device__ void print(uint* m, const uint size) {
if (!isFirstThreadOfWarp())
return;
//printf("[");
for (int i = 0; i < size; i++) {
//printf("%u", m[i]);
if (i < size - 1) {
//printf(", ");
}
}
//printf ("]");
}
__device__ void print(int* m, const uint size) {
if (!isFirstThreadOfWarp())
return;
//printf("[");
for (int i = 0; i < size; i++) {
//printf("%d", m[i]);
if (i < size - 1) {
//printf(", ");
}
}
//printf ("]");
}
__device__ volatile uint __printBuffer__[PRINT_BUFFER_SIZE];
// TODO: assumes we print with 1 block and 1 warp...
__device__ void printElementAsSet(const uint base, volatile uint myBits, bool& first) {
for (int i = 0; i < BASE; i++) {
uint word = getValAtThread(myBits, i);
uint myDst = decodeWord(base, i, word);
for (int j = 0; j < WARP_SIZE; j++) {
uint dst = getValAtThread(myDst, j);
if (dst != NIL && isFirstThreadOfWarp()) {
if (first) {
//printf("%u", dst);
} else {
//printf(", %u", dst);
}
first = false;
}
}
}
}
__device__ void printDiffPtsMask() {
uint numVars = __numVars__;
if (isFirstThreadOfWarp()) {
//printf("DIFF_PTS_MASK: [");
}
bool first = true;
int to = ceil((float) numVars / (float) ELEMENT_CARDINALITY);
for (int base = 0; base < to; base++) {
uint myBits = __diffPtsMaskGet__(base, threadIdx.x);
printElementAsSet(base, myBits, first);
}
if (isFirstThreadOfWarp())
;//printf("]\n");
}
__global__ void __printDiffPtsMask() {
printDiffPtsMask();
}
__device__ void printOffsetMask(uint numObjectsVars, uint offset) {
if (isFirstThreadOfWarp()) {
//printf("MASK for offset %u: [", offset);
}
bool first = true;
int to = __offsetMaskRowsPerOffset__;
for (int base = 0; base < to; base++) {
uint myBits = __offsetMaskGet__(base, threadIdx.x, offset);
printElementAsSet(base, myBits, first);
}
if (isFirstThreadOfWarp())
;//printf("]\n");
}
__device__ void printOffsetMasks(uint numObjectsVars, uint maxOffset) {
if (!isFirstWarpOfGrid()) {
return;
}
for (int i = 1; i <= maxOffset; i++) {
printOffsetMask(numObjectsVars, i);
}
}
__global__ void __printOffsetMasks(uint numObjectsVars, uint maxOffset) {
printOffsetMasks(numObjectsVars, maxOffset);
}
__device__ void printElementRec(uint index) {
volatile uint myBits = __graphGet__(index, threadIdx.x);
if (__all(myBits == NIL)) {
return;
}
while (index != NIL) {
//printf("Thread: %u, value: %u\n", threadIdx.x, myBits);
index = __graphGet__(index, NEXT);
if (index != NIL) {
myBits = __graphGet__(index, threadIdx.x);
}
}
}
__device__ void printSharedElementRec(uint* volatile _shared_, uint index) {
volatile uint myBits = _sharedGet_(_shared_, index, threadIdx.x);
if (__all(myBits == NIL)) {
return;
}
while (index != NIL) {
//printf("Thread: %u, value: %u\n", threadIdx.x, myBits);
index = _sharedGet_(_shared_, index, NEXT);
if (index != NIL) {
myBits = _sharedGet_(_shared_, index, threadIdx.x);
}
}
}
__device__ void accumulate(const uint base, uint myBits, uint& numFrom, uint rel) {
uint nonEmpty = __ballot(myBits && threadIdx.x < BASE);
while (nonEmpty) {
uint pos = __ffs(nonEmpty) - 1;
nonEmpty &= (nonEmpty - 1);
uint bits = getValAtThread(myBits, pos);
uint numOnes = __popc(bits);
//cudaAssert(numFrom + numOnes > PRINT_BUFFER_SIZE);
uint var = mul960(base) + mul32(pos) + threadIdx.x;
// PTS edges: we do not use representatives. In all the other relations we do.
var = isBitActive(bits, threadIdx.x) ? (rel > CURR_DIFF_PTS ? __rep__[var] : var) : NIL;
pos = numFrom + __popc(bits & ((1 << threadIdx.x) - 1));
if (var != NIL) {
__printBuffer__[pos] = var;
}
numFrom += numOnes;
}
}
__device__ void printEdges(const uint src, const uint rel, const uint printEmptySets) {
if (isEmpty(src, rel) && !printEmptySets) {
return;
}
if (isFirstThreadOfWarp()) {
//printf("%d => [", src);
}
uint index = getHeadIndex(src, rel);
uint numFrom = 0;
do {
uint myBits = __graphGet__(index, threadIdx.x);
uint base = __graphGet__(index, BASE);
if (base == NIL) {
break;
}
index = __graphGet__(index, NEXT);
accumulate(base, myBits, numFrom, rel);
} while (index != NIL);
if (numFrom) {
if (rel > CURR_DIFF_PTS) {
numFrom = removeDuplicates(__printBuffer__, numFrom);
}
for (int i = 0; i < numFrom; i++) {
uint val = __printBuffer__[i]; // has to be non-NIL
if (isFirstThreadOfWarp()) {
if (!i) {
//printf("%u", val);
} else {
//printf(", %u", val);
}
}
}
}
if (isFirstThreadOfWarp()) {
//printf("]\n");
}
}
__device__ void printEdgesOf(const uint src, int rel) {
if (isFirstThreadOfWarp()) {
//printf("%s of ", getName(rel));
}
printEdges(src, rel, 1);
}
__device__ void printEdgesStartingAt(uint index, int rel) {
if (isFirstThreadOfWarp()) {
//printf("%s @ %u => [", getName(rel), index);
}
uint numFrom = 0;
do {
uint myBits = __graphGet__(index, threadIdx.x);
uint base = __graphGet__(index, BASE);
if (base == NIL) {
break;
}
index = __graphGet__(index, NEXT);
accumulate(base, myBits, numFrom, rel);
} while (index != NIL);
if (numFrom) {
if (rel > CURR_DIFF_PTS) {
numFrom = removeDuplicates(__printBuffer__, numFrom);
}
for (int i = 0; i < numFrom; i++) {
uint val = __printBuffer__[i]; // has to be non-NIL
if (isFirstThreadOfWarp()) {
if (!i) {
//printf("%u", val);
} else {
//printf(", %u", val);
}
}
}
}
if (isFirstThreadOfWarp()) {
//printf("]\n");
}
}
__device__ void printEdgesOf(uint src) {
for (int i = 0; i <= LAST_DYNAMIC_REL; i++) {
printEdgesOf(src, i);
}
}
__global__ void __printEdgesOf(uint src, int rel) {
printEdgesOf(src, rel);
}
__global__ void __printEdgesOf(uint src) {
printEdgesOf(src);
}
__device__ void printEdges(int rel) {
if (isFirstThreadOfWarp()) {
//printf("%s edges:\n", getName(rel));
}
for (int src = 0; src < __numVars__; src++) {
printEdges(src, rel, 0);
}
}
__global__ void __printEdges(int rel) {
printEdges(rel);
}
__device__ void printGepEdges() {
uint numVarsGepInv = __numGepInv__;
if (isFirstThreadOfWarp()) {
//printf("GEP_INV edges:\n");
}
volatile __shared__ uint _shared_[WARP_SIZE];
for (int i = 0; i < numVarsGepInv; i += WARP_SIZE) {
_shared_[threadIdx.x] = __gepInv__[i + threadIdx.x];
for (int j= 0; j < WARP_SIZE && _shared_[j] != NIL; j +=2) {
uint dst = _shared_[j];
uint srcOffset = _shared_[j + 1];
if (isFirstThreadOfWarp()) {
//printf("%u => %u (%u)\n", dst, id(srcOffset), offset(srcOffset));
}
}
}
}
__global__ void __printGepEdges() {
printGepEdges();
}
__device__ void printConstraints(uint* __constraints__, const uint numConstraints) {
volatile __shared__ uint _shared_[WARP_SIZE];
for (int i = 0; i < numConstraints * 2; i += WARP_SIZE) {
_shared_[threadIdx.x] = __constraints__[i + threadIdx.x];
for (int j = 0; j < WARP_SIZE; j += 2) {
if (i + j >= numConstraints * 2) {
return;
}
uint src = _shared_[j];
uint dst = _shared_[j + 1];
if (isFirstThreadOfWarp()) {
//printf("%u => %u\n", src, dst);
}
}
}
}
__device__ int checkForErrors(uint var, uint rel) {
uint index = getHeadIndex(var, rel);
uint lastBase = 0;
uint first = 1;
uint bits = __graphGet__(index, threadIdx.x);
if (__all(bits == NIL)) {
return 0;
}
do {
bits = __graphGet__(index, threadIdx.x);
if (__all(threadIdx.x >= BASE || bits == NIL)) {
if (isFirstThreadOfWarp()) {
//printf("ERROR: empty element at %s of %u \n", getName(rel), var);
}
//printElementRec(getHeadIndex(var, rel));
__error__ = 1;
return 1;
}
uint base = __graphGet__(index, BASE);
index = __graphGet__(index, NEXT);
if (base == NIL) {
if (isFirstThreadOfWarp()) {
//printf("ERROR: inconsistency at %s of %u: BASE is NIL but other word is not\n",
//getName(rel), var);
}
printElementRec(getHeadIndex(var, rel));
__error__ = 1;
return 1;
}
if (!first && base <= lastBase) {
if (isFirstThreadOfWarp()) {
//printf("ERROR: BASE(element) = %u <= BASE(prev(element)) = %u at %s of %u\n", base,
//lastBase, getName(rel), var);
}
//printElementRec(getHeadIndex(var, rel));
__error__ = 1;
return 1;
}
first = 0;
lastBase = base;
} while (index != NIL);
return 0;
}
__global__ void checkForErrors(uint rel) {
uint numVars = __numVars__;
int inc = mul32(getWarpsPerGrid());
int init = mul32(getWarpIdInGrid());
for (int initVar = init; initVar < numVars; initVar += inc) {
for (int i = 0; i < WARP_SIZE; i++) {
uint var = initVar + i;
if (var > numVars || checkForErrors(var, rel)) {
return;
}
}
}
}
__device__ uint hashCode(uint index) {
__shared__ uint _sh_[DEF_THREADS_PER_BLOCK];
volatile uint* _shared_ = &_sh_[threadIdx.y * WARP_SIZE];
uint myRet = 0;
uint bits = __graphGet__(index + threadIdx.x);
uint base = __graphGet__(index + BASE);
if (base == NIL) {
return 0;
}
while (1) {
uint elementHash = base * (30 + threadIdx.x) ^ bits;
if (bits) {
myRet ^= elementHash;
}
index = __graphGet__(index + NEXT);
if (index == NIL) {
break;
}
bits = __graphGet__(index + threadIdx.x);
base = __graphGet__(index + BASE);
}
_shared_[threadIdx.x] = myRet;
if (threadIdx.x < 14) {
_shared_[threadIdx.x] ^= _shared_[threadIdx.x + WARP_SIZE / 2];
}
if (threadIdx.x < 8) {
_shared_[threadIdx.x] ^= _shared_[threadIdx.x + WARP_SIZE / 4];
}
if (threadIdx.x < 4) {
_shared_[threadIdx.x] ^= _shared_[threadIdx.x + WARP_SIZE / 8];
}
return _shared_[0] ^ _shared_[1] ^ _shared_[2] ^ _shared_[3];
}
__device__ uint equal(uint index1, uint index2) {
uint bits1 = __graphGet__(index1 + threadIdx.x);
uint bits2 = __graphGet__(index2 + threadIdx.x);
while (__all((threadIdx.x == NEXT) || (bits1 == bits2))) {
index1 = __graphGet__(index1 + NEXT);
index2 = __graphGet__(index2 + NEXT);
if (index1 == NIL || index2 == NIL) {
return index1 == index2;
}
bits1 = __graphGet__(index1 + threadIdx.x);
bits2 = __graphGet__(index2 + threadIdx.x);
}
return 0;
}
__device__ uint size(uint var, uint rel) {
__shared__ uint _sh_[DEF_THREADS_PER_BLOCK];
volatile uint* _shared_ = &_sh_[threadIdx.y * WARP_SIZE];
if (isEmpty(var, rel)) {
return 0;
}
uint index = getHeadIndex(var, rel);
uint myRet = 0;
do {
uint myBits = __graphGet__(index, threadIdx.x);
index = __graphGet__(index, NEXT);
myRet += __popc(myBits);
} while (index != NIL);
_shared_[threadIdx.x] = threadIdx.x >= BASE ? 0 : myRet;
for (int stride = WARP_SIZE / 2; stride > 0; stride >>= 1) {
if (threadIdx.x < stride) {
_shared_[threadIdx.x] += _shared_[threadIdx.x + stride];
}
}
return _shared_[0];
}
__device__ void unionToCopyInv(const uint to, const uint fromIndex, uint* const _shared_,
bool applyCopy = true) {
uint toIndex = getCopyInvHeadIndex(to);
if (fromIndex == toIndex) {
return;
}
uint fromBits = __graphGet__(fromIndex + threadIdx.x);
uint fromBase = __graphGet__(fromIndex + BASE);
if (fromBase == NIL) {
return;
}
uint fromNext = __graphGet__(fromIndex + NEXT);
uint toBits = __graphGet__(toIndex + threadIdx.x);
uint toBase = __graphGet__(toIndex + BASE);
uint toNext = __graphGet__(toIndex + NEXT);
uint numFrom = 0;
uint newVal;
while (1) {
if (toBase > fromBase) {
if (toBase == NIL) {
newVal = fromNext == NIL ? NIL : mallocOther();
} else {
newVal = mallocOther();
__graphSet__(newVal + threadIdx.x, toBits);
}
fromBits = threadIdx.x == NEXT ? newVal : fromBits;
__graphSet__(toIndex + threadIdx.x, fromBits);
if (applyCopy) {
map<NEXT_DIFF_PTS, PTS>(to, fromBase, fromBits, _shared_, numFrom);
}
if (fromNext == NIL) {
break;
}
toIndex = newVal;
fromBits = __graphGet__(fromNext + threadIdx.x);
fromBase = __graphGet__(fromNext + BASE);
fromNext = __graphGet__(fromNext + NEXT);
} else if (toBase == fromBase) {
uint orBits = fromBits | toBits;
uint diffs = __any(orBits != toBits && threadIdx.x < NEXT);
bool nextWasNil = false;
if (toNext == NIL && fromNext != NIL) {
toNext = mallocOther();
nextWasNil = true;
}
uint newBits = threadIdx.x == NEXT ? toNext : orBits;
if (newBits != toBits) {
__graphSet__(toIndex + threadIdx.x, newBits);
}
// if there was any element added to COPY_INV, apply COPY_INV rule
if (applyCopy && diffs) {
uint diffBits = fromBits & ~toBits;
map<NEXT_DIFF_PTS, PTS > (to, fromBase, diffBits, _shared_, numFrom);
}
//advance `to` and `from`
if (fromNext == NIL) {
break;
}
toIndex = toNext;
if (nextWasNil) {
toBits = NIL;
toBase = NIL;
toNext = NIL;
} else {
toBits = __graphGet__(toIndex + threadIdx.x);
toBase = __graphGet__(toIndex + BASE);
toNext = __graphGet__(toIndex + NEXT);
}
fromBits = __graphGet__(fromNext + threadIdx.x);
fromBase = __graphGet__(fromNext + BASE);
fromNext = __graphGet__(fromNext + NEXT);
} else { //toBase < fromBase
if (toNext == NIL) {
uint newNext = mallocOther();
__graphSet__(toIndex + NEXT, newNext);
toIndex = newNext;
toBits = NIL;
toBase = NIL;
} else {
toIndex = toNext;
toBits = __graphGet__(toNext + threadIdx.x);
toBase = __graphGet__(toIndex + BASE);
toNext = __graphGet__(toNext + NEXT);
}
}
}
if (applyCopy && numFrom) {
// flush pending unions
unionAll<NEXT_DIFF_PTS, PTS> (to, _shared_, numFrom);
}
}
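/**
 * Copy the chain described by (fromBits, fromNext) into the element at 'toIndex',
 * allocating one new element in the 'toRel' region per remaining source element.
 * Granularity: warp.
 */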
__device__ void clone(uint toIndex, uint fromBits, uint fromNext, const uint toRel) {
while (1) {
uint newIndex = fromNext == NIL ? NIL : mallocIn(toRel);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
__graphSet__(toIndex + threadIdx.x, val);
if (fromNext == NIL) {
break;
}
toIndex = newIndex;
fromBits = __graphGet__(fromNext + threadIdx.x);
fromNext = __graphGet__(fromNext + NEXT);
}
}
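/**
 * Union the element list starting at 'fromIndex' into the 'toRel' list of 'to'.
 * Granularity: warp. Standard merge of two lists sorted by base, allocating new
 * elements in the 'toRel' region when needed.
 */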
// toRel = any non-static relationship
__device__ void unionG2G(const uint to, const uint toRel, const uint fromIndex) {
uint toIndex = getHeadIndex(to, toRel);
uint fromBits = __graphGet__(fromIndex + threadIdx.x);
uint fromBase = __graphGet__(fromIndex + BASE);
if (fromBase == NIL) {
return;
}
uint fromNext = __graphGet__(fromIndex + NEXT);
uint toBits = __graphGet__(toIndex + threadIdx.x);
uint toBase = __graphGet__(toIndex + BASE);
if (toBase == NIL) {
clone(toIndex, fromBits, fromNext, toRel);
return;
}
uint toNext = __graphGet__(toIndex + NEXT);
while (1) {
if (toBase > fromBase) {
uint newIndex = mallocIn(toRel);
__graphSet__(newIndex + threadIdx.x, toBits);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
__graphSet__(toIndex + threadIdx.x, val);
// advance 'from'
if (fromNext == NIL) {
return;
}
toIndex = newIndex;
fromBits = __graphGet__(fromNext + threadIdx.x);
fromBase = __graphGet__(fromNext + BASE);
fromNext = __graphGet__(fromNext + NEXT);
} else if (toBase == fromBase) {
uint newToNext = (toNext == NIL && fromNext != NIL) ? mallocIn(toRel) : toNext;
uint orBits = fromBits | toBits;
uint newBits = threadIdx.x == NEXT ? newToNext : orBits;
if (newBits != toBits) {
__graphSet__(toIndex + threadIdx.x, newBits);
}
//advance `to` and `from`
if (fromNext == NIL) {
return;
}
fromBits = __graphGet__(fromNext + threadIdx.x);
fromBase = __graphGet__(fromNext + BASE);
fromNext = __graphGet__(fromNext + NEXT);
if (toNext == NIL) {
clone(newToNext, fromBits, fromNext, toRel);
return;
}
toIndex = newToNext;
toBits = __graphGet__(toNext + threadIdx.x);
toBase = __graphGet__(toNext + BASE);
toNext = __graphGet__(toNext + NEXT);
} else { // toBase < fromBase
if (toNext == NIL) {
toNext = mallocIn(toRel);
__graphSet__(toIndex + NEXT, toNext);
clone(toNext, fromBits, fromNext, toRel);
return;
}
toIndex = toNext;
toBits = __graphGet__(toNext + threadIdx.x);
toBase = __graphGet__(toNext + BASE);
toNext = __graphGet__(toNext + NEXT);
}
}
}
// WATCH OUT: ASSUMES fromRel==toRel
// like unionTo, but reusing the elements of 'from' (introduces sharing of elements)
// toRel = any non-static relationship
__device__ void unionG2GRecycling(const uint to, const uint toRel, uint fromIndex) {
uint fromBits = __graphGet__(fromIndex, threadIdx.x);
uint fromBase = __graphGet__(fromIndex, BASE);
if (fromBase == NIL) {
return;
}
uint toIndex = getHeadIndex(to, toRel);
uint toBits = __graphGet__(toIndex, threadIdx.x);
uint toBase = __graphGet__(toIndex, BASE);
if (toBase == NIL) {
__graphSet__(toIndex, threadIdx.x, fromBits);
return;
}
uint toNext = __graphGet__(toIndex, NEXT);
uint fromNext = __graphGet__(fromIndex, NEXT);
uint fromHeadIndex = fromIndex;
do {
if (toBase == fromBase) {
uint newToNext = (toNext == NIL) ? fromNext : toNext;
uint orBits = fromBits | toBits;
uint newBits = threadIdx.x == NEXT ? newToNext : orBits;
if (newBits != toBits) {
__graphSet__(toIndex, threadIdx.x, newBits);
}
//advance `to` and `from`
if (toNext == NIL || fromNext == NIL) { // done with current elt and there is no NEXT => exit
return;
}
fromIndex = fromNext;
fromBits = __graphGet__(fromIndex, threadIdx.x);
fromBase = __graphGet__(fromIndex, BASE);
fromNext = __graphGet__(fromIndex, NEXT);
toIndex = toNext;
toBits = __graphGet__(toIndex, threadIdx.x);
toBase = __graphGet__(toIndex, BASE);
toNext = __graphGet__(toIndex, NEXT);
} else if (toBase < fromBase) {
if (toNext == NIL) {
if (fromIndex == fromHeadIndex) {
fromIndex = mallocIn(toRel);
__graphSet__(fromIndex, threadIdx.x, fromBits);
}
__graphSet__(toIndex, NEXT, fromIndex);
return;
}
// advance 'to'
toIndex = toNext;
toBits = __graphGet__(toIndex, threadIdx.x);
toBase = __graphGet__(toIndex, BASE);
toNext = __graphGet__(toIndex, NEXT);
} else { // toBase > fromBase
if (fromIndex == fromHeadIndex) {
fromIndex = mallocIn(toRel);
}
__graphSet__(fromIndex, threadIdx.x, toBits);
int val = threadIdx.x == NEXT ? fromIndex : fromBits;
__graphSet__(toIndex, threadIdx.x, val);
toIndex = fromIndex; // toBits does not change
fromIndex = fromNext;
if (fromNext != NIL) {
//advance 'from'
fromBits = __graphGet__(fromIndex, threadIdx.x);
fromBase = __graphGet__(fromIndex, BASE);
fromNext = __graphGet__(fromIndex, NEXT);
}
}
} while (fromIndex != NIL);
}
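/**
 * Insert a single element (fromBase, fromBits) into the list starting at 'index',
 * keeping the list sorted by base. Granularity: warp.
 * Returns the index of the element that now contains 'fromBase', so consecutive
 * insertions can resume the search from that point.
 */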
__device__ uint addVirtualElement(uint index, const uint fromBase, const uint fromBits,
const uint toRel) {
for (;;) {
uint toBits = __graphGet__(index + threadIdx.x);
uint toBase = __graphGet__(index + BASE);
if (toBase == NIL) {
// can only happen if the adjacency list of `to` is empty
// cost: exactly one global write
__graphSet__(index + threadIdx.x, fromBits);
return index;
}
if (toBase == fromBase) {
// cost: at most one global write
uint orBits = toBits | fromBits;
if (orBits != toBits && threadIdx.x < NEXT) {
__graphSet__(index + threadIdx.x, orBits);
}
return index;
}
if (toBase < fromBase) {
uint toNext = getValAtThread(toBits, NEXT);
if (toNext == NIL) {
// appending; cost: two global writes
uint newIndex = mallocIn(toRel);
__graphSet__(newIndex + threadIdx.x, fromBits);
__graphSet__(index + NEXT, newIndex);
return newIndex;
}
index = toNext;
} else {
// cost: two global writes
uint newIndex = mallocIn(toRel);
__graphSet__(newIndex + threadIdx.x, toBits);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
__graphSet__(index + threadIdx.x, val);
return index;
}
}
}
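/**
 * Add variable 'var' to the adjacency list of relation 'rel' that starts at 'index'.
 * Granularity: warp. Builds the one-bit element (base, word, bit) for 'var' and
 * delegates to addVirtualElement; returns the index where the search can resume.
 */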
__device__ uint insert(const uint index, const uint var, const int rel) {
uint base = BASE_OF(var);
uint word = WORD_OF(var);
uint bit = BIT_OF(var);
uint myBits = 0;
if (threadIdx.x == word) {
myBits = 1 << bit;
} else if (threadIdx.x == BASE) {
myBits = base;
} else if (threadIdx.x == NEXT) {
myBits = NIL;
}
return addVirtualElement(index, base, myBits, rel);
}
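/**
 * Inter-block epilogue: the last block to arrive resets the global worklist index
 * and the block counter. Returns 1 only for the first thread of that last block,
 * so callers can run single-threaded cleanup code.
 */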
__device__ inline uint resetWorklistIndex() {
__syncthreads();
uint numBlocks = getBlocksPerGrid();
if (isFirstThreadOfBlock() && atomicInc(&__counter__, numBlocks - 1) == (numBlocks - 1)) {
__worklistIndex0__ = 0;
__counter__ = 0;
return 1;
}
return 0;
}
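/**
 * Insert the initial edges of relation 'rel' into the graph. __key__ holds the
 * unique source variables, __keyAux__ the start of each source's destination run,
 * and __val__ the destinations; destinations are staged in shared memory one
 * warp-sized chunk at a time and inserted in order. Granularity: warp per source.
 */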
__global__ void addEdges(uint* __key__, uint* __keyAux__, uint* __val__, const uint to, uint rel) {
__shared__ uint _sh_[WARPS_PER_BLOCK(DEF_THREADS_PER_BLOCK) * WARP_SIZE];
uint* _shared_ = &_sh_[threadIdx.y * WARP_SIZE];
uint i = getAndIncrement(1);
while (i < to) {
uint src = __key__[i];
if (src == NIL) {
break;
}
uint index = getHeadIndex(src, rel);
uint startIndex = __keyAux__[i];
uint end = __keyAux__[i + 1];
uint start = roundToPrevMultipleOf(startIndex, WARP_SIZE); // to ensure alignment
for (int j = start; j < end; j += WARP_SIZE) {
uint myIndex = j + threadIdx.x;
_shared_[threadIdx.x] = myIndex < end ? __val__[myIndex] : NIL;
uint startK = max(((int) startIndex) - j, 0);
uint endK = min(end - j, WARP_SIZE);
for (int k = startK; k < endK; k++) {
uint dst = _shared_[k];
index = insert(index, dst, rel);
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
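/**
 * Flush a buffer of union sources accumulated in shared memory: the 'fromRel' list
 * of each buffered entry is unioned into the 'toRel' list of 'to' (COPY_INV targets
 * go through unionToCopyInv so the copy-inverse rule is applied as well).
 * When fromRel == CURR_DIFF_PTS the buffer holds element indexes, not variable IDs.
 * Granularity: warp.
 */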
template<uint toRel, uint fromRel>
__device__ inline void unionAll(const uint to, uint* const _shared_, uint numFrom, bool sort) {
if (numFrom > 1 && sort) {
numFrom = removeDuplicates(_shared_, numFrom);
}
for (int i = 0; i < numFrom; i++) {
uint fromIndex = _shared_[i];
if (fromRel != CURR_DIFF_PTS) {
fromIndex = getHeadIndex(fromIndex, fromRel);
}
if (toRel == COPY_INV) {
unionToCopyInv(to, fromIndex, _shared_ + DECODE_VECTOR_SIZE + 1);
} else {
unionG2G(to, toRel, fromIndex);
}
}
}
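/**
 * Decode one element (base, myBits) into variable IDs, take their representatives,
 * and append them to the shared buffer '_shared_'; whenever the buffer would exceed
 * DECODE_VECTOR_SIZE it is deduplicated and, if still too large, flushed through
 * unionAll (or insertAll when the target relation is STORE). For CURR_DIFF_PTS
 * sources the shared list head (__currPtsHead__) is stored instead of the variable.
 * Granularity: warp.
 */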
template<uint toRel, uint fromRel>
__device__ void map(uint to, const uint base, const uint myBits, uint* const _shared_,
uint& numFrom) {
uint nonEmpty = __ballot(myBits) & LT_BASE;
const uint threadMask = 1 << threadIdx.x;
const uint myMask = threadMask - 1;
const uint mul960base = mul960(base);
while (nonEmpty) {
uint pos = __ffs(nonEmpty) - 1;
nonEmpty &= (nonEmpty - 1);
uint bits = getValAtThread(myBits, pos);
uint var = getRep(mul960base + mul32(pos) + threadIdx.x); //coalesced
uint bitActive = (var != I2P) && (bits & threadMask);
bits = __ballot(bitActive);
uint numOnes = __popc(bits);
if (numFrom + numOnes > DECODE_VECTOR_SIZE) {
numFrom = removeDuplicates(_shared_, numFrom);
if (numFrom + numOnes > DECODE_VECTOR_SIZE) {
if (toRel == STORE) {
insertAll(to, _shared_, numFrom, false);
} else {
unionAll<toRel, fromRel>(to, _shared_, numFrom, false);
}
numFrom = 0;
}
}
pos = numFrom + __popc(bits & myMask);
if (bitActive) {
_shared_[pos] = (fromRel == CURR_DIFF_PTS) ? __currPtsHead__[var] : var;
}
numFrom += numOnes;
}
}
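/**
 * Generic rule application: for every variable v in the 'firstRel' list of 'src',
 * union the 'secondRel' list of v into the 'thirdRel' list of 'src'. When the
 * source sets are CURR_DIFF_PTS, each element is first masked so only variables
 * whose current diff points-to set is non-empty are decoded. Granularity: warp.
 */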
template<uint firstRel, uint secondRel, uint thirdRel>
__device__ void apply(const uint src, uint* const _shared_) {
uint numFrom = 0;
uint index = getHeadIndex(src, firstRel);
do {
uint myBits = __graphGet__(index + threadIdx.x);
uint base = __graphGet__(index + BASE);
if (base == NIL) {
break;
}
index = __graphGet__(index + NEXT);
if (secondRel == CURR_DIFF_PTS) {
myBits &= __diffPtsMaskGet__(base, threadIdx.x);
}
map<thirdRel, secondRel>(src, base, myBits, _shared_, numFrom);
} while (index != NIL);
if (numFrom) {
unionAll<thirdRel, secondRel>(src, _shared_, numFrom);
}
}
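/**
 * Append pairs to the global __key__/__val__ worklist: each buffered variable goes
 * into __key__ and the STORE head index of 'src' goes into __val__; the pairs are
 * later sorted and consumed by the storeInv kernel. Granularity: warp.
 */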
__device__ void insertAll(const uint src, uint* const _shared_, uint numFrom, const bool sort) {
if (numFrom > 1 && sort) {
numFrom = removeDuplicates(_shared_, numFrom);
}
const uint storeIndex = getStoreHeadIndex(src);
for (int i = 0; i < numFrom; i += WARP_SIZE) {
uint size = min(numFrom - i, WARP_SIZE);
uint next = getAndIncrement(&__numKeysCounter__, size);
// TODO: we need to make sure that (next + threadIdx.x < MAX_HASH_SIZE)
if (threadIdx.x < size) {
__key__[next + threadIdx.x] = _shared_[i + threadIdx.x]; // at most 2 transactions
__val__[next + threadIdx.x] = storeIndex;
}
}
}
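/**
 * For each variable v in CURR_DIFF_PTS(src), enqueue the pair (v, STORE head index
 * of src) via insertAll; the storeInv kernel later unions STORE(src) into
 * COPY_INV(v). Granularity: warp.
 */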
__device__ void store2storeInv(const uint src, uint* const _shared_) {
uint currDiffPtsIndex = getCurrDiffPtsHeadIndex(src);
uint numFrom = 0;
do {
uint myBits = __graphGet__(currDiffPtsIndex + threadIdx.x);
uint base = __graphGet__(currDiffPtsIndex + BASE);
if (base == NIL) {
break;
}
currDiffPtsIndex = __graphGet__(currDiffPtsIndex + NEXT);
map<STORE, STORE>(src, base, myBits, _shared_, numFrom);
} while (currDiffPtsIndex != NIL);
if (numFrom) {
insertAll(src, _shared_, numFrom);
}
}
__global__ void copyInv_loadInv_store2storeInv() {
__shared__ uint _sh_[WARPS_PER_BLOCK(COPY_INV_THREADS_PER_BLOCK) * (DECODE_VECTOR_SIZE * 2 + 2)];
uint* const _shared_ = &_sh_[threadIdx.y * (DECODE_VECTOR_SIZE * 2 + 2)];
_shared_[0] = NIL;
_shared_[DECODE_VECTOR_SIZE + 1] = NIL;
uint to = __numVars__;
uint src = getAndIncrement(&__worklistIndex1__, 1);
while (src < to) {
apply<COPY_INV, CURR_DIFF_PTS, NEXT_DIFF_PTS>(src, _shared_ + 1 + DECODE_VECTOR_SIZE + 1);
apply<LOAD_INV, CURR_DIFF_PTS, COPY_INV>(src, _shared_ + 1);
src = getAndIncrement(&__worklistIndex1__,1);
}
to = __numStore__;
src = getAndIncrement(1);
while (src < to) {
src = __storeConstraints__[src];
if (src != NIL) {
store2storeInv(src, _shared_ + 1);
}
src = getAndIncrement(1);
}
if (resetWorklistIndex()) {
__key__[__numKeysCounter__] = NIL;
__val__[__numKeysCounter__] = NIL;
__numKeys__ = __numKeysCounter__ + 1;
__numKeysCounter__ = 0;
__worklistIndex1__ = 0;
}
}
__device__ void warpStoreInv(const uint i, uint* const _pending_, uint* _numPending_) {
uint src = __key__[i];
uint startIndex = __keyAux__[i];
uint end = __keyAux__[i + 1];
if (end - startIndex > WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) * 4) {
// too big for a single warp => add to pending, so the whole block will process this variable
if (isFirstThreadOfWarp()) {
uint where = 3 * atomicAdd(_numPending_, 1);
_pending_[where] = src;
_pending_[where + 1] = startIndex;
_pending_[where + 2] = end;
}
return;
}
uint* const _shared_ = _pending_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) * 3 +
threadIdx.y * (WARP_SIZE + DECODE_VECTOR_SIZE + 1);
_shared_[WARP_SIZE] = NIL;
uint start = roundToPrevMultipleOf(startIndex, WARP_SIZE); // to ensure alignment
for (int j = start; j < end; j += WARP_SIZE) {
uint myIndex = j + threadIdx.x;
_shared_[threadIdx.x] = myIndex < end ? __val__[myIndex] : NIL;
uint startK = max(((int) startIndex) - j, 0);
uint endK = min(end - j, WARP_SIZE);
for (int k = startK; k < endK; k++) {
uint fromIndex = _shared_[k];
unionToCopyInv(src, fromIndex, _shared_ + 1 + WARP_SIZE);
}
}
}
__device__ void blockStoreInv(uint src, uint* const _dummyVars_, volatile uint* _warpInfo_,
uint& _numPending_) {
uint* _shared_ = _dummyVars_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) * 4 +
threadIdx.y * (WARP_SIZE + DECODE_VECTOR_SIZE + 1);
__shared__ uint _counter_, _start_, _end_;
_shared_[WARP_SIZE] = NIL;
_shared_ += WARP_SIZE + 1;
__syncthreads();
for (int i = 0; i < _numPending_; i++) {
if (isFirstWarpOfBlock()) {
uint* pending = _dummyVars_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK);
src = pending[3 * i];
_start_ = pending[3 * i + 1];
_end_ = pending[3 * i + 2];
_counter_ = _start_;
}
__syncthreads();
if (isFirstThreadOfWarp()) {
_warpInfo_[threadIdx.y] = atomicAdd(&_counter_, 1);
}
uint j = _warpInfo_[threadIdx.y];
while (j < _end_) {
uint fromIndex = __val__[j];
unionToCopyInv(src, fromIndex, _shared_, isFirstWarpOfBlock());
if (isFirstThreadOfWarp()) {
_warpInfo_[threadIdx.y] = atomicAdd(&_counter_, 1);
}
j = _warpInfo_[threadIdx.y];
}
__syncthreads();
if (isFirstWarpOfBlock()) {
for (int i = 1; i < WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK); i++) {
uint var2 = _dummyVars_[i];
unionToCopyInv(src, getCopyInvHeadIndex(var2), _shared_);
}
}
__syncthreads();
if (!isFirstWarpOfBlock()) { //reset fields so updateDiffPts doesn't work on dummy variables
uint index = getHeadIndex(src, COPY_INV);
__graphSet__(index, threadIdx.x, NIL);
}
}
if (isFirstWarpOfBlock()) {
_numPending_ = 0;
}
__syncthreads();
}
__global__ void storeInv() {
__shared__ uint _sh_[WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) *
(5 + WARP_SIZE + DECODE_VECTOR_SIZE + 1)];
__shared__ volatile uint* _warpInfo_;
__shared__ volatile uint _warpsWorking_;
__shared__ uint* _dummyVars_;
__shared__ uint _numPending_, _to_;
if (isFirstWarpOfBlock()) {
_to_ = __numKeys__ - 1; // because the last one is NIL
_dummyVars_ = _sh_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK);
if (threadIdx.x < WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK)) {
_dummyVars_[threadIdx.x] = __initialNonRep__[mul32(blockIdx.x) + threadIdx.x];
}
_warpInfo_ = _sh_;
_numPending_ = 0;
_warpsWorking_ = WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK);
}
__syncthreads();
uint counter, src;
if (!isFirstWarpOfBlock()) {
src = _dummyVars_[threadIdx.y];
}
if (isFirstThreadOfWarp()) {
uint next = atomicAdd(&__worklistIndex0__, 1);
if (next >= _to_) {
atomicSub((uint*) &_warpsWorking_, 1);
}
_warpInfo_[threadIdx.y] = next;
}
counter = _warpInfo_[threadIdx.y];
while (_warpsWorking_) {
if (counter < _to_) {
warpStoreInv(counter, _sh_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) * 2, &_numPending_);
}
__syncthreads();
if (_numPending_) {
blockStoreInv(src, _dummyVars_, _warpInfo_, _numPending_);
}
if (counter < _to_ ) {
if (isFirstThreadOfWarp()) {
uint next = atomicAdd(&__worklistIndex0__, 1);
if (next >= _to_) {
atomicSub((uint*) &_warpsWorking_, 1);
}
_warpInfo_[threadIdx.y] = next;
}
counter = _warpInfo_[threadIdx.y];
}
}
resetWorklistIndex();
}
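/**
 * Shift the bit vector 'bits' of an element with base 'base' by 'offset' positions,
 * writing the result, spread over up to three consecutive bases, into the shared
 * buffer '_shifted_'. Used by the GEP-inverse (field offset) rule.
 * Granularity: warp.
 */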
__device__ void shift(const uint base, const uint bits, const uint offset,
volatile uint* _shifted_) {
_shifted_[threadIdx.x] = 0;
_shifted_[threadIdx.x + WARP_SIZE] = 0;
_shifted_[threadIdx.x + WARP_SIZE * 2] = 0;
uint delta = div32(offset);
uint highWidth = mod32(offset);
uint lowWidth = WARP_SIZE - highWidth;
// these memory accesses do not conflict
_shifted_[threadIdx.x + delta] = (bits << highWidth);
_shifted_[threadIdx.x + delta + 1] |= (bits >> lowWidth);
_shifted_[threadIdx.x + WARP_SIZE * 2] = _shifted_[threadIdx.x + BASE * 2];
_shifted_[threadIdx.x + WARP_SIZE] = _shifted_[threadIdx.x + BASE];
_shifted_[BASE] = base;
_shifted_[BASE + WARP_SIZE] = base + 1;
_shifted_[BASE + WARP_SIZE * 2] = base + 2;
}
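/**
 * GEP-inverse rule: for every variable in CURR_DIFF_PTS(y) whose size exceeds
 * 'offset' (according to the offset mask), add the variable shifted by 'offset'
 * to NEXT_DIFF_PTS(x). Granularity: warp.
 */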
__device__ void applyGepInvRule(uint x, const uint y, const uint offset, volatile uint* _shared_) {
uint yIndex = getCurrDiffPtsHeadIndex(y);
uint myBits = __graphGet__(yIndex, threadIdx.x);
if (__all(myBits == NIL)) {
return;
}
uint xIndex = getNextDiffPtsHeadIndex(x);
do {
myBits = __graphGet__(yIndex, threadIdx.x);
uint base = __graphGet__(yIndex, BASE);
yIndex = __graphGet__(yIndex, NEXT);
myBits &= __offsetMaskGet__(base, threadIdx.x, offset);
if (__all(myBits == 0)) {
continue;
}
shift(base, myBits, offset, _shared_);
for (int i = 0; i < 3; i++) {
uint myBits = threadIdx.x == NEXT ? NIL : _shared_[threadIdx.x + WARP_SIZE * i];
if (__any(myBits && threadIdx.x < BASE)) {
xIndex = addVirtualElement(xIndex, base + i, myBits, NEXT_DIFF_PTS);
}
}
} while (yIndex != NIL);
}
__global__ void gepInv() {
__shared__ uint _sh_[WARPS_PER_BLOCK(GEP_INV_THREADS_PER_BLOCK) * (WARP_SIZE * 3)];
volatile uint* _shared_ = &_sh_[threadIdx.y * (WARP_SIZE * 3)];
const uint to = __numGepInv__ * 2;
uint index = getAndIncrement(2);
while (index < to) {
uint x = __gepInv__[index];
x = getRep(x);
uint val1 = __gepInv__[index + 1];
while (!lock(x)); // busy wait, should be short
const uint y = getRep(id(val1));
applyGepInvRule(x, y, offset(val1), _shared_);
unlock(x);
index = getAndIncrement(2);
}
if (resetWorklistIndex()) {
__done__ = true;
}
}
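/**
 * Clone the remaining NEXT_DIFF_PTS chain into a fresh PTS element at 'ptsIndex'
 * and link it into CURR_DIFF_PTS(var): appended after 'currDiffPtsIndex' when one
 * exists, or copied into the CURR_DIFF_PTS head otherwise. Granularity: warp.
 */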
__device__ void cloneAndLink(const uint var, const uint ptsIndex, uint& currDiffPtsIndex,
const uint diffPtsBits, const uint diffPtsNext) {
clone(ptsIndex, diffPtsBits, diffPtsNext, PTS);
if (currDiffPtsIndex != NIL) {
__graphSet__(currDiffPtsIndex + NEXT, ptsIndex);
} else {
currDiffPtsIndex = getCurrDiffPtsHeadIndex(var);
uint ptsBits = __graphGet__(ptsIndex + threadIdx.x);
__graphSet__(currDiffPtsIndex + threadIdx.x, ptsBits);
}
}
/**
* Update the current, next and total PTS sets of a variable. In the last iteration of the main
* loop, points-to edges have been added to NEXT_DIFF_PTS. However, many of them might already be
* present in PTS. The purpose of this function is to update PTS as PTS U NEXT_DIFF_PTS, and set
* CURR_DIFF_PTS as the difference between the old and new PTS for the given variable.
*
* @param var ID of the variable
* @return true if new pts edges have been added to this variable
*/
__device__ bool updatePtsAndDiffPts(const uint var) {
const uint diffPtsHeadIndex = getNextDiffPtsHeadIndex(var);
uint diffPtsBits = __graphGet__(diffPtsHeadIndex + threadIdx.x);
uint diffPtsBase = __graphGet__(diffPtsHeadIndex + BASE);
if (diffPtsBase == NIL) {
return false;
}
uint diffPtsNext = __graphGet__(diffPtsHeadIndex + NEXT);
__graphSet__(diffPtsHeadIndex + threadIdx.x, NIL);
uint ptsIndex = getPtsHeadIndex(var);
uint ptsBits = __graphGet__(ptsIndex + threadIdx.x);
uint ptsBase = __graphGet__(ptsIndex + BASE);
if (ptsBase == NIL) {
// we pass ptsBase instead of the literal NIL: it also holds NIL here, but being a variable it can be updated by cloneAndLink
cloneAndLink(var, ptsIndex, ptsBase, diffPtsBits, diffPtsNext);
return true;
}
uint ptsNext = __graphGet__(ptsIndex + NEXT);
uint currDiffPtsIndex = NIL;
while (1) {
if (ptsBase > diffPtsBase) {
uint newIndex = mallocPts();
__graphSet__(newIndex + threadIdx.x, ptsBits);
uint val = threadIdx.x == NEXT ? newIndex : diffPtsBits;
__graphSet__(ptsIndex + threadIdx.x, val);
ptsIndex = newIndex;
// update CURR_DIFF_PTS
newIndex = currDiffPtsIndex == NIL ? getCurrDiffPtsHeadIndex(var) : mallocCurrDiffPts();
val = threadIdx.x == NEXT ? NIL : diffPtsBits;
__graphSet__(newIndex + threadIdx.x, val);
if (currDiffPtsIndex != NIL) {
__graphSet__(currDiffPtsIndex + NEXT, newIndex);
}
if (diffPtsNext == NIL) {
return true;
}
currDiffPtsIndex = newIndex;
diffPtsBits = __graphGet__(diffPtsNext + threadIdx.x);
diffPtsBase = __graphGet__(diffPtsNext + BASE);
diffPtsNext = __graphGet__(diffPtsNext + NEXT);
} else if (ptsBase == diffPtsBase) {
uint newPtsNext = (ptsNext == NIL && diffPtsNext != NIL) ? mallocPts() : ptsNext;
uint orBits = threadIdx.x == NEXT ? newPtsNext : ptsBits | diffPtsBits;
uint ballot = __ballot(orBits != ptsBits);
if (ballot) {
__graphSet__(ptsIndex + threadIdx.x, orBits);
if (ballot & LT_BASE) {
// update CURR_DIFF_PTS
orBits = diffPtsBits & ~ptsBits;
if (threadIdx.x == BASE) {
orBits = ptsBase;
} else if (threadIdx.x == NEXT) {
orBits = NIL;
}
uint newIndex;
if (currDiffPtsIndex != NIL) {
newIndex = mallocCurrDiffPts();
__graphSet__(currDiffPtsIndex + NEXT, newIndex);
} else {
newIndex = getCurrDiffPtsHeadIndex(var);
}
__graphSet__(newIndex + threadIdx.x, orBits);
currDiffPtsIndex = newIndex;
}
}
if (diffPtsNext == NIL) {
return (currDiffPtsIndex != NIL);
}
diffPtsBits = __graphGet__(diffPtsNext + threadIdx.x);
diffPtsBase = __graphGet__(diffPtsNext + BASE);
diffPtsNext = __graphGet__(diffPtsNext + NEXT);
if (ptsNext == NIL) {
cloneAndLink(var, newPtsNext, currDiffPtsIndex, diffPtsBits, diffPtsNext);
return true;
}
ptsIndex = ptsNext;
ptsBits = __graphGet__(ptsIndex + threadIdx.x);
ptsBase = __graphGet__(ptsIndex + BASE);
ptsNext = __graphGet__(ptsIndex + NEXT);
} else { // ptsBase < diffPtsBase
if (ptsNext == NIL) {
uint newPtsIndex = mallocPts();
__graphSet__(ptsIndex + NEXT, newPtsIndex);
cloneAndLink(var, newPtsIndex, currDiffPtsIndex, diffPtsBits, diffPtsNext);
return true;
}
ptsIndex = ptsNext;
ptsBits = __graphGet__(ptsIndex + threadIdx.x);
ptsBase = __graphGet__(ptsIndex + BASE);
ptsNext = __graphGet__(ptsIndex + NEXT);
}
}
}
__global__ void updatePtsInformation() {
bool newWork = false;
const uint numVars = __numVars__;
const uint CHUNK_SIZE = 12;
//ulongint start = recordStartTime();
int i = getAndIncrement(CHUNK_SIZE);
while (i < numVars) {
for (int var = i; var < min(i + CHUNK_SIZE, numVars); var++) {
bool newStuff = updatePtsAndDiffPts(var);
newWork |= newStuff;
if (!newStuff) {
const uint currPtsHeadIndex = getCurrDiffPtsHeadIndex(var);
__graphSet__(currPtsHeadIndex + threadIdx.x, NIL);
}
}
i = getAndIncrement(CHUNK_SIZE);
}
if (newWork) {
__done__ = false;
}
// if (isFirstThreadOfWarp()) {
// //printf("Warp %u: %u\n", getWarpIdInGrid(), getEllapsedTime(start));
// }
uint headerSize = numVars * ELEMENT_WIDTH;
if (resetWorklistIndex()) {
__currDiffPtsFreeList__ = CURR_DIFF_PTS_START - headerSize;
__nextDiffPtsFreeList__ = NEXT_DIFF_PTS_START - headerSize;
}
}
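/**
 * Precompute, for every offset in [1, maxOffset], a bit mask telling which object
 * variables have a size greater than that offset; used by the GEP-inverse rule.
 */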
__global__ void createOffsetMasks(int numObjectVars, uint maxOffset) {
__shared__ uint _sh_[DEF_THREADS_PER_BLOCK];
volatile uint* _mask_ = &_sh_[threadIdx.y * WARP_SIZE];
int inc = mul960(getWarpsPerGrid());
int init = mul960(getWarpIdInGrid());
for (int i = init; i < numObjectVars; i += inc) {
uint base = BASE_OF(i);
for (int offset = 1; offset <= maxOffset; offset++) {
_mask_[threadIdx.x] = 0;
for (int src = i; src < min(i + ELEMENT_CARDINALITY, numObjectVars); src += WARP_SIZE) {
uint size = __size__[src + threadIdx.x];
if (__all(size <= offset)) {
continue;
}
uint word = WORD_OF(src - i);
_mask_[word] = __ballot(size > offset);
}
__offsetMaskSet__(base, threadIdx.x, offset, _mask_[threadIdx.x]);
}
}
}
__device__ uint lockToVar(uint lock) {
if ((lock < VAR(0)) || (lock >= LOCKED)) {
return lock;
}
return lock - VAR(0);
}
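/**
 * Merge variable var2 into var1: union var2's PTS, COPY_INV, STORE and LOAD_INV
 * lists into var1 (recycling var2's elements), clear var2's lists, record 'rep'
 * as var2's representative and unlock var2. Granularity: warp.
 */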
__device__ void merge(const uint var1, const uint var2, const uint rep) {
//if (isFirstThreadOfWarp()) //printf("%u <= %u\n", var1, var2);
uint headIndex = getPtsHeadIndex(var2);
unionG2GRecycling(var1, PTS, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
headIndex = getCopyInvHeadIndex(var2);
unionG2GRecycling(var1, COPY_INV, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
headIndex = getStoreHeadIndex(var2);
unionG2GRecycling(var1, STORE, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
headIndex = getLoadInvHeadIndex(var2);
unionG2GRecycling(var1, LOAD_INV, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
// clear CURR_DIFF_PTS
headIndex = getCurrDiffPtsHeadIndex(var2);
//unionG2GRecycling(var1, CURR_DIFF_PTS, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
setRep(var2, rep);
__threadfence();
unlock(var2);
}
/**
* Merge a list of pointer-equivalent variables
* Granularity: block
* @param _list_ Pointer-equivalent variables
* @param _listSize_ Number of variables to be processed
*/
__device__ void mergeCycle(const uint* const _list_, const uint _listSize_) {
__shared__ uint _counter_;
if (!_listSize_) {
__syncthreads();
return;
}
// 'ry' will be the representative of this cycle
uint ry = _list_[0];
if (_listSize_ == 1) {
if (isFirstWarpOfBlock()) {
unlock(ry);
}
__syncthreads();
return;
}
uint warpsPerBlock = getWarpsPerBlock();
if (_listSize_ > warpsPerBlock) {
// each warp chooses a local representative and then merges each popped worklist item with it.
uint var1 = _list_[threadIdx.y];
_counter_ = warpsPerBlock;
__syncthreads();
uint index = getAndIncrement(&_counter_, 1);
while (index < _listSize_) {
uint var2 = _list_[index];
merge(var1, var2, ry);
index = getAndIncrement(&_counter_, 1);
}
}
__syncthreads();
// the first warp merges the local representatives. This is actually faster (and simpler)
// than performing a reduction of the list using the entire block, due to load imbalance.
if (isFirstWarpOfBlock()) {
uint to = min(_listSize_, warpsPerBlock);
for (int i = 1; i < to; i++) {
uint var = _list_[i];
merge(ry, var, ry);
}
//reset CURR_PTS of the cycle representative to be PTS
uint myBits = __graphGet__(getPtsHeadIndex(ry), threadIdx.x);
__graphSet__(getCurrDiffPtsHeadIndex(ry), threadIdx.x, myBits);
__threadfence();
unlock(ry);
}
__syncthreads();
}
// to be executed by one thread
__device__ uint lockVarRep(uint& var) {
while (1) {
uint rep = getRepRec(var);
uint old = atomicCAS(__lock__ + rep, UNLOCKED, VAR(blockIdx.x));
if (old == PTR(blockIdx.x)) {
// try to promote lock to type VAR
old = atomicCAS(__lock__ + rep, PTR(blockIdx.x), VAR(blockIdx.x));
}
if (old != UNLOCKED && old != PTR(blockIdx.x)) {
var = rep;
return old;
}
// we locked it, but maybe it is not a representative anymore
var = getRep(rep);
if (var == rep) {
return UNLOCKED;
}
if (old == PTR(blockIdx.x)) { // back to PTR
__lock__[rep] = PTR(blockIdx.x);
} else {
unlock(rep);
}
}
}
/**
* Lock a list of variables
* Granularity: block
* @param _currVar_ List of variables to lock, sorted in ascending order
* @param _currVarSize_ Number of variables we want to process. At the end of the function,
* it stores the number of variables we were able to lock.
* @param _nextVar_ List where to add all the variables we could not lock
* @param _nextVarSize_ Number of variables we could not lock
*/
__device__ void lockVars(uint* const _currVar_, uint& _currVarSize_, uint* const _nextVar_,
uint* _nextVarSize_) {
__shared__ uint _count_;
_count_ = 0;
__syncthreads();
for (int i = getThreadIdInBlock(); i < _currVarSize_; i+= getThreadsPerBlock()) {
uint var = _currVar_[i];
// block culling to filter out some duplicates
if (i && var == _currVar_[i - 1]) {
continue;
}
uint stat = lockVarRep(var);
uint pos;
if (stat == UNLOCKED) {
pos = atomicAdd(&_count_, 1);
_currVar_[pos] = var;
} else if (stat != VAR(blockIdx.x)) {
uint pos = atomicAdd(_nextVarSize_, 1);
_nextVar_[pos] = var;
}
}
__syncthreads();
_currVarSize_ = _count_; //first currVarSize positions are populated
__syncthreads();
}
// to be executed by one WARP
__device__ uint lockPtr(uint ptr) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
uint intended = PTR(getBlockIdInGrid());
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicCAS(__lock__ + ptr, UNLOCKED, intended);
}
return _shared_[threadIdx.y];
}
/**
* Lock every variable in the current points-to set of the input variable.
* Granularity: warp
* @param x A variable locked by the current block
* @param _currVar_ List of locked variables
* @param _currVarSize_ Number of locked variables
* @param _nextVar_ List of variables we could not lock
* @param _nextVarSize_ Number of variables we could not lock
*/
__device__ void decodeCurrPts(const uint x, uint* const _currVar_, uint* const _currVarSize_,
uint* const _nextVar_, uint* const _nextVarSize_) {
uint index = getCurrDiffPtsHeadIndex(x);
do {
uint myBits = __graphGet__(index, threadIdx.x);
uint base = __graphGet__(index, BASE);
if (base == NIL) {
break;
}
index = __graphGet__(index, NEXT);
uint nonEmpty = __ballot(myBits && threadIdx.x < BASE);
uint lastVar = NIL;
while (nonEmpty) {
uint pos = __ffs(nonEmpty) - 1;
nonEmpty &= (nonEmpty - 1);
uint bits = getValAtThread(myBits, pos);
uint var = mul960(base) + mul32(pos) + threadIdx.x;
if (var == I2P || !isBitActive(bits, threadIdx.x)) {
var = NIL;
} else {
uint stat = lockVarRep(var);
if (stat != UNLOCKED) {
if (stat != VAR(blockIdx.x) && var != lastVar) {
// TODO: do something so we do not lose equivalences. This only affects Linux, though
uint where = atomicInc(_nextVarSize_, HCD_DECODE_VECTOR_SIZE - 1);
_nextVar_[where] = var;
lastVar = var;
}
var = NIL;
}
}
bits = __ballot(var != NIL);
if (!bits) {
continue;
}
uint numOnes = __popc(bits);
uint prevNumFrom = 0;
if (isFirstThreadOfWarp()) {
prevNumFrom = atomicAdd(_currVarSize_, numOnes);
}
prevNumFrom = getValAtThread(prevNumFrom, 0);
// TODO: make sure that (prevNumFrom + numOnes < HCD_DECODE_VECTOR_SIZE)
//if (isFirstThreadOfWarp() && ((prevNumFrom + numOnes) >= HCD_DECODE_VECTOR_SIZE)) {
// //printf("Exceeded HCD_DECODE_VECTOR_SIZE!!\n");
//}
pos = prevNumFrom + __popc(bits & ((1 << threadIdx.x) - 1));
if (var != NIL) {
_currVar_[pos] = var;
}
}
} while (index != NIL);
}
/**
* Lock a list of (pointer) variables and their points-to sets
* Granularity: block
*/
__device__ void lockPtrs(uint* const _currPtr_, uint& _currPtrSize_, uint* const _nextPtr_,
uint* _nextPtrSize_, uint* const _currVar_, uint* _currVarSize_, uint* const _nextVar_,
uint* _nextVarSize_) {
const uint warpsPerBlock = getWarpsPerBlock();
for (int i = threadIdx.y; i < _currPtrSize_; i += warpsPerBlock) {
uint ptr = _currPtr_[i];
uint stat = lockPtr(ptr);
if (stat != UNLOCKED && stat != VAR(blockIdx.x)) {
_currPtr_[i] = NIL;
if (isFirstThreadOfWarp()) {
uint pos = atomicAdd(_nextPtrSize_, 1);
_nextPtr_[pos] = ptr;
}
} else {
decodeCurrPts(ptr, _currVar_, _currVarSize_, _nextVar_, _nextVarSize_);
}
}
__syncthreads();
}
__device__ void unlockPtrs(const uint* const _list_, const uint _listSize_) {
int init = getThreadIdInBlock();
int inc = getThreadsPerBlock();
for (int i = init; i < _listSize_; i += inc) {
uint var = _list_[i];
if (var != NIL) {
// if it is locked by VAR(blockIdx.x), keep it that way
atomicCAS(__lock__ + var, PTR(blockIdx.x), UNLOCKED);
}
}
__syncthreads();
}
/**
* Online phase of Hybrid Cycle Detection
* This is when things get really hairy -- but the overall performance of the algorithm is
* dramatically improved by removing the equivalents discovered during the offline analysis, so
 * there is no way around it AFAIK.
 * The kernel takes a list of tuples (y, x_0, ..., x_N) where pts(*y) = pts(x_0) = ... = pts(x_N)
* Each block pops a pair out of the worklist, and performs the following logic:
* a) lock variables y,x_0,...,x_N
* b) decode and lock the points-to of x_0,...,x_N
* c) merge all the variables that we were able to lock
* d) unlock the merged variables
* e) repeat a-d for all the variables we were not able to lock
 * Note that e) is not strictly necessary, but skipping it would miss some (maybe relevant)
 * equivalences, which eventually results in more work for the standard graph rules.
*/
__global__ void hcd() {
__shared__ uint _counter_;
/**
* list of variables (x,...,x_N) such that all the variables in the set {pts(x),...pts(x_N)}
* are pointer-equivalent.
*/
__shared__ uint _ptr_[HCD_TABLE_SIZE * 2];
/*
* pointer to _ptr_ indicating where the current list starts
*/
__shared__ uint *_currPtr_;
/**
* pointer to _ptr_ indicating where the next list starts.
 * The reason we need sublists within _ptr_ is that we might not have been able to lock
 * all the variables in _currPtr_, so everything that is pending (i.e., needs to be processed
 * in the next iteration) is placed in the subarray pointed to by _nextPtr_
*/
__shared__ uint *_nextPtr_;
/**
* list of variables that are pointer equivalent (thus need to be merged)
*/
__shared__ uint _currVar_[HCD_DECODE_VECTOR_SIZE];
/**
* list of variables that are pointer equivalent but could not be locked in the current iteration
*/
__shared__ uint *_nextVar_;
__shared__ uint _currPtrSize_, _nextPtrSize_, _currVarSize_, _nextVarSize_;
const uint threadIdInBlock = getThreadIdInBlock();
const uint threadsInBlock = getThreadsPerBlock();
const uint to = __numHcdIndex__;
// first thread of the block picks next hcd pair to work on
if (isFirstThreadOfBlock()) {
_counter_ = atomicAdd(&__worklistIndex0__, 1);
_nextVar_ = __nextVar__ + getBlockIdInGrid() * HCD_DECODE_VECTOR_SIZE;
}
__syncthreads();
while (_counter_ < to) {
uint pair = __hcdIndex__[_counter_];
uint start = getFirst(pair);
uint end = getSecond(pair);
// move the (x0,...,x_N) sublist to shared memory
for (int i = start + 1 + threadIdInBlock; i < end; i += threadsInBlock) {
_ptr_[i - start - 1] = __hcdTable__[i];
}
if (isFirstWarpOfBlock()) {
_currPtrSize_ = end - start - 1;
_currVar_[0] = __hcdTable__[start];
_currVarSize_ = 1;
_currPtr_ = _ptr_;
// we do not know how many variables we will not be able to lock, so unfortunately we have to
// use a statically fixed index
_nextPtr_ = _ptr_ + HCD_TABLE_SIZE;
}
while (1) {
_nextPtrSize_ = 0;
_nextVarSize_ = 0;
__syncthreads();
// lock variables in the current variable list (variables that belong to the points-to set
// of x_I and could not be locked in a previous iteration)
lockVars(_currVar_, _currVarSize_, _nextVar_, &_nextVarSize_);
// lock variables in current pointer list, then decode their points-to sets and lock those too
lockPtrs(_currPtr_, _currPtrSize_, _nextPtr_, &_nextPtrSize_, _currVar_, &_currVarSize_, _nextVar_, &_nextVarSize_);
// unlock variables in pointer list if they are not in the variable list
unlockPtrs(_currPtr_, _currPtrSize_);
blockSort(_currVar_, _currVarSize_);
// merge variable list!
mergeCycle(_currVar_, _currVarSize_);
// if there is any pending work -because variables or pointers could not be locked-, update
// the corresponding information and retry
if (!_nextPtrSize_ && (!_nextVarSize_ || (_currVarSize_ + _nextVarSize_ == 1))) {
break;
}
if (isFirstWarpOfBlock() && _currVarSize_) {
_currVar_[_nextVarSize_] = _currVar_[0]; // merge representative with pending
}
__syncthreads();
for (int i = threadIdInBlock; i < _nextVarSize_; i+= threadsInBlock) {
_currVar_[i] = _nextVar_[i];
}
if (isFirstWarpOfBlock()) {
_currVarSize_ = _nextVarSize_ + (_currVarSize_ > 0);
_currPtrSize_ = _nextPtrSize_;
uint* tmp = _nextPtr_;
_nextPtr_ = _currPtr_;
_currPtr_ = tmp;
}
__syncthreads();
blockSort(_currVar_, _currVarSize_);
}
if (isFirstThreadOfBlock()) {
_counter_ = atomicAdd(&__worklistIndex0__, 1);
}
__syncthreads();
}
resetWorklistIndex();
}
__global__ void updateInfo() {
int inc = getThreadsPerGrid();
int init = getThreadIdInGrid();
uint to = __numVars__;
// a) path compression
for (int var = init; var < to; var += inc) {
uint rep = getRepRec(var); // non-coalesced
if (rep != var) {
setRep(var, rep); //coalesced
}
uint diffPtsMask = __ballot(!isEmpty(rep, CURR_DIFF_PTS)); //non aligned
__diffPtsMaskSet__(BASE_OF(var), WORD_OF(var), diffPtsMask); //aligned
}
syncAllThreads();
// b) update store rules
to = __numStore__;
for (int index = init; index < to; index += inc) {
// the size of store has been rounded to a multiple of 32, so no out-of-bounds
uint src = __storeConstraints__[index];
if (src != NIL) {
src = getRep(src);
uint val = (atomicCAS(__lock__ + src, UNLOCKED, LOCKED) == UNLOCKED) ? src : NIL;
__storeConstraints__[index] = val;
}
}
syncAllThreads();
// c) unlock
for (int index = init; index < to; index += inc) {
uint src = __storeConstraints__[index];
if (src != NIL) {
unlock(getRep(src));
}
}
}
__launch_bounds__ (DEF_THREADS_PER_BLOCK)
__global__ void initialize() {
uint to = __numVars__;
uint headerSize = to * ELEMENT_WIDTH;
if (isFirstThreadOfBlock()) {
__ptsFreeList__ = headerSize;
__currDiffPtsFreeList__ = CURR_DIFF_PTS_START - headerSize;
__nextDiffPtsFreeList__ = NEXT_DIFF_PTS_START - headerSize;
// after LOAD_INV, STORE and CURR_DIFF_PTS_INV header regions
__otherFreeList__ = COPY_INV_START + headerSize * (LAST_DYNAMIC_REL - COPY_INV + 1);
}
__syncthreads();
int inc = mul32(getWarpsPerGrid());
int init = mul32(getWarpIdInGrid());
for (int var = init; var < to; var += inc) {
unlock(var + threadIdx.x);
setRep(var + threadIdx.x, var + threadIdx.x);
for (int i = 0; i < WARP_SIZE; i++) {
uint index = getHeadIndex(var + i, PTS);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, NEXT_DIFF_PTS);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, CURR_DIFF_PTS);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, COPY_INV);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, STORE);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, LOAD_INV);
__graphSet__(index + threadIdx.x, NIL);
}
}
inc = mul960(getWarpsPerGrid());
init = mul960(getWarpIdInGrid());
for (int i = init; i < to; i += inc) {
uint base = BASE_OF(i);
__diffPtsMaskSet__(base, threadIdx.x, 0);
}
syncAllThreads();
to = __numInitialRep__;
init = getThreadIdInGrid();
inc = getThreadsPerGrid();
// the offline phase of Hybrid Cycle Detection already detected some pointer equivalent variables.
for (int i = init; i < to; i += inc) {
setRep(__initialNonRep__[i], __initialRep__[i]);
}
}
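/**
 * Emit one (hash, variable) pair per variable with a non-empty CURR_DIFF_PTS set.
 * The pairs are sorted afterwards so that variables with identical current
 * points-to sets can be detected by findCurrPtsEquivalents.
 */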
__global__ void computeCurrPtsHash() {
const uint to = __numVars__;
uint src = getAndIncrement(WARP_SIZE);
while (src < to) {
for (int i = 0; i < WARP_SIZE; i++) {
if (!isEmpty(src + i, CURR_DIFF_PTS)) {
uint hash = hashCode(getHeadIndex(src + i, CURR_DIFF_PTS));
uint next = getAndIncrement(&__numKeysCounter__, 1);
__key__[next] = hash;
__val__[next] = src + i;
}
}
src = getAndIncrement(WARP_SIZE);
}
if (resetWorklistIndex()) {
__numKeys__ = __numKeysCounter__;
__numKeysCounter__ = 0;
}
}
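/**
 * For every hashed variable, look for an earlier variable in the same hash bucket
 * whose CURR_DIFF_PTS list is equal, and make __currPtsHead__ point to that shared
 * list (or to the variable's own head if no match exists), so identical sets can
 * be deduplicated when the rules decode them.
 */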
__global__ void findCurrPtsEquivalents() {
__shared__ uint _sh_[WARPS_PER_BLOCK(UPDATE_THREADS_PER_BLOCK) * WARP_SIZE * 2];
uint* _key_ = &_sh_[threadIdx.y * WARP_SIZE * 2];
uint* _val_ = _key_ + WARP_SIZE;
const uint to = __numKeys__;
uint index = getAndIncrement(WARP_SIZE);
while (index < to) {
if (index + threadIdx.x < to) {
_key_[threadIdx.x] = __key__[index + threadIdx.x];
_val_[threadIdx.x] = __val__[index + threadIdx.x];
}
for (int i = 0; i < WARP_SIZE && index + i < to; i++) {
uint var1 = _val_[i];
uint var1Head = getHeadIndex(var1, CURR_DIFF_PTS);
uint j = _key_[i];
while (j < index + i) {
uint var2 = __val__[j];
uint var2Head = getHeadIndex(var2, CURR_DIFF_PTS);
if (equal(var1Head, var2Head)) {
__currPtsHead__[var1] = var2Head;
break;
}
j++;
}
if (j == index + i) {
__currPtsHead__[var1] = var1Head;
}
}
index = getAndIncrement(WARP_SIZE);
}
resetWorklistIndex();
}
__host__ void checkKernelErrors(char *msg) {
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
printf("\n%s: %s\n", msg, hipGetErrorString(e));
exit(-1);
}
}
__host__ void checkErrors(uint rel) {
#if CHECK_SPV
uint error = 0;
checkForErrors << <getBlocks(), THREADS_PER_BLOCK >> >(rel);
checkKernelErrors("ERROR while checking for errors");
cudaSafeCall(hipMemcpyFromSymbol(&error, __error__, uintSize, 0, D2H));
if (error) {
exit(-1);
}
#endif
}
__host__ void checkAllErrors() {
checkErrors(PTS);
checkErrors(NEXT_DIFF_PTS);
checkErrors(CURR_DIFF_PTS);
checkErrors(COPY_INV);
checkErrors(LOAD_INV);
checkErrors(STORE);
}
__host__ void addTimeToRule(uint& counter, clock_t& startTime) {
uint ellapsedTime = (int) (1000.0f * (clock() - startTime) / CLOCKS_PER_SEC);
counter += ellapsedTime;
startTime = clock();
}
__host__ void printRule(const char* msg) {
#if PRINT_RULES
printf("%s", msg);
#endif
}
template <typename Vector>
__host__ void printVector(const Vector& v, uint size) {
std::cout << "[";
for (size_t i = 0; i < size; i++) {
uint num = v[i];
if (num != NIL) {
std::cout << num;
if (i < size - 1) {
std::cout << ", ";
}
}
}
std::cout << "]";
}
__host__ void initializeEdges(uint* &constraintsName, uint &constraintNumber, uint rel) {
dim3 dimInitialize(WARP_SIZE, getThreadsPerBlock(UPDATE_THREADS_PER_BLOCK) / WARP_SIZE);
uint* constraints;
uint numConstraints;
cudaSafeCall(hipMemcpyFromSymbol(&constraints, constraintsName, sizeof(uint*)));
cudaSafeCall(hipMemcpyFromSymbol(&numConstraints, constraintNumber, uintSize));
device_ptr<uint> src(constraints);
device_vector<uint> dstIndex(numConstraints);
sequence(dstIndex.begin(), dstIndex.begin() + numConstraints);
uint numSrc = unique_by_key(src, src + numConstraints, dstIndex.begin()).first - src;
hipLaunchKernelGGL(( addEdges), dim3(getBlocks() * 3), dim3(dimInitialize), 0, 0, constraints, raw_pointer_cast(&dstIndex[0]),
constraints + numConstraints, numSrc, rel);
if (rel == STORE) {
cudaSafeCall(hipMemcpyToSymbol(__numStore__, &numSrc, uintSize));
} else {
hipFree(constraints);
}
checkKernelErrors("ERROR while adding initial edges");
}
extern "C" void createGraph(const uint numObjectVars, const uint maxOffset) {
setbuf(stdout, NULL);
printf("[dev] Creating graph and masks out of constraints...");
const uint startTime = clock();
dim3 dim(WARP_SIZE, getThreadsPerBlock(DEF_THREADS_PER_BLOCK)/ WARP_SIZE);
hipLaunchKernelGGL(( initialize), dim3(getBlocks()), dim3(dim), 0, 0, );
checkKernelErrors("ERROR at initialize");
initializeEdges(__ptsConstraints__, __numPtsConstraints__, NEXT_DIFF_PTS);
initializeEdges(__copyConstraints__, __numCopyConstraints__, COPY_INV);
initializeEdges(__loadConstraints__, __numLoadConstraints__, LOAD_INV);
initializeEdges(__storeConstraints__, __numStoreConstraints__, STORE);
// no need to add GEP_INV edges, there is only one per variable
hipLaunchKernelGGL(( createOffsetMasks), dim3(getBlocks()), dim3(dim), 0, 0, numObjectVars, maxOffset);
checkKernelErrors("ERROR while creating the offset mask");
uint* size;
cudaSafeCall(hipMemcpyFromSymbol(&size, __size__, sizeof(uint*)));
hipFree(size);
printf("OK.\n");
createTime = getEllapsedTime(startTime);
}
struct neqAdapter : public thrust::unary_function<tuple<uint, uint>, uint>{
__host__ __device__
uint operator()(const tuple<uint, uint>& a) {
return get<0>(a) != get<1>(a);
}
};
struct mulAdapter : public thrust::unary_function<tuple<uint, uint>, uint>{
__host__ __device__
uint operator()(const tuple<uint, uint>& a) {
return get<0>(a) * get<1>(a);
}
};
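/**
 * Sort the (hash, variable) pairs by hash and then overwrite key[i] with the index
 * at which the run of entries sharing that hash starts, computed as an inclusive
 * max-scan of i * (key[i] != key[i-1]). findCurrPtsEquivalents uses these start
 * indexes to compare only candidates within the same hash bucket.
 */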
__host__ void buildHashMap(device_vector<uint>& key, device_vector<uint>& val,const uint size) {
sort_by_key(key.begin(), key.begin() + size, val.begin());
thrust::maximum<uint> uintMax;
inclusive_scan(
make_transform_iterator(
make_zip_iterator(make_tuple(
make_transform_iterator(
make_zip_iterator(make_tuple(key.begin() + 1, key.begin())),
neqAdapter()),
counting_iterator<uint>(1))),
mulAdapter()),
make_transform_iterator(
make_zip_iterator(make_tuple(
make_transform_iterator(
make_zip_iterator(make_tuple(key.begin() + size, key.begin() + size - 1)),
neqAdapter()),
counting_iterator<uint>(1))),
mulAdapter()), key.begin() + 1, uintMax);
key[0] = 0;
}
extern "C" uint andersen(uint numVars) {
setbuf(stdout, NULL);
printf("[dev] Solving: ");
const uint startTime = clock();
uint iteration = 0;
uint updatePtsTime = 0;
uint hcdTime = 0;
uint ptsEquivTime = 0;
uint copyInvTime = 0;
uint storeInvTime = 0;
uint gepInvTime = 0;
dim3 dim512(WARP_SIZE, getThreadsPerBlock(512) / WARP_SIZE);
dim3 dimUpdate2(WARP_SIZE, getThreadsPerBlock(UPDATE_THREADS_PER_BLOCK) / WARP_SIZE);
dim3 dimHcd(WARP_SIZE, getThreadsPerBlock(HCD_THREADS_PER_BLOCK) / WARP_SIZE);
dim3 dimCopy(WARP_SIZE, getThreadsPerBlock(COPY_INV_THREADS_PER_BLOCK) / WARP_SIZE);
dim3 dimStore(WARP_SIZE, getThreadsPerBlock(STORE_INV_THREADS_PER_BLOCK) / WARP_SIZE);
dim3 dimGep(WARP_SIZE, getThreadsPerBlock(GEP_INV_THREADS_PER_BLOCK) / WARP_SIZE);
device_vector<uint> key(MAX_HASH_SIZE);
uint* ptr = raw_pointer_cast(&key[0]);
cudaSafeCall(hipMemcpyToSymbol(__key__, &ptr, sizeof(uint*)));
device_vector<uint> keyAux(MAX_HASH_SIZE);
ptr = raw_pointer_cast(&keyAux[0]);
cudaSafeCall(hipMemcpyToSymbol(__keyAux__, &ptr, sizeof(uint*)));
device_vector<uint> val(MAX_HASH_SIZE);
ptr = raw_pointer_cast(&val[0]);
cudaSafeCall(hipMemcpyToSymbol(__val__, &ptr, sizeof(uint*)));
clock_t ruleTime = clock();
uint blocks = getBlocks();
// TODO: mega-hack to avoid race condition on 'gcc' input.
uint hcdBlocks = getenv("GCC") ? 4 : blocks;
/**
* TODO (Jan'11)
*
* a) use pointers instead of integers for the indexes, which is possible because all the
* inputs can be analyzed using a 4GB heap. Advantages:
 * a.1) when dereferencing an index, currently we assume that it is in reality a delta with
 * respect to __edges__. Because of that, every access to an element becomes *(__edges__ + delta).
 * If we are using pointers, we could simply do *ptr. Note that __edges__ is in constant memory.
 * a.2) we could use the malloc in the CUDA libraries. Malloc could potentially be used in two
 * places: OTHER and PTS edges. In practice, we currently keep the PTS edges together because they
 * contain the solution, so we would restrict malloc to allocating copy/load/store edges. Since
 * malloc returns a pointer, it would be compatible with the index-is-a-pointer system
*
* b) HCD is buggy when many blocks are used. This happens only for the gcc input, so the
 * temporary workaround (see the "hcdBlocks" variable) is to limit the number of blocks to four.
*
* c) retrieve the amount of memory and use that as HEAP_SIZE.
*
 * d) devise a better representation scheme such that all the benchmarks fit in 3GB, so I can effectively
* use an MSI GTX580 (=> much faster than the Tesla C2070 or Quadro 6000) for all the inputs.
*/
uint ptsStartIndex;
while (1) {
//printf("\n\nIteration: %u\n", iteration);
cudaSafeCall(hipMemcpyFromSymbol(&ptsStartIndex, __ptsFreeList__, uintSize));
//printf("\tstart = %d.\n", ptsStartIndex);
printRule(" updating pts...");
hipLaunchKernelGGL(( updatePtsInformation), dim3(blocks), dim3(dimUpdate2), 0, 0, );
checkKernelErrors("ERROR at update pts");
printRule("done\n");
addTimeToRule(updatePtsTime, ruleTime);
bool done = true;
cudaSafeCall(hipMemcpyFromSymbol(&done, __done__, sizeof(bool)));
if (done) {
break;
}
// Ideally, we would use one stream to copy all the points-to edges discovered during the
// last iteration (resident in the interval [CURR_DIFF_PTS_START, __currDiffPtsFreeList__])
// back to the host while the other stream computes the next iteration, computation that does
// not modify the CURR_DIFF_PTS set. However, Thrust does not currently support streams, and
// kernel invocations using the default stream add an implicit synchronization point [CUDA 4.1
// programming guide, 3.2.5.5.4]
// If you do want to implement the simultaneous copy-kernel scheme, you can always modify
// the Thrust source code or create your custom Thrust library with the stream hardcoded on it.
// To avoid going that way, I chose to publish the version of the code that does pay a penalty
// for the data transfer.
printRule(" hcd...");
hipLaunchKernelGGL(( hcd), dim3(hcdBlocks), dim3(dimHcd), 0, 0, );
checkKernelErrors("ERROR at hcd rule");
hipLaunchKernelGGL(( updateInfo), dim3(3 * blocks), dim3(dim512), 0, 0, );
checkKernelErrors("ERROR while updating information after collapsing");
printRule("done\n");
addTimeToRule(hcdTime, ruleTime);
printRule(" finding curr_pts equivalences...");
hipLaunchKernelGGL(( computeCurrPtsHash), dim3(3 * blocks), dim3(dim512), 0, 0, );
checkKernelErrors("ERROR at compute hash");
uint numKeys;
cudaSafeCall(hipMemcpyFromSymbol(&numKeys, __numKeys__, uintSize));
buildHashMap(key, val, numKeys);
hipLaunchKernelGGL(( findCurrPtsEquivalents), dim3(3 * blocks), dim3(dim512), 0, 0, );
checkKernelErrors("ERROR in finding CURR_PTS equivalents");
printRule("done\n");
addTimeToRule(ptsEquivTime, ruleTime);
printRule(" copy_inv and load_inv and store2storeInv...");
hipLaunchKernelGGL(( copyInv_loadInv_store2storeInv), dim3(blocks), dim3(dimCopy), 0, 0, );
checkKernelErrors("ERROR at copy_inv/load_inv/store2storeinv rule");
cudaSafeCall(hipMemcpyFromSymbol(&numKeys, __numKeys__, uintSize));
assert(numKeys <= MAX_HASH_SIZE);
sort_by_key(key.begin(), key.begin() + numKeys, val.begin());
sequence(keyAux.begin(), keyAux.begin() + numKeys);
numKeys = unique_by_key(key.begin(), key.begin() + numKeys, keyAux.begin()).first - key.begin();
cudaSafeCall(hipMemcpyToSymbol(__numKeys__, &numKeys, uintSize));
printRule("done\n");
addTimeToRule(copyInvTime, ruleTime);
printRule(" store_inv...");
hipLaunchKernelGGL(( storeInv), dim3(blocks), dim3(dimStore), 0, 0, );
checkKernelErrors("ERROR at store_inv rule");
printRule("done\n");
addTimeToRule(storeInvTime, ruleTime);
printRule(" gep_inv...");
hipLaunchKernelGGL(( gepInv), dim3(blocks), dim3(dimGep), 0, 0, );
checkKernelErrors("ERROR at gep_inv rule");
printRule("done\n");
addTimeToRule(gepInvTime, ruleTime);
iteration++;
printf(".");
}
printf("OK.\n");
printf("Iterations = %u.\n", iteration);
// store the last index for the PTS elements
uint ptsEndIndex;
cudaSafeCall(hipMemcpyFromSymbol(&ptsEndIndex, __ptsFreeList__, uintSize));
uint solveTime = getEllapsedTime(startTime);
printf("SOLVE runtime: %u ms.\n", createTime + solveTime);
printf(" create graph : %u ms.\n", createTime);
printf(" rule solving : %u ms.\n", solveTime);
printf(" updatePts : %u ms.\n", updatePtsTime);
printf(" hcd : %u ms.\n", hcdTime);
printf(" equiv : %u ms.\n", ptsEquivTime);
printf(" cpLdSt2inv : %u ms.\n", copyInvTime);
printf(" store : %u ms.\n", storeInvTime);
printf(" gepInv : %u ms.\n", gepInvTime);
//printf("amount of points-to info = %d.\n", ptsEndIndex - ptsStartIndex);
// return ptsEndIndex - ptsStartIndex;
return ptsEndIndex;
}
| 27c8aaa3af9e38a0f0a931dc437a2d16d5e8b4a3.cu | /*
A GPU implementation of Andersen's analysis
Copyright (c) 2012 The University of Texas at Austin
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA, or see <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html>.
Author: Mario Mendez-Lojo
*/
#include "andersen.h"
#include <thrust/adjacent_difference.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/unique.h>
#include <thrust/functional.h>
#include <thrust/remove.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
using namespace thrust;
__constant__ uint __storeStart__;
__constant__ uint __loadInvStart__;
/**
* number of variables of the input program.
*/
__constant__ uint __numVars__;
__constant__ uint* __ptsConstraints__;
__constant__ uint __numPtsConstraints__;
__constant__ uint* __copyConstraints__;
__constant__ uint __numCopyConstraints__;
__constant__ uint* __loadConstraints__;
__constant__ uint __numLoadConstraints__;
__constant__ uint* __storeConstraints__;
__constant__ uint __numStoreConstraints__;
__device__ uint __numStore__ = 0;
__constant__ uint* __gepInv__;
__constant__ uint __numGepInv__;
__constant__ uint* __size__;
__constant__ uint* __initialRep__;
__constant__ uint* __initialNonRep__;
__constant__ uint __numInitialRep__;
__constant__ uint* __nextVar__;
/**
* Table of indexes to the information inferred by HCD.
* Each entry is a pair (index, index + delta) that refers to __hcdTable__
*/
__constant__ uint* __hcdIndex__;
__constant__ uint __numHcdIndex__;
/**
 * List of tuples (y, x_0, ..., x_(delta - 2)) where pts(*y) = pts(x_0) = ... = pts(x_(delta - 2))
 * The equivalences have been detected during the offline phase of HCD, executed on the CPU
*/
__constant__ uint* __hcdTable__;
__constant__ uint __numHcdTable__;
/**
* Representative array
*/
__constant__ volatile uint* __rep__; // HAS to be volatile
/**
* array of elements containing all the edges in the graph.
*/
__constant__ volatile uint* __edges__; // HAS to be volatile
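/**
 * Note on the element layout used throughout the kernels (see insert() and
 * getHeadIndex()): each element occupies ELEMENT_WIDTH consecutive words; the words
 * below BASE form a bit vector of destinations relative to the element's base, the
 * BASE word stores that base, and the NEXT word stores the index of the next
 * element (NIL terminates the list).
 */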
__constant__ uint* __graph__;
__constant__ uint* __lock__;
__constant__ uint* __key__;
__constant__ uint* __val__;
__constant__ uint* __keyAux__;
__device__ uint __numKeysCounter__ = 0;
__device__ uint __numKeys__;
__constant__ uint* __currPtsHead__;
__device__ uint __counter__ = 0;
__device__ uint __max__ = 0;
__device__ uint __min__ = 0;
__device__ bool __done__ = true;
__device__ uint __error__;
__device__ uint __worklistIndex0__ = 0;
__device__ uint __worklistIndex1__ = 1;
uint createTime = 0;
//////////// utility functions for the GPU /////////
__device__ uint __errorCode__ = 0;
__device__ uint __errorLine__ = 0;
__device__ char* __errorMsg__;
__device__ inline uint nextPowerOfTwo(uint v) {
return 1U << (uintSize * 8 - __clz(v - 1));
}
__device__ inline uint __count(int predicate) {
const uint ballot = __ballot(predicate);
return __popc(ballot);
}
__device__ inline uint isFirstThreadOfWarp(){
return !threadIdx.x;
}
__device__ inline uint getWarpIdInGrid(){
return (blockIdx.x * (blockDim.x * blockDim.y / WARP_SIZE) + threadIdx.y);
}
__device__ inline uint isFirstWarpOfGrid(){
return !(blockIdx.x || threadIdx.y);
}
__device__ inline uint isFirstWarpOfBlock(){
return !threadIdx.y;
}
__device__ inline uint getThreadIdInBlock(){
return mul32(threadIdx.y) + threadIdx.x;
}
__device__ inline uint isFirstThreadOfBlock(){
return !getThreadIdInBlock();
}
__device__ inline uint getThreadIdInGrid(){
return mul32(getWarpIdInGrid()) + threadIdx.x;
}
__device__ inline uint getThreadsPerBlock() {
return blockDim.x * blockDim.y;
}
__device__ inline uint isLastThreadOfBlock(){
return getThreadIdInBlock() == getThreadsPerBlock() - 1;
}
__device__ inline uint getWarpsPerBlock() {
return blockDim.y;
}
__device__ inline uint getWarpsPerGrid() {
return blockDim.y * gridDim.x;
}
__device__ inline uint getThreadsPerGrid() {
return mul32(getWarpsPerGrid());
}
__device__ inline uint getBlockIdInGrid(){
return blockIdx.x;
}
__device__ inline uint getBlocksPerGrid(){
return gridDim.x;
}
__device__ void syncAllThreads() {
__syncthreads();
uint to = getBlocksPerGrid() - 1;
if (isFirstThreadOfBlock()) {
volatile uint* counter = &__counter__;
if (atomicInc((uint*) counter, to) < to) {
while (*counter); // spinning...
}
}
__syncthreads();
}
__device__ uint getValAtThread(volatile uint* const _shared_, const uint myVal, const uint i) {
if (threadIdx.x == i) {
_shared_[threadIdx.y] = myVal;
}
return _shared_[threadIdx.y];
}
__device__ uint getValAtThread(const uint myVal, const uint i) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (threadIdx.x == i) {
_shared_[threadIdx.y] = myVal;
}
return _shared_[threadIdx.y];
}
/*
* Forward declarations
*/
__device__ void insertAll(const uint storeIndex, uint* _shared_, uint numFrom, bool sort = true);
template<uint toRel, uint fromRel>
__device__ void unionAll(const uint to, uint* _shared_, uint numFrom, bool sort = true);
template<uint toRel, uint fromRel>
__device__ void map(const uint to, const uint base, const uint myBits, uint* _shared_,
uint& numFrom);
__device__ inline uint mul960(uint num) {
// 960 = 1024 - 64
return (num << 10) - (num << 6);
}
__device__ inline uint __graphGet__(const uint row, const uint col) {
return __edges__[row + col];
}
__device__ inline uint __graphGet__(const uint pos) {
return __graph__[pos];
}
__device__ inline void __graphSet__(const uint row, const uint col, const uint val) {
__edges__[row + col] = val;
}
__device__ inline void __graphSet__(const uint pos, const uint val) {
__graph__[pos] = val;
}
__device__ inline uint _sharedGet_(volatile uint* _shared_, uint index, uint offset) {
return _shared_[index + offset];
}
__device__ inline void _sharedSet_(volatile uint* _shared_, uint index, uint offset, uint val) {
_shared_[index + offset] = val;
}
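// Index of the head element of rel(var). PTS, COPY_INV, STORE and LOAD_INV heads grow upward
// from their region start; NEXT_DIFF_PTS and CURR_DIFF_PTS heads grow downward from theirs.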
__device__ inline uint getHeadIndex(uint var, uint rel){
if (rel == NEXT_DIFF_PTS) {
return NEXT_DIFF_PTS_START - mul32(var);
}
if (rel == COPY_INV) {
return COPY_INV_START + mul32(var);
}
if (rel == CURR_DIFF_PTS) {
return CURR_DIFF_PTS_START - mul32(var);
}
if (rel == PTS) {
return mul32(var);
}
if (rel == STORE) {
return __storeStart__ + mul32(var);
}
// it has to be LOAD_INV, right?
return __loadInvStart__ + mul32(var);
}
__device__ inline uint getNextDiffPtsHeadIndex(uint var){
return NEXT_DIFF_PTS_START - mul32(var);
}
__device__ inline uint getCopyInvHeadIndex(uint var){
return COPY_INV_START + mul32(var);
}
__device__ inline uint getCurrDiffPtsHeadIndex(uint var){
return CURR_DIFF_PTS_START - mul32(var);
}
__device__ inline uint getPtsHeadIndex(uint var){
return mul32(var);
}
__device__ inline uint getStoreHeadIndex(uint var){
return __storeStart__ + mul32(var);
}
__device__ inline uint getLoadInvHeadIndex(uint var){
return __loadInvStart__ + mul32(var);
}
__device__ inline int isEmpty(uint var, uint rel) {
const uint headIndex = getHeadIndex(var, rel);
return __graphGet__(headIndex, BASE) == NIL;
}
/**
* Mask that tells whether the variables contained in an element have size > offset.
* There is one such mask per offset.
* Stored in compressed format.
*/
__constant__ uint* __offsetMask__;
/**
* Number of rows needed to represent the mask of ONE offset.
* = ceil(numObjectVars / DST_PER_ELEMENT), since non-object pointers have size 1.
*/
__constant__ uint __offsetMaskRowsPerOffset__;
__device__ inline uint __offsetMaskGet__(const uint base, const uint col, const uint offset) {
return __offsetMask__[mul32((offset - 1) * __offsetMaskRowsPerOffset__ + base) + col];
}
__device__ inline void __offsetMaskSet__(const uint base, const uint col, const uint offset,
const uint val) {
__offsetMask__[mul32((offset - 1) * __offsetMaskRowsPerOffset__ + base) + col] = val;
}
/**
* Mask that tells whether the pts-to set of an element changed.
* The BASE and NEXT words are always equal to 0.
* Stored in compressed format.
*/
__constant__ uint* __diffPtsMask__;
__device__ inline uint __diffPtsMaskGet__(const uint base, const uint col) {
return __diffPtsMask__[mul32(base) + col];
}
__device__ inline void __diffPtsMaskSet__(const uint base, const uint col, const uint val) {
__diffPtsMask__[mul32(base) + col] = val;
}
/**
* Index of the next free element in the corresponding free list.
* The index is given in words, not bytes or number of elements.
*/
__device__ uint __ptsFreeList__,__nextDiffPtsFreeList__, __currDiffPtsFreeList__, __otherFreeList__;
__device__ inline uint mallocPts(uint size = ELEMENT_WIDTH) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicAdd(&__ptsFreeList__, size);
}
return _shared_[threadIdx.y];
}
__device__ inline uint mallocNextDiffPts() {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicSub(&__nextDiffPtsFreeList__, ELEMENT_WIDTH);
}
return _shared_[threadIdx.y];
}
__device__ inline uint mallocCurrDiffPts() {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicSub(&__currDiffPtsFreeList__, ELEMENT_WIDTH);
}
return _shared_[threadIdx.y];
}
__device__ inline uint mallocOther() {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicAdd(&__otherFreeList__, ELEMENT_WIDTH);
}
return _shared_[threadIdx.y];
}
__device__ inline uint mallocIn(uint rel) {
if (rel == NEXT_DIFF_PTS) {
return mallocNextDiffPts();
}
if (rel >= COPY_INV) {
return mallocOther();
}
if (rel == PTS) {
return mallocPts();
}
if (rel == CURR_DIFF_PTS) {
return mallocCurrDiffPts();
}
//printf("WTF! (%u)", rel);
return 0;
}
/**
* Get and increment the current worklist index
* Granularity: warp
* @param delta Number of elements to be retrieved at once
* @return Worklist index 'i'. All the work items in the [i, i + delta) interval are guaranteed
* to be assigned to the current warp.
*/
__device__ inline uint getAndIncrement(const uint delta) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicAdd(&__worklistIndex0__, delta);
}
return _shared_[threadIdx.y];
}
__device__ inline uint getAndIncrement(uint* counter, uint delta) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicAdd(counter, delta);
}
return _shared_[threadIdx.y];
}
/**
* Lock a given variable
* Granularity: warp
* @param var Id of the variable
* @return A non-zero value if the operation succeeded
*/
__device__ inline uint lock(const uint var) {
return __any(isFirstThreadOfWarp() && (atomicCAS(__lock__ + var, UNLOCKED, LOCKED)
== UNLOCKED));
}
/**
* Unlock a variable
* Granularity: warp or thread
* @param var Id of the variable
*/
__device__ inline void unlock(const uint var) {
__lock__[var] = UNLOCKED;
}
__device__ inline int isRep(const uint var) {
return __rep__[var] == var;
}
__device__ inline void setRep(const uint var, const uint rep) {
__rep__[var] = rep;
}
__device__ inline uint getRep(const uint var) {
return __rep__[var];
}
__device__ inline uint getRepRec(const uint var) {
uint rep = var;
uint repRep = __rep__[rep];
while (repRep != rep) {
rep = repRep;
repRep = __rep__[rep];
}
return rep;
}
__device__ ulongint recordStartTime() {
__shared__ volatile ulongint _ret_[MAX_WARPS_PER_BLOCK];
if (isFirstThreadOfWarp()) {
_ret_[threadIdx.y] = clock();
}
return _ret_[threadIdx.y];
}
__device__ void recordElapsedTime(ulongint start){
if (isFirstThreadOfWarp()) {
ulongint delta;
ulongint end = clock();
if (end > start) {
delta = end - start;
} else {
delta = end + (0xffffffff - start);
}
double time = TICKS_TO_MS(delta);
printf("Block %u, Warp: %u: %8.2f ms.\n", blockIdx.x, threadIdx.y, time);
}
}
__device__ inline uint decodeWord(const uint base, const uint word, const uint bits) {
uint ret = mul960(base) + mul32(word);
return (isBitActive(bits, threadIdx.x)) ? __rep__[ret + threadIdx.x] : NIL;
}
__device__ inline void swap(volatile uint* const keyA, volatile uint* const keyB, const uint dir) {
uint n1 = *keyA;
uint n2 = *keyB;
if ((n1 < n2) != dir) {
*keyA = n2;
*keyB = n1;
}
}
// Bitonic Sort, in ascending order using one WARP
// precondition: size of _shared_ has to be a power of 2
__device__ inline void bitonicSort(volatile uint* const _shared_, const uint to) {
for (int size = 2; size <= to; size <<= 1) {
for (int stride = size / 2; stride > 0; stride >>= 1) {
for (int id = threadIdx.x; id < (to / 2); id += WARP_SIZE) {
const uint myDir = ((id & (size / 2)) == 0);
uint pos = 2 * id - mod(id, stride);
volatile uint* start = _shared_ + pos;
swap(start, start + stride, myDir);
}
}
}
}
__device__ void blockBitonicSort(volatile uint* _shared_, uint to) {
uint idInBlock = getThreadIdInBlock();
for (int size = 2; size <= to; size <<= 1) {
for (int stride = size / 2; stride > 0; stride >>= 1) {
__syncthreads();
for (int id = idInBlock; id < (to / 2); id += getThreadsPerBlock()) {
const uint myDir = ((id & (size / 2)) == 0);
uint pos = 2 * id - mod(id, stride);
volatile uint* start = _shared_ + pos;
swap(start, start + stride, myDir);
}
}
}
}
/**
* Sort an array in ascending order.
* Granularity: block
* @param _shared_ list of integers
* @param to size of the sublist we want to process
*/
__device__ void blockSort(volatile uint* _shared_, uint to) {
uint size = max(nextPowerOfTwo(to), 32);
uint id = getThreadIdInBlock();
for (int i = to + id; i < size; i += getThreadsPerBlock()) {
_shared_[i] = NIL;
}
blockBitonicSort(_shared_, size);
__syncthreads();
}
/**
* Remove duplicates from a sorted sequence; equivalent to Thrust's 'unique' function but uses one warp.
* If there are NILs, they are treated like any other number.
* precondition: the input list is sorted
* precondition: to >= 32
* precondition: shared_[-1] exists and is equal to NIL
* Granularity: warp
*
* @param _shared_ list of integers
* @param to size of the sublist we want to process
* @return number of unique elements in the input.
*/
__device__ inline uint unique(volatile uint* const _shared_, uint to) {
uint startPos = 0;
uint myMask = (1 << (threadIdx.x + 1)) - 1;
for (int id = threadIdx.x; id < to; id += WARP_SIZE) {
uint myVal = _shared_[id];
uint fresh = __ballot(myVal != _shared_[id - 1]);
// pos = starting position + number of 1's to my right (incl. myself) minus one
uint pos = startPos + __popc(fresh & myMask) - 1;
_shared_[pos] = myVal;
startPos += __popc(fresh);
}
return startPos;
}
__device__ uint removeDuplicates(volatile uint* const _shared_, const uint to) {
const uint size = max(nextPowerOfTwo(to), 32);
for (int i = to + threadIdx.x; i < size; i += WARP_SIZE) {
_shared_[i] = NIL;
}
bitonicSort(_shared_, size);
uint ret = unique(_shared_, size);
return (size > to) ? ret - 1 : ret;
}
__device__ void print(uint* m, const uint size) {
if (!isFirstThreadOfWarp())
return;
//printf("[");
for (int i = 0; i < size; i++) {
//printf("%u", m[i]);
if (i < size - 1) {
//printf(", ");
}
}
//printf ("]");
}
__device__ void print(int* m, const uint size) {
if (!isFirstThreadOfWarp())
return;
//printf("[");
for (int i = 0; i < size; i++) {
//printf("%d", m[i]);
if (i < size - 1) {
//printf(", ");
}
}
//printf ("]");
}
__device__ volatile uint __printBuffer__[PRINT_BUFFER_SIZE];
// TODO: assumes we print with 1 block and 1 warp...
__device__ void printElementAsSet(const uint base, volatile uint myBits, bool& first) {
for (int i = 0; i < BASE; i++) {
uint word = getValAtThread(myBits, i);
uint myDst = decodeWord(base, i, word);
for (int j = 0; j < WARP_SIZE; j++) {
uint dst = getValAtThread(myDst, j);
if (dst != NIL && isFirstThreadOfWarp()) {
if (first) {
//printf("%u", dst);
} else {
//printf(", %u", dst);
}
first = false;
}
}
}
}
__device__ void printDiffPtsMask() {
uint numVars = __numVars__;
if (isFirstThreadOfWarp()) {
//printf("DIFF_PTS_MASK: [");
}
bool first = true;
int to = ceil((float) numVars / (float) ELEMENT_CARDINALITY);
for (int base = 0; base < to; base++) {
uint myBits = __diffPtsMaskGet__(base, threadIdx.x);
printElementAsSet(base, myBits, first);
}
if (isFirstThreadOfWarp())
;//printf("]\n");
}
__global__ void __printDiffPtsMask() {
printDiffPtsMask();
}
__device__ void printOffsetMask(uint numObjectsVars, uint offset) {
if (isFirstThreadOfWarp()) {
//printf("MASK for offset %u: [", offset);
}
bool first = true;
int to = __offsetMaskRowsPerOffset__;
for (int base = 0; base < to; base++) {
uint myBits = __offsetMaskGet__(base, threadIdx.x, offset);
printElementAsSet(base, myBits, first);
}
if (isFirstThreadOfWarp())
;//printf("]\n");
}
__device__ void printOffsetMasks(uint numObjectsVars, uint maxOffset) {
if (!isFirstWarpOfGrid()) {
return;
}
for (int i = 1; i <= maxOffset; i++) {
printOffsetMask(numObjectsVars, i);
}
}
__global__ void __printOffsetMasks(uint numObjectsVars, uint maxOffset) {
printOffsetMasks(numObjectsVars, maxOffset);
}
__device__ void printElementRec(uint index) {
volatile uint myBits = __graphGet__(index, threadIdx.x);
if (__all(myBits == NIL)) {
return;
}
while (index != NIL) {
//printf("Thread: %u, value: %u\n", threadIdx.x, myBits);
index = __graphGet__(index, NEXT);
if (index != NIL) {
myBits = __graphGet__(index, threadIdx.x);
}
}
}
__device__ void printSharedElementRec(uint* volatile _shared_, uint index) {
volatile uint myBits = _sharedGet_(_shared_, index, threadIdx.x);
if (__all(myBits == NIL)) {
return;
}
while (index != NIL) {
//printf("Thread: %u, value: %u\n", threadIdx.x, myBits);
index = _sharedGet_(_shared_, index, NEXT);
if (index != NIL) {
myBits = _sharedGet_(_shared_, index, threadIdx.x);
}
}
}
__device__ void accumulate(const uint base, uint myBits, uint& numFrom, uint rel) {
uint nonEmpty = __ballot(myBits && threadIdx.x < BASE);
while (nonEmpty) {
uint pos = __ffs(nonEmpty) - 1;
nonEmpty &= (nonEmpty - 1);
uint bits = getValAtThread(myBits, pos);
uint numOnes = __popc(bits);
//cudaAssert(numFrom + numOnes > PRINT_BUFFER_SIZE);
uint var = mul960(base) + mul32(pos) + threadIdx.x;
// PTS edges: we do not use representatives. In all the other relations we do.
var = isBitActive(bits, threadIdx.x) ? (rel > CURR_DIFF_PTS ? __rep__[var] : var) : NIL;
pos = numFrom + __popc(bits & ((1 << threadIdx.x) - 1));
if (var != NIL) {
__printBuffer__[pos] = var;
}
numFrom += numOnes;
}
}
__device__ void printEdges(const uint src, const uint rel, const uint printEmptySets) {
if (isEmpty(src, rel) && !printEmptySets) {
return;
}
if (isFirstThreadOfWarp()) {
//printf("%d => [", src);
}
uint index = getHeadIndex(src, rel);
uint numFrom = 0;
do {
uint myBits = __graphGet__(index, threadIdx.x);
uint base = __graphGet__(index, BASE);
if (base == NIL) {
break;
}
index = __graphGet__(index, NEXT);
accumulate(base, myBits, numFrom, rel);
} while (index != NIL);
if (numFrom) {
if (rel > CURR_DIFF_PTS) {
numFrom = removeDuplicates(__printBuffer__, numFrom);
}
for (int i = 0; i < numFrom; i++) {
uint val = __printBuffer__[i]; // has to be non-NIL
if (isFirstThreadOfWarp()) {
if (!i) {
//printf("%u", val);
} else {
//printf(", %u", val);
}
}
}
}
if (isFirstThreadOfWarp()) {
//printf("]\n");
}
}
__device__ void printEdgesOf(const uint src, int rel) {
if (isFirstThreadOfWarp()) {
//printf("%s of ", getName(rel));
}
printEdges(src, rel, 1);
}
__device__ void printEdgesStartingAt(uint index, int rel) {
if (isFirstThreadOfWarp()) {
//printf("%s @ %u => [", getName(rel), index);
}
uint numFrom = 0;
do {
uint myBits = __graphGet__(index, threadIdx.x);
uint base = __graphGet__(index, BASE);
if (base == NIL) {
break;
}
index = __graphGet__(index, NEXT);
accumulate(base, myBits, numFrom, rel);
} while (index != NIL);
if (numFrom) {
if (rel > CURR_DIFF_PTS) {
numFrom = removeDuplicates(__printBuffer__, numFrom);
}
for (int i = 0; i < numFrom; i++) {
uint val = __printBuffer__[i]; // has to be non-NIL
if (isFirstThreadOfWarp()) {
if (!i) {
//printf("%u", val);
} else {
//printf(", %u", val);
}
}
}
}
if (isFirstThreadOfWarp()) {
//printf("]\n");
}
}
__device__ void printEdgesOf(uint src) {
for (int i = 0; i <= LAST_DYNAMIC_REL; i++) {
printEdgesOf(src, i);
}
}
__global__ void __printEdgesOf(uint src, int rel) {
printEdgesOf(src, rel);
}
__global__ void __printEdgesOf(uint src) {
printEdgesOf(src);
}
__device__ void printEdges(int rel) {
if (isFirstThreadOfWarp()) {
//printf("%s edges:\n", getName(rel));
}
for (int src = 0; src < __numVars__; src++) {
printEdges(src, rel, 0);
}
}
__global__ void __printEdges(int rel) {
printEdges(rel);
}
__device__ void printGepEdges() {
uint numVarsGepInv = __numGepInv__;
if (isFirstThreadOfWarp()) {
//printf("GEP_INV edges:\n");
}
volatile __shared__ uint _shared_[WARP_SIZE];
for (int i = 0; i < numVarsGepInv; i += WARP_SIZE) {
_shared_[threadIdx.x] = __gepInv__[i + threadIdx.x];
for (int j = 0; j < WARP_SIZE && _shared_[j] != NIL; j += 2) {
uint dst = _shared_[j];
uint srcOffset = _shared_[j + 1];
if (isFirstThreadOfWarp()) {
//printf("%u => %u (%u)\n", dst, id(srcOffset), offset(srcOffset));
}
}
}
}
__global__ void __printGepEdges() {
printGepEdges();
}
__device__ void printConstraints(uint* __constraints__, const uint numConstraints) {
volatile __shared__ uint _shared_[WARP_SIZE];
for (int i = 0; i < numConstraints * 2; i += WARP_SIZE) {
_shared_[threadIdx.x] = __constraints__[i + threadIdx.x];
for (int j = 0; j < WARP_SIZE; j += 2) {
if (i + j >= numConstraints * 2) {
return;
}
uint src = _shared_[j];
uint dst = _shared_[j + 1];
if (isFirstThreadOfWarp()) {
//printf("%u => %u\n", src, dst);
}
}
}
}
__device__ int checkForErrors(uint var, uint rel) {
uint index = getHeadIndex(var, rel);
uint lastBase = 0;
uint first = 1;
uint bits = __graphGet__(index, threadIdx.x);
if (__all(bits == NIL)) {
return 0;
}
do {
bits = __graphGet__(index, threadIdx.x);
if (__all(threadIdx.x >= BASE || bits == NIL)) {
if (isFirstThreadOfWarp()) {
//printf("ERROR: empty element at %s of %u \n", getName(rel), var);
}
//printElementRec(getHeadIndex(var, rel));
__error__ = 1;
return 1;
}
uint base = __graphGet__(index, BASE);
index = __graphGet__(index, NEXT);
if (base == NIL) {
if (isFirstThreadOfWarp()) {
//printf("ERROR: inconsistency at %s of %u: BASE is NIL but other word is not\n",
//getName(rel), var);
}
printElementRec(getHeadIndex(var, rel));
__error__ = 1;
return 1;
}
if (!first && base <= lastBase) {
if (isFirstThreadOfWarp()) {
//printf("ERROR: BASE(element) = %u <= BASE(prev(element)) = %u at %s of %u\n", base,
//lastBase, getName(rel), var);
}
//printElementRec(getHeadIndex(var, rel));
__error__ = 1;
return 1;
}
first = 0;
lastBase = base;
} while (index != NIL);
return 0;
}
__global__ void checkForErrors(uint rel) {
uint numVars = __numVars__;
int inc = mul32(getWarpsPerGrid());
int init = mul32(getWarpIdInGrid());
for (int initVar = init; initVar < numVars; initVar += inc) {
for (int i = 0; i < WARP_SIZE; i++) {
uint var = initVar + i;
if (var >= numVars || checkForErrors(var, rel)) {
return;
}
}
}
}
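// Warp-cooperative hash of the element list starting at `index`. Only the 30 data words
// contribute: the reduction pattern (14, 8, 4, then lanes 0..3) folds lanes 0..29 exactly once
// and leaves the BASE and NEXT lanes out.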
__device__ uint hashCode(uint index) {
__shared__ uint _sh_[DEF_THREADS_PER_BLOCK];
volatile uint* _shared_ = &_sh_[threadIdx.y * WARP_SIZE];
uint myRet = 0;
uint bits = __graphGet__(index + threadIdx.x);
uint base = __graphGet__(index + BASE);
if (base == NIL) {
return 0;
}
while (1) {
uint elementHash = base * (30 + threadIdx.x) ^ bits;
if (bits) {
myRet ^= elementHash;
}
index = __graphGet__(index + NEXT);
if (index == NIL) {
break;
}
bits = __graphGet__(index + threadIdx.x);
base = __graphGet__(index + BASE);
}
_shared_[threadIdx.x] = myRet;
if (threadIdx.x < 14) {
_shared_[threadIdx.x] ^= _shared_[threadIdx.x + WARP_SIZE / 2];
}
if (threadIdx.x < 8) {
_shared_[threadIdx.x] ^= _shared_[threadIdx.x + WARP_SIZE / 4];
}
if (threadIdx.x < 4) {
_shared_[threadIdx.x] ^= _shared_[threadIdx.x + WARP_SIZE / 8];
}
return _shared_[0] ^ _shared_[1] ^ _shared_[2] ^ _shared_[3];
}
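// Return non-zero iff the two element lists encode the same set (word-by-word comparison,
// ignoring the NEXT pointers).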
__device__ uint equal(uint index1, uint index2) {
uint bits1 = __graphGet__(index1 + threadIdx.x);
uint bits2 = __graphGet__(index2 + threadIdx.x);
while (__all((threadIdx.x == NEXT) || (bits1 == bits2))) {
index1 = __graphGet__(index1 + NEXT);
index2 = __graphGet__(index2 + NEXT);
if (index1 == NIL || index2 == NIL) {
return index1 == index2;
}
bits1 = __graphGet__(index1 + threadIdx.x);
bits2 = __graphGet__(index2 + threadIdx.x);
}
return 0;
}
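// Cardinality of rel(var): total number of bits set in the data words of its element list.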
__device__ uint size(uint var, uint rel) {
__shared__ uint _sh_[DEF_THREADS_PER_BLOCK];
volatile uint* _shared_ = &_sh_[threadIdx.y * WARP_SIZE];
if (isEmpty(var, rel)) {
return 0;
}
uint index = getHeadIndex(var, rel);
uint myRet = 0;
do {
uint myBits = __graphGet__(index, threadIdx.x);
index = __graphGet__(index, NEXT);
myRet += __popc(myBits);
} while (index != NIL);
_shared_[threadIdx.x] = threadIdx.x >= BASE ? 0 : myRet;
for (int stride = WARP_SIZE / 2; stride > 0; stride >>= 1) {
if (threadIdx.x < stride) {
_shared_[threadIdx.x] += _shared_[threadIdx.x + stride];
}
}
return _shared_[0];
}
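// Union the element list starting at fromIndex into COPY_INV(to). When applyCopy is set, the
// copy rule is applied on the fly: the PTS set of every newly added source is scheduled for
// union into NEXT_DIFF_PTS(to).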
__device__ void unionToCopyInv(const uint to, const uint fromIndex, uint* const _shared_,
bool applyCopy = true) {
uint toIndex = getCopyInvHeadIndex(to);
if (fromIndex == toIndex) {
return;
}
uint fromBits = __graphGet__(fromIndex + threadIdx.x);
uint fromBase = __graphGet__(fromIndex + BASE);
if (fromBase == NIL) {
return;
}
uint fromNext = __graphGet__(fromIndex + NEXT);
uint toBits = __graphGet__(toIndex + threadIdx.x);
uint toBase = __graphGet__(toIndex + BASE);
uint toNext = __graphGet__(toIndex + NEXT);
uint numFrom = 0;
uint newVal;
while (1) {
if (toBase > fromBase) {
if (toBase == NIL) {
newVal = fromNext == NIL ? NIL : mallocOther();
} else {
newVal = mallocOther();
__graphSet__(newVal + threadIdx.x, toBits);
}
fromBits = threadIdx.x == NEXT ? newVal : fromBits;
__graphSet__(toIndex + threadIdx.x, fromBits);
if (applyCopy) {
map<NEXT_DIFF_PTS, PTS>(to, fromBase, fromBits, _shared_, numFrom);
}
if (fromNext == NIL) {
break;
}
toIndex = newVal;
fromBits = __graphGet__(fromNext + threadIdx.x);
fromBase = __graphGet__(fromNext + BASE);
fromNext = __graphGet__(fromNext + NEXT);
} else if (toBase == fromBase) {
uint orBits = fromBits | toBits;
uint diffs = __any(orBits != toBits && threadIdx.x < NEXT);
bool nextWasNil = false;
if (toNext == NIL && fromNext != NIL) {
toNext = mallocOther();
nextWasNil = true;
}
uint newBits = threadIdx.x == NEXT ? toNext : orBits;
if (newBits != toBits) {
__graphSet__(toIndex + threadIdx.x, newBits);
}
// if there was any element added to COPY_INV, apply COPY_INV rule
if (applyCopy && diffs) {
uint diffBits = fromBits & ~toBits;
map<NEXT_DIFF_PTS, PTS > (to, fromBase, diffBits, _shared_, numFrom);
}
//advance `to` and `from`
if (fromNext == NIL) {
break;
}
toIndex = toNext;
if (nextWasNil) {
toBits = NIL;
toBase = NIL;
toNext = NIL;
} else {
toBits = __graphGet__(toIndex + threadIdx.x);
toBase = __graphGet__(toIndex + BASE);
toNext = __graphGet__(toIndex + NEXT);
}
fromBits = __graphGet__(fromNext + threadIdx.x);
fromBase = __graphGet__(fromNext + BASE);
fromNext = __graphGet__(fromNext + NEXT);
} else { //toBase < fromBase
if (toNext == NIL) {
uint newNext = mallocOther();
__graphSet__(toIndex + NEXT, newNext);
toIndex = newNext;
toBits = NIL;
toBase = NIL;
} else {
toIndex = toNext;
toBits = __graphGet__(toNext + threadIdx.x);
toBase = __graphGet__(toIndex + BASE);
toNext = __graphGet__(toNext + NEXT);
}
}
}
if (applyCopy && numFrom) {
// flush pending unions
unionAll<NEXT_DIFF_PTS, PTS> (to, _shared_, numFrom);
}
}
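// Copy the element chain described by (fromBits, fromNext) into freshly allocated elements of
// relation toRel, starting at toIndex.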
__device__ void clone(uint toIndex, uint fromBits, uint fromNext, const uint toRel) {
while (1) {
uint newIndex = fromNext == NIL ? NIL : mallocIn(toRel);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
__graphSet__(toIndex + threadIdx.x, val);
if (fromNext == NIL) {
break;
}
toIndex = newIndex;
fromBits = __graphGet__(fromNext + threadIdx.x);
fromNext = __graphGet__(fromNext + NEXT);
}
}
// toRel = any non-static relationship
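// Union the element list starting at fromIndex into rel(to): a sorted merge by BASE that
// allocates new elements as needed.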
__device__ void unionG2G(const uint to, const uint toRel, const uint fromIndex) {
uint toIndex = getHeadIndex(to, toRel);
uint fromBits = __graphGet__(fromIndex + threadIdx.x);
uint fromBase = __graphGet__(fromIndex + BASE);
if (fromBase == NIL) {
return;
}
uint fromNext = __graphGet__(fromIndex + NEXT);
uint toBits = __graphGet__(toIndex + threadIdx.x);
uint toBase = __graphGet__(toIndex + BASE);
if (toBase == NIL) {
clone(toIndex, fromBits, fromNext, toRel);
return;
}
uint toNext = __graphGet__(toIndex + NEXT);
while (1) {
if (toBase > fromBase) {
uint newIndex = mallocIn(toRel);
__graphSet__(newIndex + threadIdx.x, toBits);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
__graphSet__(toIndex + threadIdx.x, val);
// advance 'from'
if (fromNext == NIL) {
return;
}
toIndex = newIndex;
fromBits = __graphGet__(fromNext + threadIdx.x);
fromBase = __graphGet__(fromNext + BASE);
fromNext = __graphGet__(fromNext + NEXT);
} else if (toBase == fromBase) {
uint newToNext = (toNext == NIL && fromNext != NIL) ? mallocIn(toRel) : toNext;
uint orBits = fromBits | toBits;
uint newBits = threadIdx.x == NEXT ? newToNext : orBits;
if (newBits != toBits) {
__graphSet__(toIndex + threadIdx.x, newBits);
}
//advance `to` and `from`
if (fromNext == NIL) {
return;
}
fromBits = __graphGet__(fromNext + threadIdx.x);
fromBase = __graphGet__(fromNext + BASE);
fromNext = __graphGet__(fromNext + NEXT);
if (toNext == NIL) {
clone(newToNext, fromBits, fromNext, toRel);
return;
}
toIndex = newToNext;
toBits = __graphGet__(toNext + threadIdx.x);
toBase = __graphGet__(toNext + BASE);
toNext = __graphGet__(toNext + NEXT);
} else { // toBase < fromBase
if (toNext == NIL) {
toNext = mallocIn(toRel);
__graphSet__(toIndex + NEXT, toNext);
clone(toNext, fromBits, fromNext, toRel);
return;
}
toIndex = toNext;
toBits = __graphGet__(toNext + threadIdx.x);
toBase = __graphGet__(toNext + BASE);
toNext = __graphGet__(toNext + NEXT);
}
}
}
// WATCH OUT: ASSUMES fromRel==toRel
// like unionTo, but reusing the elements of 'from' (introduces sharing of elements)
// toRel = any non-static relationship
__device__ void unionG2GRecycling(const uint to, const uint toRel, uint fromIndex) {
uint fromBits = __graphGet__(fromIndex, threadIdx.x);
uint fromBase = __graphGet__(fromIndex, BASE);
if (fromBase == NIL) {
return;
}
uint toIndex = getHeadIndex(to, toRel);
uint toBits = __graphGet__(toIndex, threadIdx.x);
uint toBase = __graphGet__(toIndex, BASE);
if (toBase == NIL) {
__graphSet__(toIndex, threadIdx.x, fromBits);
return;
}
uint toNext = __graphGet__(toIndex, NEXT);
uint fromNext = __graphGet__(fromIndex, NEXT);
uint fromHeadIndex = fromIndex;
do {
if (toBase == fromBase) {
uint newToNext = (toNext == NIL) ? fromNext : toNext;
uint orBits = fromBits | toBits;
uint newBits = threadIdx.x == NEXT ? newToNext : orBits;
if (newBits != toBits) {
__graphSet__(toIndex, threadIdx.x, newBits);
}
//advance `to` and `from`
if (toNext == NIL || fromNext == NIL) { // done with current elt and there is no NEXT => exit
return;
}
fromIndex = fromNext;
fromBits = __graphGet__(fromIndex, threadIdx.x);
fromBase = __graphGet__(fromIndex, BASE);
fromNext = __graphGet__(fromIndex, NEXT);
toIndex = toNext;
toBits = __graphGet__(toIndex, threadIdx.x);
toBase = __graphGet__(toIndex, BASE);
toNext = __graphGet__(toIndex, NEXT);
} else if (toBase < fromBase) {
if (toNext == NIL) {
if (fromIndex == fromHeadIndex) {
fromIndex = mallocIn(toRel);
__graphSet__(fromIndex, threadIdx.x, fromBits);
}
__graphSet__(toIndex, NEXT, fromIndex);
return;
}
// advance 'to'
toIndex = toNext;
toBits = __graphGet__(toIndex, threadIdx.x);
toBase = __graphGet__(toIndex, BASE);
toNext = __graphGet__(toIndex, NEXT);
} else { // toBase > fromBase
if (fromIndex == fromHeadIndex) {
fromIndex = mallocIn(toRel);
}
__graphSet__(fromIndex, threadIdx.x, toBits);
int val = threadIdx.x == NEXT ? fromIndex : fromBits;
__graphSet__(toIndex, threadIdx.x, val);
toIndex = fromIndex; // toBits does not change
fromIndex = fromNext;
if (fromNext != NIL) {
//advance 'from'
fromBits = __graphGet__(fromIndex, threadIdx.x);
fromBase = __graphGet__(fromIndex, BASE);
fromNext = __graphGet__(fromIndex, NEXT);
}
}
} while (fromIndex != NIL);
}
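// Insert one element (fromBase, fromBits) into the sorted list starting at `index`; returns the
// index of the element that now holds that base.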
__device__ uint addVirtualElement(uint index, const uint fromBase, const uint fromBits,
const uint toRel) {
for (;;) {
uint toBits = __graphGet__(index + threadIdx.x);
uint toBase = __graphGet__(index + BASE);
if (toBase == NIL) {
// can only happen if the adjacency list of `to` is empty
// cost: exactly one global write
__graphSet__(index + threadIdx.x, fromBits);
return index;
}
if (toBase == fromBase) {
// cost: at most one global write
uint orBits = toBits | fromBits;
if (orBits != toBits && threadIdx.x < NEXT) {
__graphSet__(index + threadIdx.x, orBits);
}
return index;
}
if (toBase < fromBase) {
uint toNext = getValAtThread(toBits, NEXT);
if (toNext == NIL) {
// appending; cost: two global writes
uint newIndex = mallocIn(toRel);
__graphSet__(newIndex + threadIdx.x, fromBits);
__graphSet__(index + NEXT, newIndex);
return newIndex;
}
index = toNext;
} else {
// cost: two global writes
uint newIndex = mallocIn(toRel);
__graphSet__(newIndex + threadIdx.x, toBits);
uint val = threadIdx.x == NEXT ? newIndex : fromBits;
__graphSet__(index + threadIdx.x, val);
return index;
}
}
}
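// Add variable `var` to the set whose list contains the element at `index`; returns the index of
// the element holding var's base, which callers reuse as a hint for subsequent inserts.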
__device__ uint insert(const uint index, const uint var, const int rel) {
uint base = BASE_OF(var);
uint word = WORD_OF(var);
uint bit = BIT_OF(var);
uint myBits = 0;
if (threadIdx.x == word) {
myBits = 1 << bit;
} else if (threadIdx.x == BASE) {
myBits = base;
} else if (threadIdx.x == NEXT) {
myBits = NIL;
}
return addVirtualElement(index, base, myBits, rel);
}
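// The last block to finish resets the global worklist index and returns 1; other blocks return 0.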
__device__ inline uint resetWorklistIndex() {
__syncthreads();
uint numBlocks = getBlocksPerGrid();
if (isFirstThreadOfBlock() && atomicInc(&__counter__, numBlocks - 1) == (numBlocks - 1)) {
__worklistIndex0__ = 0;
__counter__ = 0;
return 1;
}
return 0;
}
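// Kernel: bulk-insert edges into relation `rel`. __key__ holds the source variables, __keyAux__
// holds CSR-style offsets into __val__, which holds the destination variables.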
__global__ void addEdges(uint* __key__, uint* __keyAux__, uint* __val__, const uint to, uint rel) {
__shared__ uint _sh_[WARPS_PER_BLOCK(DEF_THREADS_PER_BLOCK) * WARP_SIZE];
uint* _shared_ = &_sh_[threadIdx.y * WARP_SIZE];
uint i = getAndIncrement(1);
while (i < to) {
uint src = __key__[i];
if (src == NIL) {
break;
}
uint index = getHeadIndex(src, rel);
uint startIndex = __keyAux__[i];
uint end = __keyAux__[i + 1];
uint start = roundToPrevMultipleOf(startIndex, WARP_SIZE); // to ensure alignment
for (int j = start; j < end; j += WARP_SIZE) {
uint myIndex = j + threadIdx.x;
_shared_[threadIdx.x] = myIndex < end ? __val__[myIndex] : NIL;
uint startK = max(((int) startIndex) - j, 0);
uint endK = min(end - j, WARP_SIZE);
for (int k = startK; k < endK; k++) {
uint dst = _shared_[k];
index = insert(index, dst, rel);
}
}
i = getAndIncrement(1);
}
resetWorklistIndex();
}
template<uint toRel, uint fromRel>
__device__ inline void unionAll(const uint to, uint* const _shared_, uint numFrom, bool sort) {
if (numFrom > 1 && sort) {
numFrom = removeDuplicates(_shared_, numFrom);
}
for (int i = 0; i < numFrom; i++) {
uint fromIndex = _shared_[i];
if (fromRel != CURR_DIFF_PTS) {
fromIndex = getHeadIndex(fromIndex, fromRel);
}
if (toRel == COPY_INV) {
unionToCopyInv(to, fromIndex, _shared_ + DECODE_VECTOR_SIZE + 1);
} else {
unionG2G(to, toRel, fromIndex);
}
}
}
template<uint toRel, uint fromRel>
__device__ void map(uint to, const uint base, const uint myBits, uint* const _shared_,
uint& numFrom) {
uint nonEmpty = __ballot(myBits) & LT_BASE;
const uint threadMask = 1 << threadIdx.x;
const uint myMask = threadMask - 1;
const uint mul960base = mul960(base);
while (nonEmpty) {
uint pos = __ffs(nonEmpty) - 1;
nonEmpty &= (nonEmpty - 1);
uint bits = getValAtThread(myBits, pos);
uint var = getRep(mul960base + mul32(pos) + threadIdx.x); //coalesced
uint bitActive = (var != I2P) && (bits & threadMask);
bits = __ballot(bitActive);
uint numOnes = __popc(bits);
if (numFrom + numOnes > DECODE_VECTOR_SIZE) {
numFrom = removeDuplicates(_shared_, numFrom);
if (numFrom + numOnes > DECODE_VECTOR_SIZE) {
if (toRel == STORE) {
insertAll(to, _shared_, numFrom, false);
} else {
unionAll<toRel, fromRel>(to, _shared_, numFrom, false);
}
numFrom = 0;
}
}
pos = numFrom + __popc(bits & myMask);
if (bitActive) {
_shared_[pos] = (fromRel == CURR_DIFF_PTS) ? __currPtsHead__[var] : var;
}
numFrom += numOnes;
}
}
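// Generic rule application: for every variable y in firstRel(src) (masked with the diff-pts mask
// when secondRel is CURR_DIFF_PTS), union secondRel(y) into thirdRel(src).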
template<uint firstRel, uint secondRel, uint thirdRel>
__device__ void apply(const uint src, uint* const _shared_) {
uint numFrom = 0;
uint index = getHeadIndex(src, firstRel);
do {
uint myBits = __graphGet__(index + threadIdx.x);
uint base = __graphGet__(index + BASE);
if (base == NIL) {
break;
}
index = __graphGet__(index + NEXT);
if (secondRel == CURR_DIFF_PTS) {
myBits &= __diffPtsMaskGet__(base, threadIdx.x);
}
map<thirdRel, secondRel>(src, base, myBits, _shared_, numFrom);
} while (index != NIL);
if (numFrom) {
unionAll<thirdRel, secondRel>(src, _shared_, numFrom);
}
}
__device__ void insertAll(const uint src, uint* const _shared_, uint numFrom, const bool sort) {
if (numFrom > 1 && sort) {
numFrom = removeDuplicates(_shared_, numFrom);
}
const uint storeIndex = getStoreHeadIndex(src);
for (int i = 0; i < numFrom; i += WARP_SIZE) {
uint size = min(numFrom - i, WARP_SIZE);
uint next = getAndIncrement(&__numKeysCounter__, size);
// TODO: we need to make sure that (next + threadIdx.x < MAX_HASH_SIZE)
if (threadIdx.x < size) {
__key__[next + threadIdx.x] = _shared_[i + threadIdx.x]; // at most 2 transactions
__val__[next + threadIdx.x] = storeIndex;
}
}
}
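// For every y in CURR_DIFF_PTS(src), record the pair (y, head of STORE(src)) in __key__/__val__;
// the storeInv kernel later unions those STORE lists into COPY_INV(y).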
__device__ void store2storeInv(const uint src, uint* const _shared_) {
uint currDiffPtsIndex = getCurrDiffPtsHeadIndex(src);
uint numFrom = 0;
do {
uint myBits = __graphGet__(currDiffPtsIndex + threadIdx.x);
uint base = __graphGet__(currDiffPtsIndex + BASE);
if (base == NIL) {
break;
}
currDiffPtsIndex = __graphGet__(currDiffPtsIndex + NEXT);
map<STORE, STORE>(src, base, myBits, _shared_, numFrom);
} while (currDiffPtsIndex != NIL);
if (numFrom) {
insertAll(src, _shared_, numFrom);
}
}
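// Kernel: one pass of the propagation rules. For every variable, apply the COPY_INV rule
// (union CURR_DIFF_PTS of its copy sources into its NEXT_DIFF_PTS) and the LOAD_INV rule (which
// adds new COPY_INV edges), then gather the (target, STORE list) pairs consumed later by
// storeInv. The last block publishes the number of generated keys.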
__global__ void copyInv_loadInv_store2storeInv() {
__shared__ uint _sh_[WARPS_PER_BLOCK(COPY_INV_THREADS_PER_BLOCK) * (DECODE_VECTOR_SIZE * 2 + 2)];
uint* const _shared_ = &_sh_[threadIdx.y * (DECODE_VECTOR_SIZE * 2 + 2)];
_shared_[0] = NIL;
_shared_[DECODE_VECTOR_SIZE + 1] = NIL;
uint to = __numVars__;
uint src = getAndIncrement(&__worklistIndex1__, 1);
while (src < to) {
apply<COPY_INV, CURR_DIFF_PTS, NEXT_DIFF_PTS>(src, _shared_ + 1 + DECODE_VECTOR_SIZE + 1);
apply<LOAD_INV, CURR_DIFF_PTS, COPY_INV>(src, _shared_ + 1);
src = getAndIncrement(&__worklistIndex1__, 1);
}
to = __numStore__;
src = getAndIncrement(1);
while (src < to) {
src = __storeConstraints__[src];
if (src != NIL) {
store2storeInv(src, _shared_ + 1);
}
src = getAndIncrement(1);
}
if (resetWorklistIndex()) {
__key__[__numKeysCounter__] = NIL;
__val__[__numKeysCounter__] = NIL;
__numKeys__ = __numKeysCounter__ + 1;
__numKeysCounter__ = 0;
__worklistIndex1__ = 0;
}
}
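// Process the i-th key bucket: union every STORE list recorded in __val__[start..end) into
// COPY_INV(src). Buckets too large for one warp are queued in _pending_ for block-level handling.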
__device__ void warpStoreInv(const uint i, uint* const _pending_, uint* _numPending_) {
uint src = __key__[i];
uint startIndex = __keyAux__[i];
uint end = __keyAux__[i + 1];
if (end - startIndex > WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) * 4) {
// too big for a single warp => add to pending, so the whole block will process this variable
if (isFirstThreadOfWarp()) {
uint where = 3 * atomicAdd(_numPending_, 1);
_pending_[where] = src;
_pending_[where + 1] = startIndex;
_pending_[where + 2] = end;
}
return;
}
uint* const _shared_ = _pending_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) * 3 +
threadIdx.y * (WARP_SIZE + DECODE_VECTOR_SIZE + 1);
_shared_[WARP_SIZE] = NIL;
uint start = roundToPrevMultipleOf(startIndex, WARP_SIZE); // to ensure alignment
for (int j = start; j < end; j += WARP_SIZE) {
uint myIndex = j + threadIdx.x;
_shared_[threadIdx.x] = myIndex < end ? __val__[myIndex] : NIL;
uint startK = max(((int) startIndex) - j, 0);
uint endK = min(end - j, WARP_SIZE);
for (int k = startK; k < endK; k++) {
uint fromIndex = _shared_[k];
unionToCopyInv(src, fromIndex, _shared_ + 1 + WARP_SIZE);
}
}
}
__device__ void blockStoreInv(uint src, uint* const _dummyVars_, volatile uint* _warpInfo_,
uint& _numPending_) {
uint* _shared_ = _dummyVars_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) * 4 +
threadIdx.y * (WARP_SIZE + DECODE_VECTOR_SIZE + 1);
__shared__ uint _counter_, _start_, _end_;
_shared_[WARP_SIZE] = NIL;
_shared_ += WARP_SIZE + 1;
__syncthreads();
for (int i = 0; i < _numPending_; i++) {
if (isFirstWarpOfBlock()) {
uint* pending = _dummyVars_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK);
src = pending[3 * i];
_start_ = pending[3 * i + 1];
_end_ = pending[3 * i + 2];
_counter_ = _start_;
}
__syncthreads();
if (isFirstThreadOfWarp()) {
_warpInfo_[threadIdx.y] = atomicAdd(&_counter_, 1);
}
uint j = _warpInfo_[threadIdx.y];
while (j < _end_) {
uint fromIndex = __val__[j];
unionToCopyInv(src, fromIndex, _shared_, isFirstWarpOfBlock());
if (isFirstThreadOfWarp()) {
_warpInfo_[threadIdx.y] = atomicAdd(&_counter_, 1);
}
j = _warpInfo_[threadIdx.y];
}
__syncthreads();
if (isFirstWarpOfBlock()) {
for (int i = 1; i < WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK); i++) {
uint var2 = _dummyVars_[i];
unionToCopyInv(src, getCopyInvHeadIndex(var2), _shared_);
}
}
__syncthreads();
if (!isFirstWarpOfBlock()) { //reset fields so updateDiffPts doesn't work on dummy variables
uint index = getHeadIndex(src, COPY_INV);
__graphSet__(index, threadIdx.x, NIL);
}
}
if (isFirstWarpOfBlock()) {
_numPending_ = 0;
}
__syncthreads();
}
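// Kernel: second phase of the store rule. Each warp consumes the key buckets built from the
// pairs recorded by store2storeInv; oversized buckets are handled by the whole block in
// blockStoreInv, where the non-first warps accumulate into per-warp dummy variables that the
// first warp then folds into the real target.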
__global__ void storeInv() {
__shared__ uint _sh_[WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) *
(5 + WARP_SIZE + DECODE_VECTOR_SIZE + 1)];
__shared__ volatile uint* _warpInfo_;
__shared__ volatile uint _warpsWorking_;
__shared__ uint* _dummyVars_;
__shared__ uint _numPending_, _to_;
if (isFirstWarpOfBlock()) {
_to_ = __numKeys__ - 1; // because the last one is NIL
_dummyVars_ = _sh_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK);
if (threadIdx.x < WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK)) {
_dummyVars_[threadIdx.x] = __initialNonRep__[mul32(blockIdx.x) + threadIdx.x];
}
_warpInfo_ = _sh_;
_numPending_ = 0;
_warpsWorking_ = WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK);
}
__syncthreads();
uint counter, src;
if (!isFirstWarpOfBlock()) {
src = _dummyVars_[threadIdx.y];
}
if (isFirstThreadOfWarp()) {
uint next = atomicAdd(&__worklistIndex0__, 1);
if (next >= _to_) {
atomicSub((uint*) &_warpsWorking_, 1);
}
_warpInfo_[threadIdx.y] = next;
}
counter = _warpInfo_[threadIdx.y];
while (_warpsWorking_) {
if (counter < _to_) {
warpStoreInv(counter, _sh_ + WARPS_PER_BLOCK(STORE_INV_THREADS_PER_BLOCK) * 2, &_numPending_);
}
__syncthreads();
if (_numPending_) {
blockStoreInv(src, _dummyVars_, _warpInfo_, _numPending_);
}
if (counter < _to_ ) {
if (isFirstThreadOfWarp()) {
uint next = atomicAdd(&__worklistIndex0__, 1);
if (next >= _to_) {
atomicSub((uint*) &_warpsWorking_, 1);
}
_warpInfo_[threadIdx.y] = next;
}
counter = _warpInfo_[threadIdx.y];
}
}
resetWorklistIndex();
}
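// Shift the 960-bit element (base, bits) left by `offset` bit positions and re-split the result
// into up to three consecutive elements (base, base + 1, base + 2) of 30 data words each.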
__device__ void shift(const uint base, const uint bits, const uint offset,
volatile uint* _shifted_) {
_shifted_[threadIdx.x] = 0;
_shifted_[threadIdx.x + WARP_SIZE] = 0;
_shifted_[threadIdx.x + WARP_SIZE * 2] = 0;
uint delta = div32(offset);
uint highWidth = mod32(offset);
uint lowWidth = WARP_SIZE - highWidth;
// these memory accesses do not conflict
_shifted_[threadIdx.x + delta] = (bits << highWidth);
_shifted_[threadIdx.x + delta + 1] |= (bits >> lowWidth);
_shifted_[threadIdx.x + WARP_SIZE * 2] = _shifted_[threadIdx.x + BASE * 2];
_shifted_[threadIdx.x + WARP_SIZE] = _shifted_[threadIdx.x + BASE];
_shifted_[BASE] = base;
_shifted_[BASE + WARP_SIZE] = base + 1;
_shifted_[BASE + WARP_SIZE * 2] = base + 2;
}
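// GEP_INV rule: every object o in CURR_DIFF_PTS(y) whose size exceeds `offset` contributes
// o + offset to NEXT_DIFF_PTS(x).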
__device__ void applyGepInvRule(uint x, const uint y, const uint offset, volatile uint* _shared_) {
uint yIndex = getCurrDiffPtsHeadIndex(y);
uint myBits = __graphGet__(yIndex, threadIdx.x);
if (__all(myBits == NIL)) {
return;
}
uint xIndex = getNextDiffPtsHeadIndex(x);
do {
myBits = __graphGet__(yIndex, threadIdx.x);
uint base = __graphGet__(yIndex, BASE);
yIndex = __graphGet__(yIndex, NEXT);
myBits &= __offsetMaskGet__(base, threadIdx.x, offset);
if (__all(myBits == 0)) {
continue;
}
shift(base, myBits, offset, _shared_);
for (int i = 0; i < 3; i++) {
uint myBits = threadIdx.x == NEXT ? NIL : _shared_[threadIdx.x + WARP_SIZE * i];
if (__any(myBits && threadIdx.x < BASE)) {
xIndex = addVirtualElement(xIndex, base + i, myBits, NEXT_DIFF_PTS);
}
}
} while (yIndex != NIL);
}
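// Kernel: apply the GEP_INV rule to every (dst, src + offset) pair in __gepInv__, locking the
// destination representative while it is updated.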
__global__ void gepInv() {
__shared__ uint _sh_[WARPS_PER_BLOCK(GEP_INV_THREADS_PER_BLOCK) * (WARP_SIZE * 3)];
volatile uint* _shared_ = &_sh_[threadIdx.y * (WARP_SIZE * 3)];
const uint to = __numGepInv__ * 2;
uint index = getAndIncrement(2);
while (index < to) {
uint x = __gepInv__[index];
x = getRep(x);
uint val1 = __gepInv__[index + 1];
while (!lock(x)); // busy wait, should be short
const uint y = getRep(id(val1));
applyGepInvRule(x, y, offset(val1), _shared_);
unlock(x);
index = getAndIncrement(2);
}
if (resetWorklistIndex()) {
__done__ = true;
}
}
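// Clone the remaining diff-pts chain into PTS starting at ptsIndex, then make CURR_DIFF_PTS(var)
// share the cloned elements (either by linking the current tail to them or by copying the first
// cloned element into the CURR_DIFF_PTS head).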
__device__ void cloneAndLink(const uint var, const uint ptsIndex, uint& currDiffPtsIndex,
const uint diffPtsBits, const uint diffPtsNext) {
clone(ptsIndex, diffPtsBits, diffPtsNext, PTS);
if (currDiffPtsIndex != NIL) {
__graphSet__(currDiffPtsIndex + NEXT, ptsIndex);
} else {
currDiffPtsIndex = getCurrDiffPtsHeadIndex(var);
uint ptsBits = __graphGet__(ptsIndex + threadIdx.x);
__graphSet__(currDiffPtsIndex + threadIdx.x, ptsBits);
}
}
/**
* Update the current, next and total PTS sets of a variable. In the last iteration of the main
* loop, points-to edges have been added to NEXT_DIFF_PTS. However, many of them might already be
* present in PTS. The purpose of this function is to update PTS as PTS U NEXT_DIFF_PTS, and set
* CURR_DIFF_PTS as the difference between the old and new PTS for the given variable.
*
* @param var ID of the variable
* @return true if new pts edges have been added to this variable
*/
__device__ bool updatePtsAndDiffPts(const uint var) {
const uint diffPtsHeadIndex = getNextDiffPtsHeadIndex(var);
uint diffPtsBits = __graphGet__(diffPtsHeadIndex + threadIdx.x);
uint diffPtsBase = __graphGet__(diffPtsHeadIndex + BASE);
if (diffPtsBase == NIL) {
return false;
}
uint diffPtsNext = __graphGet__(diffPtsHeadIndex + NEXT);
__graphSet__(diffPtsHeadIndex + threadIdx.x, NIL);
uint ptsIndex = getPtsHeadIndex(var);
uint ptsBits = __graphGet__(ptsIndex + threadIdx.x);
uint ptsBase = __graphGet__(ptsIndex + BASE);
if (ptsBase == NIL) {
// we pass ptsBase (which is NIL here) instead of the literal NIL because cloneAndLink takes a
// reference it may modify
cloneAndLink(var, ptsIndex, ptsBase, diffPtsBits, diffPtsNext);
return true;
}
uint ptsNext = __graphGet__(ptsIndex + NEXT);
uint currDiffPtsIndex = NIL;
while (1) {
if (ptsBase > diffPtsBase) {
uint newIndex = mallocPts();
__graphSet__(newIndex + threadIdx.x, ptsBits);
uint val = threadIdx.x == NEXT ? newIndex : diffPtsBits;
__graphSet__(ptsIndex + threadIdx.x, val);
ptsIndex = newIndex;
// update CURR_DIFF_PTS
newIndex = currDiffPtsIndex == NIL ? getCurrDiffPtsHeadIndex(var) : mallocCurrDiffPts();
val = threadIdx.x == NEXT ? NIL : diffPtsBits;
__graphSet__(newIndex + threadIdx.x, val);
if (currDiffPtsIndex != NIL) {
__graphSet__(currDiffPtsIndex + NEXT, newIndex);
}
if (diffPtsNext == NIL) {
return true;
}
currDiffPtsIndex = newIndex;
diffPtsBits = __graphGet__(diffPtsNext + threadIdx.x);
diffPtsBase = __graphGet__(diffPtsNext + BASE);
diffPtsNext = __graphGet__(diffPtsNext + NEXT);
} else if (ptsBase == diffPtsBase) {
uint newPtsNext = (ptsNext == NIL && diffPtsNext != NIL) ? mallocPts() : ptsNext;
uint orBits = threadIdx.x == NEXT ? newPtsNext : ptsBits | diffPtsBits;
uint ballot = __ballot(orBits != ptsBits);
if (ballot) {
__graphSet__(ptsIndex + threadIdx.x, orBits);
if (ballot & LT_BASE) {
// update CURR_DIFF_PTS
orBits = diffPtsBits & ~ptsBits;
if (threadIdx.x == BASE) {
orBits = ptsBase;
} else if (threadIdx.x == NEXT) {
orBits = NIL;
}
uint newIndex;
if (currDiffPtsIndex != NIL) {
newIndex = mallocCurrDiffPts();
__graphSet__(currDiffPtsIndex + NEXT, newIndex);
} else {
newIndex = getCurrDiffPtsHeadIndex(var);
}
__graphSet__(newIndex + threadIdx.x, orBits);
currDiffPtsIndex = newIndex;
}
}
if (diffPtsNext == NIL) {
return (currDiffPtsIndex != NIL);
}
diffPtsBits = __graphGet__(diffPtsNext + threadIdx.x);
diffPtsBase = __graphGet__(diffPtsNext + BASE);
diffPtsNext = __graphGet__(diffPtsNext + NEXT);
if (ptsNext == NIL) {
cloneAndLink(var, newPtsNext, currDiffPtsIndex, diffPtsBits, diffPtsNext);
return true;
}
ptsIndex = ptsNext;
ptsBits = __graphGet__(ptsIndex + threadIdx.x);
ptsBase = __graphGet__(ptsIndex + BASE);
ptsNext = __graphGet__(ptsIndex + NEXT);
} else { // ptsBase < diffPtsBase
if (ptsNext == NIL) {
uint newPtsIndex = mallocPts();
__graphSet__(ptsIndex + NEXT, newPtsIndex);
cloneAndLink(var, newPtsIndex, currDiffPtsIndex, diffPtsBits, diffPtsNext);
return true;
}
ptsIndex = ptsNext;
ptsBits = __graphGet__(ptsIndex + threadIdx.x);
ptsBase = __graphGet__(ptsIndex + BASE);
ptsNext = __graphGet__(ptsIndex + NEXT);
}
}
}
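// Kernel: for every variable, fold NEXT_DIFF_PTS into PTS and leave the newly added bits in
// CURR_DIFF_PTS (cleared when nothing changed). Clears __done__ if any variable gained points-to
// edges, and the last block resets the per-iteration diff free lists.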
__global__ void updatePtsInformation() {
bool newWork = false;
const uint numVars = __numVars__;
const uint CHUNK_SIZE = 12;
//ulongint start = recordStartTime();
int i = getAndIncrement(CHUNK_SIZE);
while (i < numVars) {
for (int var = i; var < min(i + CHUNK_SIZE, numVars); var++) {
bool newStuff = updatePtsAndDiffPts(var);
newWork |= newStuff;
if (!newStuff) {
const uint currPtsHeadIndex = getCurrDiffPtsHeadIndex(var);
__graphSet__(currPtsHeadIndex + threadIdx.x, NIL);
}
}
i = getAndIncrement(CHUNK_SIZE);
}
if (newWork) {
__done__ = false;
}
// if (isFirstThreadOfWarp()) {
// //printf("Warp %u: %u\n", getWarpIdInGrid(), getEllapsedTime(start));
// }
uint headerSize = numVars * ELEMENT_WIDTH;
if (resetWorklistIndex()) {
__currDiffPtsFreeList__ = CURR_DIFF_PTS_START - headerSize;
__nextDiffPtsFreeList__ = NEXT_DIFF_PTS_START - headerSize;
}
}
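// Kernel: for every offset in [1, maxOffset], build the bitmask of object variables whose size
// exceeds that offset (read back by __offsetMaskGet__ in the GEP_INV rule).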
__global__ void createOffsetMasks(int numObjectVars, uint maxOffset) {
__shared__ uint _sh_[DEF_THREADS_PER_BLOCK];
volatile uint* _mask_ = &_sh_[threadIdx.y * WARP_SIZE];
int inc = mul960(getWarpsPerGrid());
int init = mul960(getWarpIdInGrid());
for (int i = init; i < numObjectVars; i += inc) {
uint base = BASE_OF(i);
for (int offset = 1; offset <= maxOffset; offset++) {
_mask_[threadIdx.x] = 0;
for (int src = i; src < min(i + ELEMENT_CARDINALITY, numObjectVars); src += WARP_SIZE) {
uint size = __size__[src + threadIdx.x];
if (__all(size <= offset)) {
continue;
}
uint word = WORD_OF(src - i);
_mask_[word] = ballot(size > offset);
}
__offsetMaskSet__(base, threadIdx.x, offset, _mask_[threadIdx.x]);
}
}
}
__device__ uint lockToVar(uint lock) {
if ((lock < VAR(0)) || (lock >= LOCKED)) {
return lock;
}
return lock - VAR(0);
}
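// Merge var2 into var1: union the PTS, COPY_INV, STORE and LOAD_INV lists of var2 into var1
// (recycling var2's elements), clear var2's lists, set its representative to `rep` and unlock it.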
__device__ void merge(const uint var1, const uint var2, const uint rep) {
//if (isFirstThreadOfWarp()) //printf("%u <= %u\n", var1, var2);
uint headIndex = getPtsHeadIndex(var2);
unionG2GRecycling(var1, PTS, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
headIndex = getCopyInvHeadIndex(var2);
unionG2GRecycling(var1, COPY_INV, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
headIndex = getStoreHeadIndex(var2);
unionG2GRecycling(var1, STORE, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
headIndex = getLoadInvHeadIndex(var2);
unionG2GRecycling(var1, LOAD_INV, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
// clear CURR_DIFF_PTS
headIndex = getCurrDiffPtsHeadIndex(var2);
//unionG2GRecycling(var1, CURR_DIFF_PTS, headIndex);
__graphSet__(headIndex, threadIdx.x, NIL);
setRep(var2, rep);
__threadfence();
unlock(var2);
}
/**
* Merge a list of pointer-equivalent variables
* Granularity: block
* @param _list_ Pointer-equivalent variables
* @param _listSize_ Number of variables to be processed
*/
__device__ void mergeCycle(const uint* const _list_, const uint _listSize_) {
__shared__ uint _counter_;
if (!_listSize_) {
__syncthreads();
return;
}
// 'ry' will be the representative of this cycle
uint ry = _list_[0];
if (_listSize_ == 1) {
if (isFirstWarpOfBlock()) {
unlock(ry);
}
__syncthreads();
return;
}
uint warpsPerBlock = getWarpsPerBlock();
if (_listSize_ > warpsPerBlock) {
// each warp chooses a local representative and then merges each popped worklist item with it.
uint var1 = _list_[threadIdx.y];
_counter_ = warpsPerBlock;
__syncthreads();
uint index = getAndIncrement(&_counter_, 1);
while (index < _listSize_) {
uint var2 = _list_[index];
merge(var1, var2, ry);
index = getAndIncrement(&_counter_, 1);
}
}
__syncthreads();
// the first warp merges the local representatives. This is actually faster (and simpler)
// than performing a reduction of the list using the entire block, due to load imbalance.
if (isFirstWarpOfBlock()) {
uint to = min(_listSize_, warpsPerBlock);
for (int i = 1; i < to; i++) {
uint var = _list_[i];
merge(ry, var, ry);
}
//reset CURR_PTS of the cycle representative to be PTS
uint myBits = __graphGet__(getPtsHeadIndex(ry), threadIdx.x);
__graphSet__(getCurrDiffPtsHeadIndex(ry), threadIdx.x, myBits);
__threadfence();
unlock(ry);
}
__syncthreads();
}
// to be executed by one thread
__device__ uint lockVarRep(uint& var) {
while (1) {
uint rep = getRepRec(var);
uint old = atomicCAS(__lock__ + rep, UNLOCKED, VAR(blockIdx.x));
if (old == PTR(blockIdx.x)) {
// try to promote lock to type VAR
old = atomicCAS(__lock__ + rep, PTR(blockIdx.x), VAR(blockIdx.x));
}
if (old != UNLOCKED && old != PTR(blockIdx.x)) {
var = rep;
return old;
}
// we locked it, but maybe it is not a representative anymore
var = getRep(rep);
if (var == rep) {
return UNLOCKED;
}
if (old == PTR(blockIdx.x)) { // back to PTR
__lock__[rep] = PTR(blockIdx.x);
} else {
unlock(rep);
}
}
}
/**
* Lock a list of variables
* Granularity: block
* @param _currVar_ List of variables to lock, sorted in ascending order
* @param _currVarSize_ Number of variables we want to process. At the end of the function,
* it stores the number of variables we were able to lock.
* @param _nextVar_ List where to add all the variables we could not lock
* @param _nextVarSize_ Number of variables we could not lock
*/
__device__ void lockVars(uint* const _currVar_, uint& _currVarSize_, uint* const _nextVar_,
uint* _nextVarSize_) {
__shared__ uint _count_;
_count_ = 0;
__syncthreads();
for (int i = getThreadIdInBlock(); i < _currVarSize_; i+= getThreadsPerBlock()) {
uint var = _currVar_[i];
// block culling to filter out some duplicates
if (i && var == _currVar_[i - 1]) {
continue;
}
uint stat = lockVarRep(var);
uint pos;
if (stat == UNLOCKED) {
pos = atomicAdd(&_count_, 1);
_currVar_[pos] = var;
} else if (stat != VAR(blockIdx.x)) {
uint pos = atomicAdd(_nextVarSize_, 1);
_nextVar_[pos] = var;
}
}
__syncthreads();
_currVarSize_ = _count_; //first currVarSize positions are populated
__syncthreads();
}
// to be executed by one WARP
__device__ uint lockPtr(uint ptr) {
__shared__ volatile uint _shared_[MAX_WARPS_PER_BLOCK];
uint intended = PTR(getBlockIdInGrid());
if (isFirstThreadOfWarp()) {
_shared_[threadIdx.y] = atomicCAS(__lock__ + ptr, UNLOCKED, intended);
}
return _shared_[threadIdx.y];
}
/**
* Lock every variable in the current points-to set of the input variable.
* Granularity: warp
* @param x A variable locked by the current block
* @param _currVar_ List of locked variables
* @param _currVarSize_ Number of locked variables
* @param _nextVar_ List of variables we could not lock
* @param _nextVarSize_ Number of variables we could not lock
*/
__device__ void decodeCurrPts(const uint x, uint* const _currVar_, uint* const _currVarSize_,
uint* const _nextVar_, uint* const _nextVarSize_) {
uint index = getCurrDiffPtsHeadIndex(x);
do {
uint myBits = __graphGet__(index, threadIdx.x);
uint base = __graphGet__(index, BASE);
if (base == NIL) {
break;
}
index = __graphGet__(index, NEXT);
uint nonEmpty = __ballot(myBits && threadIdx.x < BASE);
uint lastVar = NIL;
while (nonEmpty) {
uint pos = __ffs(nonEmpty) - 1;
nonEmpty &= (nonEmpty - 1);
uint bits = getValAtThread(myBits, pos);
uint var = mul960(base) + mul32(pos) + threadIdx.x;
if (var == I2P || !isBitActive(bits, threadIdx.x)) {
var = NIL;
} else {
uint stat = lockVarRep(var);
if (stat != UNLOCKED) {
if (stat != VAR(blockIdx.x) && var != lastVar) {
// TODO: do something so we do not lose equivalences. This only affects Linux, though
uint where = atomicInc(_nextVarSize_, HCD_DECODE_VECTOR_SIZE - 1);
_nextVar_[where] = var;
lastVar = var;
}
var = NIL;
}
}
bits = __ballot(var != NIL);
if (!bits) {
continue;
}
uint numOnes = __popc(bits);
uint prevNumFrom = 0;
if (isFirstThreadOfWarp()) {
prevNumFrom = atomicAdd(_currVarSize_, numOnes);
}
prevNumFrom = getValAtThread(prevNumFrom, 0);
// TODO: make sure that (prevNumFrom + numOnes < HCD_DECODE_VECTOR_SIZE)
//if (isFirstThreadOfWarp() && ((prevNumFrom + numOnes) >= HCD_DECODE_VECTOR_SIZE)) {
// //printf("Exceeded HCD_DECODE_VECTOR_SIZE!!\n");
//}
pos = prevNumFrom + __popc(bits & ((1 << threadIdx.x) - 1));
if (var != NIL) {
_currVar_[pos] = var;
}
}
} while (index != NIL);
}
/**
* Lock a list of (pointer) variables and their points-to sets
* Granularity: block
*/
__device__ void lockPtrs(uint* const _currPtr_, uint& _currPtrSize_, uint* const _nextPtr_,
uint* _nextPtrSize_, uint* const _currVar_, uint* _currVarSize_, uint* const _nextVar_,
uint* _nextVarSize_) {
const uint warpsPerBlock = getWarpsPerBlock();
for (int i = threadIdx.y; i < _currPtrSize_; i += warpsPerBlock) {
uint ptr = _currPtr_[i];
uint stat = lockPtr(ptr);
if (stat != UNLOCKED && stat != VAR(blockIdx.x)) {
_currPtr_[i] = NIL;
if (isFirstThreadOfWarp()) {
uint pos = atomicAdd(_nextPtrSize_, 1);
_nextPtr_[pos] = ptr;
}
} else {
decodeCurrPts(ptr, _currVar_, _currVarSize_, _nextVar_, _nextVarSize_);
}
}
__syncthreads();
}
__device__ void unlockPtrs(const uint* const _list_, const uint _listSize_) {
int init = getThreadIdInBlock();
int inc = getThreadsPerBlock();
for (int i = init; i < _listSize_; i += inc) {
uint var = _list_[i];
if (var != NIL) {
// if it is locked by VAR(blockIdx.x), keep it that way
atomicCAS(__lock__ + var, PTR(blockIdx.x), UNLOCKED);
}
}
__syncthreads();
}
/**
* Online phase of Hybrid Cycle Detection
* This is where things get really hairy -- but the overall performance of the algorithm is
* dramatically improved by merging the equivalents discovered during the offline analysis, so
* there is no way around it AFAIK.
* The kernel takes a list of tuples (y, x_0, ..., x_N) where pts(*y) = pts(x_0) = ... = pts(x_N).
* Each block pops a tuple out of the worklist and performs the following steps:
* a) lock variables y, x_0, ..., x_N
* b) decode and lock the points-to sets of x_0, ..., x_N
* c) merge all the variables that we were able to lock
* d) unlock the merged variables
* e) repeat a-d for all the variables we were not able to lock
* Note that e) is not strictly necessary, but skipping it would miss some (possibly relevant)
* equivalences, which eventually results in more work for the standard graph rules.
*/
__global__ void hcd() {
__shared__ uint _counter_;
/**
* list of variables (x,...,x_N) such that all the variables in the set {pts(x),...pts(x_N)}
* are pointer-equivalent.
*/
__shared__ uint _ptr_[HCD_TABLE_SIZE * 2];
/*
* pointer to _ptr_ indicating where the current list starts
*/
__shared__ uint *_currPtr_;
/**
* pointer to _ptr_ indicating where the next list starts.
* The reason we need sublists within _ptr_ is that we might not have been able to lock
* all the variables in _currPtr_, so everything that is pending (i.e. needs to be processed in
* the next iteration) is placed in the subarray pointed to by _nextPtr_.
*/
__shared__ uint *_nextPtr_;
/**
* list of variables that are pointer equivalent (thus need to be merged)
*/
__shared__ uint _currVar_[HCD_DECODE_VECTOR_SIZE];
/**
* list of variables that are pointer equivalent but could not be locked in the current iteration
*/
__shared__ uint *_nextVar_;
__shared__ uint _currPtrSize_, _nextPtrSize_, _currVarSize_, _nextVarSize_;
const uint threadIdInBlock = getThreadIdInBlock();
const uint threadsInBlock = getThreadsPerBlock();
const uint to = __numHcdIndex__;
// first thread of the block picks next hcd pair to work on
if (isFirstThreadOfBlock()) {
_counter_ = atomicAdd(&__worklistIndex0__, 1);
_nextVar_ = __nextVar__ + getBlockIdInGrid() * HCD_DECODE_VECTOR_SIZE;
}
__syncthreads();
while (_counter_ < to) {
uint pair = __hcdIndex__[_counter_];
uint start = getFirst(pair);
uint end = getSecond(pair);
// move the (x0,...,x_N) sublist to shared memory
for (int i = start + 1 + threadIdInBlock; i < end; i += threadsInBlock) {
_ptr_[i - start - 1] = __hcdTable__[i];
}
if (isFirstWarpOfBlock()) {
_currPtrSize_ = end - start - 1;
_currVar_[0] = __hcdTable__[start];
_currVarSize_ = 1;
_currPtr_ = _ptr_;
// we do not know how many variables we will not be able to lock, so unfortunately we have
// to use a statically fixed index
_nextPtr_ = _ptr_ + HCD_TABLE_SIZE;
}
while (1) {
_nextPtrSize_ = 0;
_nextVarSize_ = 0;
__syncthreads();
// lock variables in the current variable list (variables that belong to the points-to set
// of x_I and could not be locked in a previous iteration)
lockVars(_currVar_, _currVarSize_, _nextVar_, &_nextVarSize_);
// lock variables in current pointer list, then decode their points-to sets and lock those too
lockPtrs(_currPtr_, _currPtrSize_, _nextPtr_, &_nextPtrSize_, _currVar_, &_currVarSize_, _nextVar_, &_nextVarSize_);
// unlock variables in pointer list if they are not in the variable list
unlockPtrs(_currPtr_, _currPtrSize_);
blockSort(_currVar_, _currVarSize_);
// merge variable list!
mergeCycle(_currVar_, _currVarSize_);
// if there is any pending work -because variables or pointers could not be locked-, update
// the corresponding information and retry
if (!_nextPtrSize_ && (!_nextVarSize_ || (_currVarSize_ + _nextVarSize_ == 1))) {
break;
}
if (isFirstWarpOfBlock() && _currVarSize_) {
_currVar_[_nextVarSize_] = _currVar_[0]; // merge representative with pending
}
__syncthreads();
for (int i = threadIdInBlock; i < _nextVarSize_; i+= threadsInBlock) {
_currVar_[i] = _nextVar_[i];
}
if (isFirstWarpOfBlock()) {
_currVarSize_ = _nextVarSize_ + (_currVarSize_ > 0);
_currPtrSize_ = _nextPtrSize_;
uint* tmp = _nextPtr_;
_nextPtr_ = _currPtr_;
_currPtr_ = tmp;
}
__syncthreads();
blockSort(_currVar_, _currVarSize_);
}
if (isFirstThreadOfBlock()) {
_counter_ = atomicAdd(&__worklistIndex0__, 1);
}
__syncthreads();
}
resetWorklistIndex();
}
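// Kernel: between-iteration bookkeeping: compress representative paths, refresh the diff-pts
// mask, and deduplicate the store constraints by their (locked) representatives.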
__global__ void updateInfo() {
int inc = getThreadsPerGrid();
int init = getThreadIdInGrid();
uint to = __numVars__;
// a) path compression
for (int var = init; var < to; var += inc) {
uint rep = getRepRec(var); // non-coalesced
if (rep != var) {
setRep(var, rep); //coalesced
}
uint diffPtsMask = __ballot(!isEmpty(rep, CURR_DIFF_PTS)); //non aligned
__diffPtsMaskSet__(BASE_OF(var), WORD_OF(var), diffPtsMask); //aligned
}
syncAllThreads();
// b) update store rules
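// (each source is rewritten to its representative; the lock guarantees that every representative
// survives in at most one slot, and the remaining duplicates are set to NIL)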
to = __numStore__;
for (int index = init; index < to; index += inc) {
// the size of STORE has been rounded up to a multiple of 32, so there are no out-of-bounds accesses
uint src = __storeConstraints__[index];
if (src != NIL) {
src = getRep(src);
uint val = (atomicCAS(__lock__ + src, UNLOCKED, LOCKED) == UNLOCKED) ? src : NIL;
__storeConstraints__[index] = val;
}
}
syncAllThreads();
// c) unlock
for (int index = init; index < to; index += inc) {
uint src = __storeConstraints__[index];
if (src != NIL) {
unlock(getRep(src));
}
}
}
__launch_bounds__ (DEF_THREADS_PER_BLOCK)
__global__ void initialize() {
uint to = __numVars__;
uint headerSize = to * ELEMENT_WIDTH;
if (isFirstThreadOfBlock()) {
__ptsFreeList__ = headerSize;
__currDiffPtsFreeList__ = CURR_DIFF_PTS_START - headerSize;
__nextDiffPtsFreeList__ = NEXT_DIFF_PTS_START - headerSize;
// after LOAD_INV, STORE and CURR_DIFF_PTS_INV header regions
__otherFreeList__ = COPY_INV_START + headerSize * (LAST_DYNAMIC_REL - COPY_INV + 1);
}
__syncthreads();
int inc = mul32(getWarpsPerGrid());
int init = mul32(getWarpIdInGrid());
for (int var = init; var < to; var += inc) {
unlock(var + threadIdx.x);
setRep(var + threadIdx.x, var + threadIdx.x);
for (int i = 0; i < WARP_SIZE; i++) {
uint index = getHeadIndex(var + i, PTS);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, NEXT_DIFF_PTS);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, CURR_DIFF_PTS);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, COPY_INV);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, STORE);
__graphSet__(index + threadIdx.x, NIL);
index = getHeadIndex(var + i, LOAD_INV);
__graphSet__(index + threadIdx.x, NIL);
}
}
inc = mul960(getWarpsPerGrid());
init = mul960(getWarpIdInGrid());
for (int i = init; i < to; i += inc) {
uint base = BASE_OF(i);
__diffPtsMaskSet__(base, threadIdx.x, 0);
}
syncAllThreads();
to = __numInitialRep__;
init = getThreadIdInGrid();
inc = getThreadsPerGrid();
// the offline phase of Hybrid Cycle Detection already detected some pointer-equivalent variables.
for (int i = init; i < to; i += inc) {
setRep(__initialNonRep__[i], __initialRep__[i]);
}
}
__global__ void computeCurrPtsHash() {
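// For every variable with a non-empty CURR_DIFF_PTS set, emit a (hash of the set, variable) pair
// into __key__/__val__. The host later sorts and buckets these pairs (see buildHashMap) so that
// findCurrPtsEquivalents() can detect variables whose sets are identical.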
const uint to = __numVars__;
uint src = getAndIncrement(WARP_SIZE);
while (src < to) {
for (int i = 0; i < WARP_SIZE; i++) {
if (!isEmpty(src + i, CURR_DIFF_PTS)) {
uint hash = hashCode(getHeadIndex(src + i, CURR_DIFF_PTS));
uint next = getAndIncrement(&__numKeysCounter__, 1);
__key__[next] = hash;
__val__[next] = src + i;
}
}
src = getAndIncrement(WARP_SIZE);
}
if (resetWorklistIndex()) {
__numKeys__ = __numKeysCounter__;
__numKeysCounter__ = 0;
}
}
__global__ void findCurrPtsEquivalents() {
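// After buildHashMap(), __key__[i] holds the index at which the run of identical hash values
// containing position i begins. Each warp scans that run for an earlier variable whose
// CURR_DIFF_PTS set is identical; all variables with the same set end up sharing one head index
// in __currPtsHead__.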
__shared__ uint _sh_[WARPS_PER_BLOCK(UPDATE_THREADS_PER_BLOCK) * WARP_SIZE * 2];
uint* _key_ = &_sh_[threadIdx.y * WARP_SIZE * 2];
uint* _val_ = _key_ + WARP_SIZE;
const uint to = __numKeys__;
uint index = getAndIncrement(WARP_SIZE);
while (index < to) {
if (index + threadIdx.x < to) {
_key_[threadIdx.x] = __key__[index + threadIdx.x];
_val_[threadIdx.x] = __val__[index + threadIdx.x];
}
for (int i = 0; i < WARP_SIZE && index + i < to; i++) {
uint var1 = _val_[i];
uint var1Head = getHeadIndex(var1, CURR_DIFF_PTS);
uint j = _key_[i];
while (j < index + i) {
uint var2 = __val__[j];
uint var2Head = getHeadIndex(var2, CURR_DIFF_PTS);
if (equal(var1Head, var2Head)) {
__currPtsHead__[var1] = var2Head;
break;
}
j++;
}
if (j == index + i) {
__currPtsHead__[var1] = var1Head;
}
}
index = getAndIncrement(WARP_SIZE);
}
resetWorklistIndex();
}
__host__ void checkKernelErrors(char *msg) {
cudaError_t e;
cudaThreadSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
printf("\n%s: %s\n", msg, cudaGetErrorString(e));
exit(-1);
}
}
__host__ void checkErrors(uint rel) {
#if CHECK_SPV
uint error = 0;
checkForErrors << <getBlocks(), THREADS_PER_BLOCK >> >(rel);
checkKernelErrors("ERROR while checking for errors");
cudaSafeCall(cudaMemcpyFromSymbol(&error, __error__, uintSize, 0, D2H));
if (error) {
exit(-1);
}
#endif
}
__host__ void checkAllErrors() {
checkErrors(PTS);
checkErrors(NEXT_DIFF_PTS);
checkErrors(CURR_DIFF_PTS);
checkErrors(COPY_INV);
checkErrors(LOAD_INV);
checkErrors(STORE);
}
__host__ void addTimeToRule(uint& counter, clock_t& startTime) {
uint ellapsedTime = (int) (1000.0f * (clock() - startTime) / CLOCKS_PER_SEC);
counter += ellapsedTime;
startTime = clock();
}
__host__ void printRule(const char* msg) {
#if PRINT_RULES
printf("%s", msg);
#endif
}
template <typename Vector>
__host__ void printVector(const Vector& v, uint size) {
std::cout << "[";
for (size_t i = 0; i < size; i++) {
uint num = v[i];
if (num != NIL) {
std::cout << num;
if (i < size - 1) {
std::cout << ", ";
}
}
}
std::cout << "]";
}
__host__ void initializeEdges(uint* &constraintsName, uint &constraintNumber, uint rel) {
dim3 dimInitialize(WARP_SIZE, getThreadsPerBlock(UPDATE_THREADS_PER_BLOCK) / WARP_SIZE);
uint* constraints;
uint numConstraints;
cudaSafeCall(cudaMemcpyFromSymbol(&constraints, constraintsName, sizeof(uint*)));
cudaSafeCall(cudaMemcpyFromSymbol(&numConstraints, constraintNumber, uintSize));
device_ptr<uint> src(constraints);
device_vector<uint> dstIndex(numConstraints);
sequence(dstIndex.begin(), dstIndex.begin() + numConstraints);
uint numSrc = unique_by_key(src, src + numConstraints, dstIndex.begin()).first - src;
addEdges<<<getBlocks() * 3, dimInitialize>>>(constraints, raw_pointer_cast(&dstIndex[0]),
constraints + numConstraints, numSrc, rel);
if (rel == STORE) {
cudaSafeCall(cudaMemcpyToSymbol(__numStore__, &numSrc, uintSize));
} else {
cudaFree(constraints);
}
checkKernelErrors("ERROR while adding initial edges");
}
extern "C" void createGraph(const uint numObjectVars, const uint maxOffset) {
setbuf(stdout, NULL);
printf("[dev] Creating graph and masks out of constraints...");
const uint startTime = clock();
dim3 dim(WARP_SIZE, getThreadsPerBlock(DEF_THREADS_PER_BLOCK)/ WARP_SIZE);
initialize<<<getBlocks(), dim>>>();
checkKernelErrors("ERROR at initialize");
initializeEdges(__ptsConstraints__, __numPtsConstraints__, NEXT_DIFF_PTS);
initializeEdges(__copyConstraints__, __numCopyConstraints__, COPY_INV);
initializeEdges(__loadConstraints__, __numLoadConstraints__, LOAD_INV);
initializeEdges(__storeConstraints__, __numStoreConstraints__, STORE);
// no need to add GEP_INV edges, there is only one per variable
createOffsetMasks<<<getBlocks(), dim>>>(numObjectVars, maxOffset);
checkKernelErrors("ERROR while creating the offset mask");
uint* size;
cudaSafeCall(cudaMemcpyFromSymbol(&size, __size__, sizeof(uint*)));
cudaFree(size);
printf("OK.\n");
createTime = getEllapsedTime(startTime);
}
struct neqAdapter : public thrust::unary_function<tuple<uint, uint>, uint>{
__host__ __device__
uint operator()(const tuple<uint, uint>& a) {
return get<0>(a) != get<1>(a);
}
};
struct mulAdapter : public thrust::unary_function<tuple<uint, uint>, uint>{
__host__ __device__
uint operator()(const tuple<uint, uint>& a) {
return get<0>(a) * get<1>(a);
}
};
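// Sorts the (hash, variable) pairs by hash and then overwrites every key[i] with the index at
// which its run of equal hash values starts (an inclusive max-scan over position * (key[i] != key[i-1])),
// so that kernels can jump directly to the beginning of a hash bucket.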
__host__ void buildHashMap(device_vector<uint>& key, device_vector<uint>& val,const uint size) {
sort_by_key(key.begin(), key.begin() + size, val.begin());
thrust::maximum<uint> uintMax;
inclusive_scan(
make_transform_iterator(
make_zip_iterator(make_tuple(
make_transform_iterator(
make_zip_iterator(make_tuple(key.begin() + 1, key.begin())),
neqAdapter()),
counting_iterator<uint>(1))),
mulAdapter()),
make_transform_iterator(
make_zip_iterator(make_tuple(
make_transform_iterator(
make_zip_iterator(make_tuple(key.begin() + size, key.begin() + size - 1)),
neqAdapter()),
counting_iterator<uint>(1))),
mulAdapter()), key.begin() + 1, uintMax);
key[0] = 0;
}
extern "C" uint andersen(uint numVars) {
setbuf(stdout, NULL);
printf("[dev] Solving: ");
const uint startTime = clock();
uint iteration = 0;
uint updatePtsTime = 0;
uint hcdTime = 0;
uint ptsEquivTime = 0;
uint copyInvTime = 0;
uint storeInvTime = 0;
uint gepInvTime = 0;
dim3 dim512(WARP_SIZE, getThreadsPerBlock(512) / WARP_SIZE);
dim3 dimUpdate2(WARP_SIZE, getThreadsPerBlock(UPDATE_THREADS_PER_BLOCK) / WARP_SIZE);
dim3 dimHcd(WARP_SIZE, getThreadsPerBlock(HCD_THREADS_PER_BLOCK) / WARP_SIZE);
dim3 dimCopy(WARP_SIZE, getThreadsPerBlock(COPY_INV_THREADS_PER_BLOCK) / WARP_SIZE);
dim3 dimStore(WARP_SIZE, getThreadsPerBlock(STORE_INV_THREADS_PER_BLOCK) / WARP_SIZE);
dim3 dimGep(WARP_SIZE, getThreadsPerBlock(GEP_INV_THREADS_PER_BLOCK) / WARP_SIZE);
device_vector<uint> key(MAX_HASH_SIZE);
uint* ptr = raw_pointer_cast(&key[0]);
cudaSafeCall(cudaMemcpyToSymbol(__key__, &ptr, sizeof(uint*)));
device_vector<uint> keyAux(MAX_HASH_SIZE);
ptr = raw_pointer_cast(&keyAux[0]);
cudaSafeCall(cudaMemcpyToSymbol(__keyAux__, &ptr, sizeof(uint*)));
device_vector<uint> val(MAX_HASH_SIZE);
ptr = raw_pointer_cast(&val[0]);
cudaSafeCall(cudaMemcpyToSymbol(__val__, &ptr, sizeof(uint*)));
clock_t ruleTime = clock();
uint blocks = getBlocks();
// TODO: mega-hack to avoid race condition on 'gcc' input.
uint hcdBlocks = getenv("GCC") ? 4 : blocks;
/**
* TODO (Jan'11)
*
* a) use pointers instead of integers for the indexes, which is possible because all the
* inputs can be analyzed using a 4GB heap. Advantages:
* a.1) when dereferencing an index, we currently assume that it is really a delta with
* respect to __edges__. Because of that, every access to an element becomes *(__edges__ + delta).
* If we used pointers, we could simply do *ptr. Note that __edges__ is in constant memory.
* a.2) we could use the malloc in the CUDA libraries. Malloc could potentially be used in two
* places: OTHER and PTS edges. In practice, we currently keep the PTS edges together because they
* contain the solution, so we would restrict malloc to allocating copy/load/store edges. Since
* malloc returns a pointer, it would be compatible with the index-is-a-pointer system.
*
* b) HCD is buggy when many blocks are used. This happens only for the gcc input, so the
* temporary workaround (see the "hcdBlocks" variable) is to limit the number of blocks to four.
*
* c) retrieve the amount of memory and use that as HEAP_SIZE.
*
* d) devise a better representation scheme such that all the benchmarks fit in 3GB, so I can effectively
* use an MSI GTX580 (=> much faster than the Tesla C2070 or Quadro 6000) for all the inputs.
*/
uint ptsStartIndex;
while (1) {
//printf("\n\nIteration: %u\n", iteration);
cudaSafeCall(cudaMemcpyFromSymbol(&ptsStartIndex, __ptsFreeList__, uintSize));
//printf("\tstart = %d.\n", ptsStartIndex);
printRule(" updating pts...");
updatePtsInformation<<<blocks, dimUpdate2>>>();
checkKernelErrors("ERROR at update pts");
printRule("done\n");
addTimeToRule(updatePtsTime, ruleTime);
bool done = true;
cudaSafeCall(cudaMemcpyFromSymbol(&done, __done__, sizeof(bool)));
if (done) {
break;
}
// Ideally, we would use one stream to copy all the points-to edges discovered during the
// last iteration (resident in the interval [CURR_DIFF_PTS_START, __currDiffPtsFreeList__])
// back to the host while the other stream computes the next iteration, computation that does
// not modify the CURR_DIFF_PTS set. However, Thrust does not currently support streams, and
// kernel invocations using the default stream add an implicit synchronization point [CUDA 4.1
// programming guide, 3.2.5.5.4]
// If you do want to implement the simultaneous copy-kernel scheme, you can always modify
// the Thrust source code or create your custom Thrust library with the stream hardcoded on it.
// To avoid going that way, I chose to publish the version of the code that does pay a penalty
// for the data transfer.
printRule(" hcd...");
hcd<<<hcdBlocks, dimHcd>>>();
checkKernelErrors("ERROR at hcd rule");
updateInfo<<<3 * blocks, dim512>>>();
checkKernelErrors("ERROR while updating information after collapsing");
printRule("done\n");
addTimeToRule(hcdTime, ruleTime);
printRule(" finding curr_pts equivalences...");
computeCurrPtsHash<<<3 * blocks, dim512>>>();
checkKernelErrors("ERROR at compute hash");
uint numKeys;
cudaSafeCall(cudaMemcpyFromSymbol(&numKeys, __numKeys__, uintSize));
buildHashMap(key, val, numKeys);
findCurrPtsEquivalents<<<3 * blocks, dim512>>>();
checkKernelErrors("ERROR in finding CURR_PTS equivalents");
printRule("done\n");
addTimeToRule(ptsEquivTime, ruleTime);
printRule(" copy_inv and load_inv and store2storeInv...");
copyInv_loadInv_store2storeInv<<<blocks, dimCopy>>>();
checkKernelErrors("ERROR at copy_inv/load_inv/store2storeinv rule");
cudaSafeCall(cudaMemcpyFromSymbol(&numKeys, __numKeys__, uintSize));
assert(numKeys <= MAX_HASH_SIZE);
sort_by_key(key.begin(), key.begin() + numKeys, val.begin());
sequence(keyAux.begin(), keyAux.begin() + numKeys);
numKeys = unique_by_key(key.begin(), key.begin() + numKeys, keyAux.begin()).first - key.begin();
cudaSafeCall(cudaMemcpyToSymbol(__numKeys__, &numKeys, uintSize));
printRule("done\n");
addTimeToRule(copyInvTime, ruleTime);
printRule(" store_inv...");
storeInv<<<blocks, dimStore>>>();
checkKernelErrors("ERROR at store_inv rule");
printRule("done\n");
addTimeToRule(storeInvTime, ruleTime);
printRule(" gep_inv...");
gepInv<<<blocks, dimGep>>>();
checkKernelErrors("ERROR at gep_inv rule");
printRule("done\n");
addTimeToRule(gepInvTime, ruleTime);
iteration++;
printf(".");
}
printf("OK.\n");
printf("Iterations = %u.\n", iteration);
// store the last index for the PTS elements
uint ptsEndIndex;
cudaSafeCall(cudaMemcpyFromSymbol(&ptsEndIndex, __ptsFreeList__, uintSize));
uint solveTime = getEllapsedTime(startTime);
printf("SOLVE runtime: %u ms.\n", createTime + solveTime);
printf(" create graph : %u ms.\n", createTime);
printf(" rule solving : %u ms.\n", solveTime);
printf(" updatePts : %u ms.\n", updatePtsTime);
printf(" hcd : %u ms.\n", hcdTime);
printf(" equiv : %u ms.\n", ptsEquivTime);
printf(" cpLdSt2inv : %u ms.\n", copyInvTime);
printf(" store : %u ms.\n", storeInvTime);
printf(" gepInv : %u ms.\n", gepInvTime);
//printf("amount of points-to info = %d.\n", ptsEndIndex - ptsStartIndex);
// return ptsEndIndex - ptsStartIndex;
return ptsEndIndex;
}
|
fa63c3bb668f3af38954eb26803fcff097be86af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nanmedian_kernel.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/impl/nanmedian_kernel_impl.h"
#include "paddle/phi/kernels/top_k_kernel.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
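// Per-row NaN counting: buf[] accumulates this block's NaN count for each of the pre_dim rows of
// length stride; the partial counts are then merged atomically into nan_counts (per row),
// nan_total[0] (total NaN count) and nan_total[1] (maximum valid-element count, later max_valid_num).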
template <typename T>
__global__ void KernelNanCounts(const T* input,
const int numel,
const int64_t pre_dim,
const int64_t stride,
T min_val,
int64_t* nan_total,
int64_t* nan_counts) {
extern __shared__ int64_t buf[];
for (int i = threadIdx.x; i < pre_dim; i += blockDim.x) {
buf[i] = 0;
nan_counts[i] = 0;
}
if (threadIdx.x == 0) {
nan_total[0] = 0;
nan_total[1] = 0;
}
__syncthreads();
CUDA_KERNEL_LOOP(index, numel) {
const T x = input[index];
if (isnan(static_cast<float>(x))) {
auto bin = static_cast<int64_t>(index / stride);
paddle::platform::CudaAtomicAdd(&buf[bin], 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < pre_dim; i += blockDim.x) {
paddle::platform::CudaAtomicAdd(&nan_counts[i], buf[i]);
paddle::platform::CudaAtomicAdd(&nan_total[0], buf[i]);
paddle::platform::CudaAtomicMax(&nan_total[1], stride - buf[i]);
}
}
template <typename T>
__global__ void CalcMedianKernel(const T* sort_out_ptr,
const int64_t* sort_indices_ptr,
int64_t* median_val,
T* output,
T div_factor,
const bool is_odd,
const int64_t pre_dim,
const int64_t stride) {
CUDA_KERNEL_LOOP(index, pre_dim) {
int64_t pos = static_cast<int64_t>((index + 1) * stride) - 1;
if (is_odd) {
median_val[index * 2] = sort_indices_ptr[pos];
median_val[index * 2 + 1] = sort_indices_ptr[pos];
output[index] = sort_out_ptr[pos];
} else {
median_val[index * 2] =
pos > 0 ? sort_indices_ptr[pos - 1] : sort_indices_ptr[pos];
median_val[index * 2 + 1] = sort_indices_ptr[pos];
T median_val_left = pos > 0 ? sort_out_ptr[pos - 1] : sort_out_ptr[pos];
T median_val_right = sort_out_ptr[pos];
output[index] = (median_val_left + median_val_right) / div_factor;
}
}
}
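// NaN-aware variant of CalcMedianKernel: rows consisting entirely of NaNs produce nan_val with
// median indices of -1; otherwise the median position within the row's sorted slice is derived
// from nan_k, the number of non-NaN elements in that row.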
template <typename T>
__global__ void CalcNanmedianKernel(const T* sort_out_ptr,
const int64_t* sort_indices_ptr,
int64_t* nan_counts,
int64_t* median_val,
T* output,
const bool is_odd,
const int64_t pre_dim,
const int64_t max_valid_num,
const int64_t stride,
const T div_factor,
const T nan_val) {
CUDA_KERNEL_LOOP(index, pre_dim) {
int64_t pos = static_cast<int64_t>(index * max_valid_num);
int64_t nan_cnt = nan_counts[index];
if (nan_cnt == stride) {
median_val[index * 2] = -1;
median_val[index * 2 + 1] = -1;
output[index] = nan_val;
} else {
int64_t nan_k =
nan_cnt > 0 ? static_cast<int64_t>(stride - nan_cnt) : max_valid_num;
int64_t row_pos = static_cast<int64_t>(nan_k >> 1);
pos += row_pos;
if (nan_k & 1) {
median_val[index * 2] = sort_indices_ptr[pos];
median_val[index * 2 + 1] = sort_indices_ptr[pos];
output[index] = sort_out_ptr[pos];
} else {
median_val[index * 2] =
pos > 0 ? sort_indices_ptr[pos - 1] : sort_indices_ptr[pos];
median_val[index * 2 + 1] = sort_indices_ptr[pos];
T median_val_left = pos > 0 ? sort_out_ptr[pos - 1] : sort_out_ptr[pos];
T median_val_right = sort_out_ptr[pos];
output[index] = (median_val_left + median_val_right) / div_factor;
}
}
}
}
template <typename T, typename Context>
void ProcessMedianKernel(const Context& dev_ctx,
const DenseTensor& x,
bool ignore_nan,
DenseTensor* out,
int64_t* m_ptr) {
bool should_ignore_nan = ignore_nan;
auto stream = dev_ctx.stream();
const T* x_ptr = x.data<T>();
T* o_ptr = dev_ctx.template Alloc<T>(out);
int64_t numel = x.numel();
auto x_dim = x.dims();
int64_t x_rank = x_dim.size();
int64_t stride = x_dim[x_rank - 1];
int64_t pre_dim = numel / stride;
int64_t i = 0;
DenseTensor nan_counts, nan_stat;
int64_t* nan_counts_ptr;
int64_t max_valid_num = 0;
if (should_ignore_nan) {
nan_counts.Resize(phi::make_ddim({pre_dim}));
dev_ctx.template Alloc<int64_t>(&nan_counts);
nan_counts_ptr = nan_counts.data<int64_t>();
nan_stat.Resize(phi::make_ddim({2}));
int64_t* nan_stat_mem = dev_ctx.template Alloc<int64_t>(&nan_stat);
int64_t* nan_stat_ptr = nan_stat.data<int64_t>();
hipLaunchKernelGGL(( KernelNanCounts<T>), dim3(GET_BLOCKS(numel)),
dim3(PADDLE_CUDA_NUM_THREADS),
pre_dim * sizeof(int64_t),
stream, x_ptr,
numel,
pre_dim,
stride,
std::numeric_limits<T>::min(),
nan_stat_ptr,
nan_counts_ptr);
auto nan_stat_mem_cpu =
paddle::memory::Alloc(phi::CPUPlace(), sizeof(int64_t) * 2);
int64_t* nan_stat_cpu_ptr =
reinterpret_cast<int64_t*>(nan_stat_mem_cpu->ptr());
paddle::memory::Copy(phi::CPUPlace(),
nan_stat_cpu_ptr,
dev_ctx.GetPlace(),
nan_stat_mem,
sizeof(int64_t) * 2,
stream);
// all elements are nan values
T nan_val = std::numeric_limits<T>::quiet_NaN();
if (nan_stat_cpu_ptr[0] == numel) {
FullLikeKernel<T, Context>(dev_ctx, x, nan_val, x.dtype(), out);
return;
}
should_ignore_nan = nan_stat_cpu_ptr[0] > 0;
max_valid_num = nan_stat_cpu_ptr[1];
}
int64_t sort_k = should_ignore_nan ? max_valid_num : ((stride >> 1) + 1);
bool is_ori_odd = stride & 1;
DenseTensor sort_out, sort_indices;
auto sort_dim = x.dims();
int64_t rank = sort_dim.size();
sort_dim[rank - 1] = sort_k;
sort_out.Resize(sort_dim);
sort_indices.Resize(sort_dim);
dev_ctx.template Alloc<T>(&sort_out);
T* sort_out_ptr = sort_out.data<T>();
dev_ctx.template Alloc<int64_t>(&sort_indices);
int64_t* sort_indices_ptr = sort_indices.data<int64_t>();
TopkKernel<T, Context>(
dev_ctx, x, Scalar(sort_k), -1, false, true, &sort_out, &sort_indices);
T div_factor = static_cast<T>(2.0);
T nan_val = std::numeric_limits<T>::quiet_NaN();
if (should_ignore_nan) {
hipLaunchKernelGGL(( CalcNanmedianKernel<
T>), dim3(GET_BLOCKS(pre_dim)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
sort_out_ptr,
sort_indices_ptr,
nan_counts_ptr,
m_ptr,
o_ptr,
is_ori_odd,
pre_dim,
max_valid_num,
stride,
div_factor,
nan_val);
} else {
hipLaunchKernelGGL(( CalcMedianKernel<
T>), dim3(GET_BLOCKS(pre_dim)), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
sort_out_ptr,
sort_indices_ptr,
m_ptr,
o_ptr,
div_factor,
is_ori_odd,
pre_dim,
sort_k);
}
}
template <typename T, typename Context>
void BaseMedianKernel(const Context& dev_ctx,
const DenseTensor& input,
const IntArray& axes,
bool ignore_nan,
DenseTensor* out,
DenseTensor* median_index) {
DenseTensor x;
auto rank = input.dims().size();
if ((axes.size() == 0) || rank <= 1) {
x = input;
x.Resize({input.numel()});
} else {
PreprocessMedianKernel<T, Context>(dev_ctx, input, axes, &x);
}
int64_t* m_ptr = dev_ctx.template Alloc<int64_t>(median_index);
ProcessMedianKernel<T, Context>(dev_ctx, x, ignore_nan, out, m_ptr);
out->Resize(out->dims());
}
template <typename T, typename Context>
void NanmedianKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& axes,
bool keepdim,
DenseTensor* out,
DenseTensor* median_index) {
BaseMedianKernel<T, Context>(dev_ctx, x, axes, true, out, median_index);
}
} // namespace phi
PD_REGISTER_KERNEL(nanmedian,
GPU,
ALL_LAYOUT,
phi::NanmedianKernel,
float,
double,
int,
int64_t,
phi::dtype::float16) {}
| fa63c3bb668f3af38954eb26803fcff097be86af.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/nanmedian_kernel.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/impl/nanmedian_kernel_impl.h"
#include "paddle/phi/kernels/top_k_kernel.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
inline int GET_BLOCKS(const int N) {
return (N + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS;
}
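// Per-row NaN counting: buf[] accumulates this block's NaN count for each of the pre_dim rows of
// length stride; the partial counts are then merged atomically into nan_counts (per row),
// nan_total[0] (total NaN count) and nan_total[1] (maximum valid-element count, later max_valid_num).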
template <typename T>
__global__ void KernelNanCounts(const T* input,
const int numel,
const int64_t pre_dim,
const int64_t stride,
T min_val,
int64_t* nan_total,
int64_t* nan_counts) {
extern __shared__ int64_t buf[];
for (int i = threadIdx.x; i < pre_dim; i += blockDim.x) {
buf[i] = 0;
nan_counts[i] = 0;
}
if (threadIdx.x == 0) {
nan_total[0] = 0;
nan_total[1] = 0;
}
__syncthreads();
CUDA_KERNEL_LOOP(index, numel) {
const T x = input[index];
if (isnan(static_cast<float>(x))) {
auto bin = static_cast<int64_t>(index / stride);
paddle::platform::CudaAtomicAdd(&buf[bin], 1);
}
}
__syncthreads();
for (int i = threadIdx.x; i < pre_dim; i += blockDim.x) {
paddle::platform::CudaAtomicAdd(&nan_counts[i], buf[i]);
paddle::platform::CudaAtomicAdd(&nan_total[0], buf[i]);
paddle::platform::CudaAtomicMax(&nan_total[1], stride - buf[i]);
}
}
template <typename T>
__global__ void CalcMedianKernel(const T* sort_out_ptr,
const int64_t* sort_indices_ptr,
int64_t* median_val,
T* output,
T div_factor,
const bool is_odd,
const int64_t pre_dim,
const int64_t stride) {
CUDA_KERNEL_LOOP(index, pre_dim) {
int64_t pos = static_cast<int64_t>((index + 1) * stride) - 1;
if (is_odd) {
median_val[index * 2] = sort_indices_ptr[pos];
median_val[index * 2 + 1] = sort_indices_ptr[pos];
output[index] = sort_out_ptr[pos];
} else {
median_val[index * 2] =
pos > 0 ? sort_indices_ptr[pos - 1] : sort_indices_ptr[pos];
median_val[index * 2 + 1] = sort_indices_ptr[pos];
T median_val_left = pos > 0 ? sort_out_ptr[pos - 1] : sort_out_ptr[pos];
T median_val_right = sort_out_ptr[pos];
output[index] = (median_val_left + median_val_right) / div_factor;
}
}
}
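// NaN-aware variant of CalcMedianKernel: rows consisting entirely of NaNs produce nan_val with
// median indices of -1; otherwise the median position within the row's sorted slice is derived
// from nan_k, the number of non-NaN elements in that row.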
template <typename T>
__global__ void CalcNanmedianKernel(const T* sort_out_ptr,
const int64_t* sort_indices_ptr,
int64_t* nan_counts,
int64_t* median_val,
T* output,
const bool is_odd,
const int64_t pre_dim,
const int64_t max_valid_num,
const int64_t stride,
const T div_factor,
const T nan_val) {
CUDA_KERNEL_LOOP(index, pre_dim) {
int64_t pos = static_cast<int64_t>(index * max_valid_num);
int64_t nan_cnt = nan_counts[index];
if (nan_cnt == stride) {
median_val[index * 2] = -1;
median_val[index * 2 + 1] = -1;
output[index] = nan_val;
} else {
int64_t nan_k =
nan_cnt > 0 ? static_cast<int64_t>(stride - nan_cnt) : max_valid_num;
int64_t row_pos = static_cast<int64_t>(nan_k >> 1);
pos += row_pos;
if (nan_k & 1) {
median_val[index * 2] = sort_indices_ptr[pos];
median_val[index * 2 + 1] = sort_indices_ptr[pos];
output[index] = sort_out_ptr[pos];
} else {
median_val[index * 2] =
pos > 0 ? sort_indices_ptr[pos - 1] : sort_indices_ptr[pos];
median_val[index * 2 + 1] = sort_indices_ptr[pos];
T median_val_left = pos > 0 ? sort_out_ptr[pos - 1] : sort_out_ptr[pos];
T median_val_right = sort_out_ptr[pos];
output[index] = (median_val_left + median_val_right) / div_factor;
}
}
}
}
template <typename T, typename Context>
void ProcessMedianKernel(const Context& dev_ctx,
const DenseTensor& x,
bool ignore_nan,
DenseTensor* out,
int64_t* m_ptr) {
bool should_ignore_nan = ignore_nan;
auto stream = dev_ctx.stream();
const T* x_ptr = x.data<T>();
T* o_ptr = dev_ctx.template Alloc<T>(out);
int64_t numel = x.numel();
auto x_dim = x.dims();
int64_t x_rank = x_dim.size();
int64_t stride = x_dim[x_rank - 1];
int64_t pre_dim = numel / stride;
int64_t i = 0;
DenseTensor nan_counts, nan_stat;
int64_t* nan_counts_ptr;
int64_t max_valid_num = 0;
if (should_ignore_nan) {
nan_counts.Resize(phi::make_ddim({pre_dim}));
dev_ctx.template Alloc<int64_t>(&nan_counts);
nan_counts_ptr = nan_counts.data<int64_t>();
nan_stat.Resize(phi::make_ddim({2}));
int64_t* nan_stat_mem = dev_ctx.template Alloc<int64_t>(&nan_stat);
int64_t* nan_stat_ptr = nan_stat.data<int64_t>();
KernelNanCounts<T><<<GET_BLOCKS(numel),
PADDLE_CUDA_NUM_THREADS,
pre_dim * sizeof(int64_t),
stream>>>(x_ptr,
numel,
pre_dim,
stride,
std::numeric_limits<T>::min(),
nan_stat_ptr,
nan_counts_ptr);
auto nan_stat_mem_cpu =
paddle::memory::Alloc(phi::CPUPlace(), sizeof(int64_t) * 2);
int64_t* nan_stat_cpu_ptr =
reinterpret_cast<int64_t*>(nan_stat_mem_cpu->ptr());
paddle::memory::Copy(phi::CPUPlace(),
nan_stat_cpu_ptr,
dev_ctx.GetPlace(),
nan_stat_mem,
sizeof(int64_t) * 2,
stream);
// all elements are nan values
T nan_val = std::numeric_limits<T>::quiet_NaN();
if (nan_stat_cpu_ptr[0] == numel) {
FullLikeKernel<T, Context>(dev_ctx, x, nan_val, x.dtype(), out);
return;
}
should_ignore_nan = nan_stat_cpu_ptr[0] > 0;
max_valid_num = nan_stat_cpu_ptr[1];
}
int64_t sort_k = should_ignore_nan ? max_valid_num : ((stride >> 1) + 1);
bool is_ori_odd = stride & 1;
DenseTensor sort_out, sort_indices;
auto sort_dim = x.dims();
int64_t rank = sort_dim.size();
sort_dim[rank - 1] = sort_k;
sort_out.Resize(sort_dim);
sort_indices.Resize(sort_dim);
dev_ctx.template Alloc<T>(&sort_out);
T* sort_out_ptr = sort_out.data<T>();
dev_ctx.template Alloc<int64_t>(&sort_indices);
int64_t* sort_indices_ptr = sort_indices.data<int64_t>();
TopkKernel<T, Context>(
dev_ctx, x, Scalar(sort_k), -1, false, true, &sort_out, &sort_indices);
T div_factor = static_cast<T>(2.0);
T nan_val = std::numeric_limits<T>::quiet_NaN();
if (should_ignore_nan) {
CalcNanmedianKernel<
T><<<GET_BLOCKS(pre_dim), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
sort_out_ptr,
sort_indices_ptr,
nan_counts_ptr,
m_ptr,
o_ptr,
is_ori_odd,
pre_dim,
max_valid_num,
stride,
div_factor,
nan_val);
} else {
CalcMedianKernel<
T><<<GET_BLOCKS(pre_dim), PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
sort_out_ptr,
sort_indices_ptr,
m_ptr,
o_ptr,
div_factor,
is_ori_odd,
pre_dim,
sort_k);
}
}
template <typename T, typename Context>
void BaseMedianKernel(const Context& dev_ctx,
const DenseTensor& input,
const IntArray& axes,
bool ignore_nan,
DenseTensor* out,
DenseTensor* median_index) {
DenseTensor x;
auto rank = input.dims().size();
if ((axes.size() == 0) || rank <= 1) {
x = input;
x.Resize({input.numel()});
} else {
PreprocessMedianKernel<T, Context>(dev_ctx, input, axes, &x);
}
int64_t* m_ptr = dev_ctx.template Alloc<int64_t>(median_index);
ProcessMedianKernel<T, Context>(dev_ctx, x, ignore_nan, out, m_ptr);
out->Resize(out->dims());
}
template <typename T, typename Context>
void NanmedianKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& axes,
bool keepdim,
DenseTensor* out,
DenseTensor* median_index) {
BaseMedianKernel<T, Context>(dev_ctx, x, axes, true, out, median_index);
}
} // namespace phi
PD_REGISTER_KERNEL(nanmedian,
GPU,
ALL_LAYOUT,
phi::NanmedianKernel,
float,
double,
int,
int64_t,
phi::dtype::float16) {}
|
3c4d90076ae4441d1cf345afb9a338c7895cd0a9.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <inttypes.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include "drvapi_error_string.h"
#include "../common/log.h"
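// Interleaves two frames row by row (even rows taken from src_0, odd rows from src_1) while
// flipping the result vertically; each thread copies one 32-bit pixel.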
__global__ void cu_interlace_frames(uint32_t* src_0, uint32_t* src_1, uint32_t* dst, int width, int height)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int s = i + j * width;
int d = i + (height - 1 - j) * width;
uint32_t src = (j & 1) ? src_1[s] : src_0[s];
dst[d] = src;
};
extern "C" int cuda_interlace_frames(unsigned char* src_0, unsigned char* src_1, unsigned char* dst, int stride, int height, hipStream_t cu_stream)
{
int width = stride / sizeof(uint32_t);
dim3 threads(16, 8, 1);
dim3 blocks(width / threads.x, height / threads.y, 1);
hipLaunchKernelGGL(( cu_interlace_frames), dim3(blocks), dim3(threads), 0, cu_stream, (uint32_t*)src_0, (uint32_t*)src_1, (uint32_t*)dst, width, height);
return 0;
};
| 3c4d90076ae4441d1cf345afb9a338c7895cd0a9.cu | #include <stdint.h>
#include <inttypes.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include "drvapi_error_string.h"
#include "../common/log.h"
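// Interleaves two frames row by row (even rows taken from src_0, odd rows from src_1) while
// flipping the result vertically; each thread copies one 32-bit pixel.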
__global__ void cu_interlace_frames(uint32_t* src_0, uint32_t* src_1, uint32_t* dst, int width, int height)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int s = i + j * width;
int d = i + (height - 1 - j) * width;
uint32_t src = (j & 1) ? src_1[s] : src_0[s];
dst[d] = src;
};
extern "C" int cuda_interlace_frames(unsigned char* src_0, unsigned char* src_1, unsigned char* dst, int stride, int height, cudaStream_t cu_stream)
{
int width = stride / sizeof(uint32_t);
dim3 threads(16, 8, 1);
dim3 blocks(width / threads.x, height / threads.y, 1);
cu_interlace_frames<<<blocks, threads, 0, cu_stream>>>((uint32_t*)src_0, (uint32_t*)src_1, (uint32_t*)dst, width, height);
return 0;
};
|
ade2614ade3e25e8be3b9986aba726cf6f29d684.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check examples 07 and 08 for the basics of tensor op gemm kernels. On the NVIDIA Ampere
architecture, most concepts still hold. The two main differences are:
1. NVIDIA Ampere architecture introduces a new series of tensor core instructions (see
include/cutlass/arch/mma_sm80.h) which are more efficient on Ampere.
2. NVIDIA Ampere architecture uses cp_async() to build a multistage software pipeline to better hide
latency (see include/cutlass/gemm/threadblock/mma_multistage.h).
Moreover, NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h)
data types in tensor cores. One big advantage is that we can load in fp32 data and convert them
implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional
fp32 data by using NVIDIA Ampere architecture.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
hipError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
hipError_t error = hipSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
float alpha;
float beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({5120, 4096, 4096}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "14_ampere_tf32_tensorop_gemm example\n\n"
<< " This example uses the CUTLASS Library to execute TF32 tensorop GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m <int> GEMM M dimension\n"
<< " --n <int> GEMM N dimension\n"
<< " --k <int> GEMM K dimension\n"
<< " --alpha <f32> Epilogue scalar alpha\n"
<< " --beta <f32> Epilogue scalar beta\n\n"
<< " --iterations <int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/14_ampere_tf32_tensorop_gemm/14_ampere_tf32_tensorop_gemm --m=1024 --n=512 --k=1024 \\\n"
<< " --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = float; // <- data type of elements in input matrix A
using ElementInputB = float; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 16>; // <- threadblock tile M = 128, N = 128, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- identity threadblock swizzling function
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 4;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
int run(Options &options) {
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha);
ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Result structure
Result result;
//
// Construct events
//
hipEvent_t events[2];
for (auto & event : events) {
result.error = hipEventCreate(&event);
if (result.error != hipSuccess) {
std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
}
// Record an event at the start of a series of GEMMs
result.error = hipEventRecord(events[0]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = hipEventRecord(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
// Wait for work on the device to complete.
result.error = hipEventSynchronize(events[1]);
if (result.error != hipSuccess) {
std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != hipSuccess) {
std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl;
return -1;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)hipEventDestroy(event);
}
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
hipDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (passed) {
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
}
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main(int argc, const char **argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older Toolkits. Its actions are no-op.
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
printf("%d x %d x %d TF32 tensor op Matrix Multiply\n", \
options.problem_size.m(), options.problem_size.n(), options.problem_size.k());
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
return run(options);
}
| ade2614ade3e25e8be3b9986aba726cf6f29d684.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check examples 07 and 08 for the basics of tensor op gemm kernels. On the NVIDIA Ampere
architecture, most concepts still hold. The two main differences are:
1. NVIDIA Ampere architecture introduces a new series of tensor core instructions (see
include/cutlass/arch/mma_sm80.h) which are more efficient on Ampere.
2. NVIDIA Ampere architecture uses cp_async() to build a multistage software pipeline to better hide
latency (see include/cutlass/gemm/threadblock/mma_multistage.h).
Moreover, NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h)
data types in tensor cores. One big advantage is that we can load in fp32 data and convert them
implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional
fp32 data by using NVIDIA Ampere architecture.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/command_line.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Result structure
struct Result {
double runtime_ms;
double gflops;
cutlass::Status status;
cudaError_t error;
bool passed;
//
// Methods
//
Result(
double runtime_ms = 0,
double gflops = 0,
cutlass::Status status = cutlass::Status::kSuccess,
cudaError_t error = cudaSuccess
):
runtime_ms(runtime_ms), gflops(gflops), status(status), error(error), passed(true) { }
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// Command line options parsing
struct Options {
bool help;
cutlass::gemm::GemmCoord problem_size;
int batch_count;
float alpha;
float beta;
bool reference_check;
int iterations;
Options():
help(false),
problem_size({5120, 4096, 4096}),
batch_count(1),
reference_check(true),
iterations(20),
alpha(1),
beta() { }
bool valid() {
return true;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out << "14_ampere_tf32_tensorop_gemm example\n\n"
<< " This example uses the CUTLASS Library to execute TF32 tensorop GEMM computations.\n\n"
<< "Options:\n\n"
<< " --help If specified, displays this usage statement.\n\n"
<< " --m <int> GEMM M dimension\n"
<< " --n <int> GEMM N dimension\n"
<< " --k <int> GEMM K dimension\n"
<< " --alpha <f32> Epilogue scalar alpha\n"
<< " --beta <f32> Epilogue scalar beta\n\n"
<< " --iterations <int> Number of profiling iterations to perform.\n\n";
out << "\n\nExamples:\n\n"
<< "$ ./examples/14_ampere_tf32_tensorop_gemm/14_ampere_tf32_tensorop_gemm --m=1024 --n=512 --k=1024 \\\n"
<< " --alpha=2 --beta=0.707 \n\n";
return out;
}
/// Compute performance in GFLOP/s
double gflops(double runtime_s) const {
// Number of real-valued multiply-adds
int64_t fmas = problem_size.product() * batch_count;
// Two flops per multiply-add
return 2.0 * double(fmas) / double(1.0e9) / runtime_s;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = float; // <- data type of elements in input matrix A
using ElementInputB = float; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Column Major for
// Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 16>; // <- threadblock tile M = 128, N = 128, K = 16
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- identity threadblock swizzling function
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 4;
using Gemm = cutlass::gemm::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
int run(Options &options) {
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size = options.problem_size;
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(4),
ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(4),
ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(4),
ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha);
ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta);
// Split K dimension into 1 partition (split-K is not used here)
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Result structure
Result result;
//
// Construct events
//
cudaEvent_t events[2];
for (auto & event : events) {
result.error = cudaEventCreate(&event);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
}
// Record an event at the start of a series of GEMMs
result.error = cudaEventRecord(events[0]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
//
// Run profiling loop
//
for (int iter = 0; iter < options.iterations; ++iter) {
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
}
//
// Stop profiling loop
//
// Record an event when the GEMMs are complete
result.error = cudaEventRecord(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Wait for work on the device to complete.
result.error = cudaEventSynchronize(events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Measure elapsed runtime
float runtime_ms = 0;
result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]);
if (result.error != cudaSuccess) {
std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl;
return -1;
}
// Compute average runtime and GFLOPs.
result.runtime_ms = double(runtime_ms) / double(options.iterations);
result.gflops = options.gflops(result.runtime_ms / 1000.0);
// Cleanup
for (auto event : events) {
(void)cudaEventDestroy(event);
}
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size,
alpha,
tensor_a.device_ref(),
tensor_b.device_ref(),
beta,
tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
if (passed) {
std::cout << "Runtime: " << result.runtime_ms << " ms" << std::endl;
std::cout << " GFLOPs: " << result.gflops << std::endl;
}
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main(int argc, const char **argv) {
bool notSupported = false;
// Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.0.
//
// CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
// Returning zero so this test passes on older Toolkits. Its actions are a no-op.
return 0;
}
Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << std::endl;
return 0;
}
printf("%d x %d x %d TF32 tensor op Matrix Multiply\n", \
options.problem_size.m(), options.problem_size.n(), options.problem_size.k());
if (!options.valid()) {
std::cerr << "Invalid problem." << std::endl;
return -1;
}
return run(options);
}
|
d42ce21e0c8bd62a6640065347b37ed002e2c14b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ex1.h"
#define VEC_SIZE 256
#define HISTOGRAM_SIZE 256
__device__ void prefix_sum(int *arr , int len) {
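// In-place Hillis-Steele inclusive scan: after log2(len) rounds, arr[i] holds the sum of arr[0..i].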
int thIdx = threadIdx.x;
int inc;
for(int stride = 1 ; stride < len ; stride *= 2){
if(thIdx >= stride && thIdx < len){
inc = arr[thIdx - stride];
}
__syncthreads();
if(thIdx >= stride && thIdx < len){
arr[thIdx] += inc;
}
__syncthreads();
}
return;
}
__device__ void map_calc(uchar *map ,int *cdf, int idx){
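// Histogram-equalization lookup table: normalize the CDF by the pixel count, quantize to N_COLORS levels, and rescale to the 0..255 range.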
float map_value = IMG_HEIGHT * IMG_WIDTH;
if(idx < HISTOGRAM_SIZE){
map[idx] = ((uchar)(N_COLORS * (cdf[idx] /map_value))) * (256 / N_COLORS);
}
}
__global__ void process_image_kernel(uchar *all_in, uchar *all_out){
int thIdx = threadIdx.x;
int offset = IMG_WIDTH * IMG_HEIGHT * blockIdx.x + thIdx;
__shared__ int histogram[HISTOGRAM_SIZE];
__shared__ uchar map[HISTOGRAM_SIZE];
int * cdf = histogram;
if (thIdx < HISTOGRAM_SIZE) {
histogram[thIdx] = 0;
}
__syncthreads();
for(int j = 0; j < IMG_WIDTH * IMG_HEIGHT; j += blockDim.x){
int pixelValue = all_in[offset + j];
atomicAdd(histogram + pixelValue, 1);
}
__syncthreads();
prefix_sum(histogram, HISTOGRAM_SIZE);
map_calc(map ,cdf,thIdx);
__syncthreads();
for(int j = 0; j < IMG_WIDTH * IMG_HEIGHT; j += blockDim.x){
int pixelValue = all_in[offset + j];
all_out[offset + j] = map[pixelValue];
}
return;
}
/* Task serial context struct with necessary CPU / GPU pointers to process a single image */
struct task_serial_context {
// TODO define task serial memory buffers
uchar *gpu_in_img[N_IMAGES];
uchar *gpu_out_img[N_IMAGES];
hipStream_t streams[N_IMAGES];
};
/* Allocate GPU memory for a single input image and a single output image.
*
* Returns: allocated and initialized task_serial_context. */
struct task_serial_context *task_serial_init()
{
auto context = new task_serial_context;
//allocate GPU memory for a single input image and a single output image
for(int i = 0 ; i < N_IMAGES ; i++){
CUDA_CHECK( hipStreamCreate(&context->streams[i]) );
CUDA_CHECK( hipMalloc(&context->gpu_in_img[i], IMG_HEIGHT * IMG_WIDTH) );
CUDA_CHECK( hipMalloc(&context->gpu_out_img[i], IMG_HEIGHT * IMG_WIDTH) );
}
return context;
}
/* Process all the images in the given host array and return the output in the
* provided output host array */
void task_serial_process(struct task_serial_context *context, uchar *images_in, uchar *images_out)
{
//TODO: in a for loop:
int offset = 0;
for(int i = 0 ; i < N_IMAGES ; i++ , offset += IMG_HEIGHT * IMG_WIDTH){
// 1. copy the relevant image from images_in to the GPU memory you allocated
// offset = i * IMG_HEIGHT * IMG_WIDTH ;
CUDA_CHECK( hipMemcpyAsync(context->gpu_in_img[i] , images_in + offset , IMG_HEIGHT * IMG_WIDTH, hipMemcpyHostToDevice , context->streams[i]) );
// 2. invoke GPU kernel on this image
hipLaunchKernelGGL(( process_image_kernel), dim3(1) , dim3(1024) , 0 , context->streams[i], context->gpu_in_img[i] , context->gpu_out_img[i]);
// 3. copy output from GPU memory to relevant location in images_out_gpu_serial
CUDA_CHECK( hipMemcpyAsync(images_out + offset , context->gpu_out_img[i] , IMG_HEIGHT * IMG_WIDTH, hipMemcpyDeviceToHost , context->streams[i]) );
}
CUDA_CHECK( hipDeviceSynchronize() );
}
/* Release allocated resources for the task-serial implementation. */
void task_serial_free(struct task_serial_context *context)
{
//TODO: free resources allocated in task_serial_init
for(int i = 0 ; i < N_IMAGES ; i++){
CUDA_CHECK( hipFree(context->gpu_in_img[i]) );
CUDA_CHECK( hipFree(context->gpu_out_img[i]) );
}
delete context;
}
/* Bulk GPU context struct with necessary CPU / GPU pointers to process all the images */
struct gpu_bulk_context {
// TODO define bulk-GPU memory buffers
uchar *gpu_in_img;
uchar *gpu_out_img;
};
/* Allocate GPU memory for all the input and output images.
*
* Returns: allocated and initialized gpu_bulk_context. */
struct gpu_bulk_context *gpu_bulk_init()
{
auto context = new gpu_bulk_context;
//TODO: allocate GPU memory for a all input images and all output images
CUDA_CHECK( hipMalloc(&context->gpu_in_img, N_IMAGES * IMG_HEIGHT * IMG_WIDTH) );
CUDA_CHECK( hipMalloc(&context->gpu_out_img, N_IMAGES * IMG_HEIGHT * IMG_WIDTH) );
return context;
}
/* Process all the images in the given host array and return the output in the
* provided output host array */
void gpu_bulk_process(struct gpu_bulk_context *context, uchar *images_in, uchar *images_out)
{
// 1. copy all input images from images_in to the GPU memory you allocated
CUDA_CHECK( hipMemcpy(context->gpu_in_img , images_in, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, hipMemcpyHostToDevice) );
// 2. invoke a kernel with N_IMAGES threadblocks, each working on a different image
hipLaunchKernelGGL(( process_image_kernel), dim3(N_IMAGES) , dim3(1024), 0, 0, context->gpu_in_img , context->gpu_out_img);
// 3. copy output images from GPU memory to images_out
CUDA_CHECK( hipMemcpy(images_out, context->gpu_out_img , N_IMAGES * IMG_HEIGHT * IMG_WIDTH, hipMemcpyDeviceToHost) );
}
/* Release allocated resources for the bulk GPU implementation. */
void gpu_bulk_free(struct gpu_bulk_context *context)
{
//free resources allocated in gpu_bulk_init
CUDA_CHECK(hipFree(context->gpu_in_img));
CUDA_CHECK(hipFree(context->gpu_out_img));
delete context;
}
| d42ce21e0c8bd62a6640065347b37ed002e2c14b.cu | #include "ex1.h"
#define VEC_SIZE 256
#define HISTOGRAM_SIZE 256
__device__ void prefix_sum(int *arr , int len) {
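// In-place Hillis-Steele inclusive scan: after log2(len) rounds, arr[i] holds the sum of arr[0..i].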
int thIdx = threadIdx.x;
int inc;
for(int stride = 1 ; stride < len ; stride *= 2){
if(thIdx >= stride && thIdx < len){
inc = arr[thIdx - stride];
}
__syncthreads();
if(thIdx >= stride && thIdx < len){
arr[thIdx] += inc;
}
__syncthreads();
}
return;
}
__device__ void map_calc(uchar *map ,int *cdf, int idx){
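// Histogram-equalization lookup table: normalize the CDF by the pixel count, quantize to N_COLORS levels, and rescale to the 0..255 range.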
float map_value = IMG_HEIGHT * IMG_WIDTH;
if(idx < HISTOGRAM_SIZE){
map[idx] = ((uchar)(N_COLORS * (cdf[idx] /map_value))) * (256 / N_COLORS);
}
}
__global__ void process_image_kernel(uchar *all_in, uchar *all_out){
int thIdx = threadIdx.x;
int offset = IMG_WIDTH * IMG_HEIGHT * blockIdx.x + thIdx;
__shared__ int histogram[HISTOGRAM_SIZE];
__shared__ uchar map[HISTOGRAM_SIZE];
int * cdf = histogram;
if (thIdx < HISTOGRAM_SIZE) {
histogram[thIdx] = 0;
}
__syncthreads();
for(int j = 0; j < IMG_WIDTH * IMG_HEIGHT; j += blockDim.x){
int pixelValue = all_in[offset + j];
atomicAdd(histogram + pixelValue, 1);
}
__syncthreads();
prefix_sum(histogram, HISTOGRAM_SIZE);
map_calc(map ,cdf,thIdx);
__syncthreads();
for(int j = 0; j < IMG_WIDTH * IMG_HEIGHT; j += blockDim.x){
int pixelValue = all_in[offset + j];
all_out[offset + j] = map[pixelValue];
}
return;
}
/* Task serial context struct with necessary CPU / GPU pointers to process a single image */
struct task_serial_context {
// TODO define task serial memory buffers
uchar *gpu_in_img[N_IMAGES];
uchar *gpu_out_img[N_IMAGES];
cudaStream_t streams[N_IMAGES];
};
/* Allocate GPU memory for a single input image and a single output image.
*
* Returns: allocated and initialized task_serial_context. */
struct task_serial_context *task_serial_init()
{
auto context = new task_serial_context;
//allocate GPU memory for a single input image and a single output image
for(int i = 0 ; i < N_IMAGES ; i++){
CUDA_CHECK( cudaStreamCreate(&context->streams[i]) );
CUDA_CHECK( cudaMalloc(&context->gpu_in_img[i], IMG_HEIGHT * IMG_WIDTH) );
CUDA_CHECK( cudaMalloc(&context->gpu_out_img[i], IMG_HEIGHT * IMG_WIDTH) );
}
return context;
}
/* Process all the images in the given host array and return the output in the
* provided output host array */
void task_serial_process(struct task_serial_context *context, uchar *images_in, uchar *images_out)
{
//TODO: in a for loop:
int offset = 0;
for(int i = 0 ; i < N_IMAGES ; i++ , offset += IMG_HEIGHT * IMG_WIDTH){
// 1. copy the relevant image from images_in to the GPU memory you allocated
// offset = i * IMG_HEIGHT * IMG_WIDTH ;
CUDA_CHECK( cudaMemcpyAsync(context->gpu_in_img[i] , images_in + offset , IMG_HEIGHT * IMG_WIDTH, cudaMemcpyHostToDevice , context->streams[i]) );
// 2. invoke GPU kernel on this image
process_image_kernel<<<1 , 1024 , 0 , context->streams[i]>>>(context->gpu_in_img[i] , context->gpu_out_img[i]);
// 3. copy output from GPU memory to relevant location in images_out_gpu_serial
CUDA_CHECK( cudaMemcpyAsync(images_out + offset , context->gpu_out_img[i] , IMG_HEIGHT * IMG_WIDTH, cudaMemcpyDeviceToHost , context->streams[i]) );
}
CUDA_CHECK( cudaDeviceSynchronize() );
}
/* Release allocated resources for the task-serial implementation. */
void task_serial_free(struct task_serial_context *context)
{
//TODO: free resources allocated in task_serial_init
for(int i = 0 ; i < N_IMAGES ; i++){
CUDA_CHECK( cudaFree(context->gpu_in_img[i]) );
CUDA_CHECK( cudaFree(context->gpu_out_img[i]) );
}
delete context;
}
/* Bulk GPU context struct with necessary CPU / GPU pointers to process all the images */
struct gpu_bulk_context {
// TODO define bulk-GPU memory buffers
uchar *gpu_in_img;
uchar *gpu_out_img;
};
/* Allocate GPU memory for all the input and output images.
*
* Returns: allocated and initialized gpu_bulk_context. */
struct gpu_bulk_context *gpu_bulk_init()
{
auto context = new gpu_bulk_context;
//TODO: allocate GPU memory for a all input images and all output images
CUDA_CHECK( cudaMalloc(&context->gpu_in_img, N_IMAGES * IMG_HEIGHT * IMG_WIDTH) );
CUDA_CHECK( cudaMalloc(&context->gpu_out_img, N_IMAGES * IMG_HEIGHT * IMG_WIDTH) );
return context;
}
/* Process all the images in the given host array and return the output in the
* provided output host array */
void gpu_bulk_process(struct gpu_bulk_context *context, uchar *images_in, uchar *images_out)
{
// 1. copy all input images from images_in to the GPU memory you allocated
CUDA_CHECK( cudaMemcpy(context->gpu_in_img , images_in, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, cudaMemcpyHostToDevice) );
// 2. invoke a kernel with N_IMAGES threadblocks, each working on a different image
process_image_kernel<<<N_IMAGES , 1024>>>(context->gpu_in_img , context->gpu_out_img);
// 3. copy output images from GPU memory to images_out
CUDA_CHECK( cudaMemcpy(images_out, context->gpu_out_img , N_IMAGES * IMG_HEIGHT * IMG_WIDTH, cudaMemcpyDeviceToHost) );
}
/* Release allocated resources for the bulk GPU implementation. */
void gpu_bulk_free(struct gpu_bulk_context *context)
{
//free resources allocated in gpu_bulk_init
CUDA_CHECK(cudaFree(context->gpu_in_img));
CUDA_CHECK(cudaFree(context->gpu_out_img));
delete context;
}
|
refine_sum.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "timerc.h"
#define N 1024*1024
#define THREADS_PER_BLOCK 1024
#define NUM_BLOCKS N/THREADS_PER_BLOCK
#define MULT 32
__global__ void parallel_sum_naive(int *a, int *b, int mult) {
int ix = threadIdx.x + blockDim.x * blockIdx.x;
int sum = 0;
for (int i = 0; i < mult; i++) {
sum += a[ix * mult + i];
}
b[ix] = sum;
}
// parallel reduction
__global__ void parallel_sum_reduction(int *a, int *b, int block_size) {
int start = blockIdx.x * block_size;
int step = 1;
while (step <= block_size/2) {
if (threadIdx.x < (block_size / step / 2)) {
a[start + 2 * threadIdx.x * step] += a[start + 2 * threadIdx.x * step + step];
}
__syncthreads();
step *= 2;
}
if (threadIdx.x == 0) b[blockIdx.x] = a[start];
}
__global__ void parallel_sum_reduction_with_shared_mem(int *a, int *b, int block_size) {
__shared__ int tmpmem[THREADS_PER_BLOCK];
int step = 1;
tmpmem[threadIdx.x] = a[threadIdx.x + blockDim.x * blockIdx.x];
__syncthreads();
| refine_sum.cu | #include <stdio.h>
#include <stdlib.h>
#include "timerc.h"
#define N 1024*1024
#define THREADS_PER_BLOCK 1024
#define NUM_BLOCKS N/THREADS_PER_BLOCK
#define MULT 32
__global__ void parallel_sum_naive(int *a, int *b, int mult) {
int ix = threadIdx.x + blockDim.x * blockIdx.x;
int sum = 0;
for (int i = 0; i < mult; i++) {
sum += a[ix * mult + i];
}
b[ix] = sum;
}
// parallel reduction
__global__ void parallel_sum_reduction(int *a, int *b, int block_size) {
int start = blockIdx.x * block_size;
int step = 1;
while (step <= block_size/2) {
if (threadIdx.x < (block_size / step / 2)) {
a[start + 2 * threadIdx.x * step] += a[start + 2 * threadIdx.x * step + step];
}
__syncthreads();
step *= 2;
}
if (threadIdx.x == 0) b[blockIdx.x] = a[start];
}
__global__ void parallel_sum_reduction_with_shared_mem(int *a, int *b, int block_size) {
__shared__ int tmpmem[THREADS_PER_BLOCK];
int step = 1;
tmpmem[threadIdx.x] = a[threadIdx.x + blockDim.x * blockIdx.x];
__syncthreads();
|
f551326fe356da3b679c1e9f1d7e5f54dc130399.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright: (c) 2011 Matija Osrecki <[email protected]>
*/
#include <vector>
#include <algorithm>
#include "combine.h"
// Floating point type to be used on the device.
typedef float dftype;
void CUDACombiner::combine(std::vector<double> &res) {
const int n = (int)x_.size();
const int typesize = sizeof(dftype);
// calculate Xt * X and Xt * Y
dftype *hxx = new dftype[n+1];
dftype *hxy = new dftype[n+1];
hxx[0] = hxy[0] = 0.0;
for(int i = 0; i < n; ++i) {
hxx[i+1] = hxx[i] + x_[i] * x_[i];
hxy[i+1] = hxy[i] + x_[i] * y_[i];
}
// allocate device memory and copy data
dftype *dxx, *dxy, *dres;
hipMalloc(&dxx, (n+1) * typesize);
hipMalloc(&dxy, (n+1) * typesize);
hipMalloc(&dres, n * typesize);
hipMemcpy(dxx, hxx, (n+1) * typesize, hipMemcpyHostToDevice);
hipMemcpy(dxy, hxy, (n+1) * typesize, hipMemcpyHostToDevice);
// TODO(matija): init kernel
// TODO(matija): start kernel
// retrieve results
dftype *hres = new dftype[n];
hipMemcpy(hres, dres, n * typesize, hipMemcpyDeviceToHost);
res.resize(n);
std::copy(hres, hres + n, res.begin());
// free memory
delete[] hxx;
delete[] hxy;
delete[] hres;
hipFree(dxx);
hipFree(dxy);
hipFree(dres);
}
| f551326fe356da3b679c1e9f1d7e5f54dc130399.cu | /*
Copyright: (c) 2011 Matija Osrecki <[email protected]>
*/
#include <vector>
#include <algorithm>
#include "combine.h"
// Floating point type to be used on the device.
typedef float dftype;
void CUDACombiner::combine(std::vector<double> &res) {
const int n = (int)x_.size();
const int typesize = sizeof(dftype);
// calculate Xt * X and Xt * Y
dftype *hxx = new dftype[n+1];
dftype *hxy = new dftype[n+1];
hxx[0] = hxy[0] = 0.0;
for(int i = 0; i < n; ++i) {
hxx[i+1] = hxx[i] + x_[i] * x_[i];
hxy[i+1] = hxy[i] + x_[i] * y_[i];
}
// allocate device memory and copy data
dftype *dxx, *dxy, *dres;
cudaMalloc(&dxx, (n+1) * typesize);
cudaMalloc(&dxy, (n+1) * typesize);
cudaMalloc(&dres, n * typesize);
cudaMemcpy(dxx, hxx, (n+1) * typesize, cudaMemcpyHostToDevice);
cudaMemcpy(dxy, hxy, (n+1) * typesize, cudaMemcpyHostToDevice);
// TODO(matija): init kernel
// TODO(matija): start kernel
// retrieve results
dftype *hres = new dftype[n];
cudaMemcpy(hres, dres, n * typesize, cudaMemcpyDeviceToHost);
res.resize(n);
std::copy(hres, hres + n, res.begin());
// free memory
delete[] hxx;
delete[] hxy;
delete[] hres;
cudaFree(dxx);
cudaFree(dxy);
cudaFree(dres);
}
|
e79437172ad040c6feffebfffab9aceb40ec79ca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
//for random intialize
#include <stdlib.h>
#include <time.h>
//for memset
#include <cstring>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char * file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPU assert: %s %s %d\n", hipGetErrorString(code), file, line );
if (abort) exit(code);
}
}
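// Note: despite the "sum" in its name, this kernel computes the element-wise product a[i] * b[i] * c[i].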
__global__ void sum_array_gpu( int* a, int* b, int* c, int* result, int size)
{
int gid = blockIdx.x *blockDim.x +threadIdx.x;
if(gid < size)
{
result[gid] = a[gid] * b[gid] * c[gid];
}
}
void sum_array_cpu( int* a, int* b, int* c, int* result, int size)
{
for (int i=0; i < size; i++)
{
result[i] = a[i] * b[i] * c[i];
}
}
void compare_arrays (int* gpu, int* cpu, int size){
for ( int i = 0; i < size ; i++){
if(gpu[i]!= cpu[i]){
printf("Arrays are different \n");
return;
}
}
printf("Arrays are same \n");
}
int main()
{
int size = pow(2,30);
int block_size = 512;
size_t NO_BYTES = size * sizeof(int); // size_t: 2^30 ints is 4 GiB, which overflows int
// Allocate memory in Host
int* h_a, *h_b, *h_c, *gpu_results, *cpu_results;
h_a = (int*)malloc(NO_BYTES);
h_b = (int*)malloc(NO_BYTES);
h_c = (int*)malloc(NO_BYTES);
cpu_results = (int*)malloc(NO_BYTES);
gpu_results = (int*)malloc(NO_BYTES);
time_t t;
srand((unsigned)time(&t));
// Initialise random values for the array
for (int i=0; i <size; i++)
{
h_a[i] = (int)(rand() & 0xff);
}
for (int i=0; i <size; i++)
{
h_b[i] = (int)(rand() & 0xff);
}
for (int i=0; i <size; i++)
{
h_c[i] = (int)(rand() & 0xff);
}
memset(gpu_results,0,NO_BYTES);
memset(cpu_results,0,NO_BYTES);
//Summation in CPU
clock_t cpu_start, cpu_end;
cpu_start = clock();
sum_array_cpu(h_a, h_b, h_c, cpu_results, size);
cpu_end = clock();
// Allocate memory in device
int* d_a, *d_b, *d_c, *d_result;
gpuErrchk(hipMalloc((int**)&d_a,NO_BYTES));
gpuErrchk(hipMalloc((int**)&d_b,NO_BYTES));
gpuErrchk(hipMalloc((int**)&d_c,NO_BYTES));
gpuErrchk(hipMalloc((int**)&d_result,NO_BYTES));
clock_t htod_start, htod_end;
htod_start = clock();
// Transfer the data from host to device
gpuErrchk(hipMemcpy(d_a, h_a, NO_BYTES, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_b, h_b, NO_BYTES, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_c, h_c, NO_BYTES, hipMemcpyHostToDevice));
htod_end = clock();
// Designing grid and block size
dim3 block(block_size);
dim3 grid((size/block.x)+1);
// Launch kernel function
clock_t gpu_start, gpu_end;
gpu_start = clock();
sum_array_gpu << < grid, block >> > (d_a, d_b, d_c, d_result, size);
hipDeviceSynchronize();
gpu_end = clock();
clock_t dtoh_start, dtoh_end;
dtoh_start = clock();
gpuErrchk(hipMemcpy(gpu_results, d_result, NO_BYTES, hipMemcpyDeviceToHost));
dtoh_end = clock();
//compare the arrays
compare_arrays(gpu_results,cpu_results, size);
printf("Sum array CPU execution time : %4.6f \n",
(double)((double)(cpu_end - cpu_start)/ CLOCKS_PER_SEC));
printf("Sum array GPU execution time : %4.6f \n",
(double)((double)(gpu_end - gpu_start)/ CLOCKS_PER_SEC));
printf("htod mem transfer time : %4.6f \n",
(double)((double)(htod_end - htod_start)/ CLOCKS_PER_SEC));
printf("dtoh mem transfer time : %4.6f \n",
(double)((double)(dtoh_end - dtoh_start)/ CLOCKS_PER_SEC));
printf("Sum array GPU total execution time : %4.6f \n",
(double)((double)(dtoh_end - htod_start)/ CLOCKS_PER_SEC));
hipFree(d_result);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(gpu_results);
free(h_a);
free(h_b);
free(h_c);
hipDeviceReset();
} | e79437172ad040c6feffebfffab9aceb40ec79ca.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
//for random intialize
#include <stdlib.h>
#include <time.h>
//for memset
#include <cstring>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char * file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line );
if (abort) exit(code);
}
}
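// Note: despite the "sum" in its name, this kernel computes the element-wise product a[i] * b[i] * c[i].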
__global__ void sum_array_gpu( int* a, int* b, int* c, int* result, int size)
{
int gid = blockIdx.x *blockDim.x +threadIdx.x;
if(gid < size)
{
result[gid] = a[gid] * b[gid] * c[gid];
}
}
void sum_array_cpu( int* a, int* b, int* c, int* result, int size)
{
for (int i=0; i < size; i++)
{
result[i] = a[i] * b[i] * c[i];
}
}
void compare_arrays (int* gpu, int* cpu, int size){
for ( int i = 0; i < size ; i++){
if(gpu[i]!= cpu[i]){
printf("Arrays are different \n");
return;
}
}
printf("Arrays are same \n");
}
int main()
{
int size = pow(2,30);
int block_size = 512;
size_t NO_BYTES = size * sizeof(int); // size_t: 2^30 ints is 4 GiB, which overflows int
// Allocate memory in Host
int* h_a, *h_b, *h_c, *gpu_results, *cpu_results;
h_a = (int*)malloc(NO_BYTES);
h_b = (int*)malloc(NO_BYTES);
h_c = (int*)malloc(NO_BYTES);
cpu_results = (int*)malloc(NO_BYTES);
gpu_results = (int*)malloc(NO_BYTES);
time_t t;
srand((unsigned)time(&t));
// Initialise random values for the array
for (int i=0; i <size; i++)
{
h_a[i] = (int)(rand() & 0xff);
}
for (int i=0; i <size; i++)
{
h_b[i] = (int)(rand() & 0xff);
}
for (int i=0; i <size; i++)
{
h_c[i] = (int)(rand() & 0xff);
}
memset(gpu_results,0,NO_BYTES);
memset(cpu_results,0,NO_BYTES);
//Summation in CPU
clock_t cpu_start, cpu_end;
cpu_start = clock();
sum_array_cpu(h_a, h_b, h_c, cpu_results, size);
cpu_end = clock();
// Allocate memory in device
int* d_a, *d_b, *d_c, *d_result;
gpuErrchk(cudaMalloc((int**)&d_a,NO_BYTES));
gpuErrchk(cudaMalloc((int**)&d_b,NO_BYTES));
gpuErrchk(cudaMalloc((int**)&d_c,NO_BYTES));
gpuErrchk(cudaMalloc((int**)&d_result,NO_BYTES));
clock_t htod_start, htod_end;
htod_start = clock();
// Transfer the data from host to device
gpuErrchk(cudaMemcpy(d_a, h_a, NO_BYTES, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_b, h_b, NO_BYTES, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_c, h_c, NO_BYTES, cudaMemcpyHostToDevice));
htod_end = clock();
// Designing grid and block size
dim3 block(block_size);
dim3 grid((size/block.x)+1);
// Launch kernel function
clock_t gpu_start, gpu_end;
gpu_start = clock();
sum_array_gpu << < grid, block >> > (d_a, d_b, d_c, d_result, size);
cudaDeviceSynchronize();
gpu_end = clock();
clock_t dtoh_start, dtoh_end;
dtoh_start = clock();
gpuErrchk(cudaMemcpy(gpu_results, d_result, NO_BYTES, cudaMemcpyDeviceToHost));
dtoh_end = clock();
//compare the arrays
compare_arrays(gpu_results,cpu_results, size);
printf("Sum array CPU execution time : %4.6f \n",
(double)((double)(cpu_end - cpu_start)/ CLOCKS_PER_SEC));
printf("Sum array GPU execution time : %4.6f \n",
(double)((double)(gpu_end - gpu_start)/ CLOCKS_PER_SEC));
printf("htod mem transfer time : %4.6f \n",
(double)((double)(htod_end - htod_start)/ CLOCKS_PER_SEC));
printf("dtoh mem transfer time : %4.6f \n",
(double)((double)(dtoh_end - dtoh_start)/ CLOCKS_PER_SEC));
printf("Sum array GPU total execution time : %4.6f \n",
(double)((double)(dtoh_end - htod_start)/ CLOCKS_PER_SEC));
cudaFree(d_result);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(gpu_results);
free(h_a);
free(h_b);
free(h_c);
cudaDeviceReset();
} |
nnKernel.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
#include "linear.h"
#include "../include/nnKernel.cuh"
#define TILE_SIZE 32
#define BLOCK_DIM 32 // tile edge used by transpose_kernel
#define CHECK_ERROR(call) { \
hipError_t err = call; \
if (err != hipSuccess) { \
printf("%s in %s at line %d\n", hipGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
__global__
void matrixMult(const float * __restrict__ M, const float * __restrict__ N, float *P, int j, int k, int l)
{
__shared__ float Mds[TILE_SIZE][TILE_SIZE];
__shared__ float NdsOne[TILE_SIZE][TILE_SIZE];
__shared__ float NdsTwo[TILE_SIZE][TILE_SIZE];
float PvalOne = 0.0;
float PvalTwo = 0.0;
int bx = blockIdx.x * 2; int by = blockIdx.y;
int tx= threadIdx.x; int ty = threadIdx.y;
int Col = bx * blockDim.x + tx;
int Row = by * blockDim.y + ty;
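// Each thread produces two output elements, at columns Col and Col + TILE_SIZE, so one block covers a tile twice as wide as blockDim.x.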
#pragma unroll
for(int ph=0; ph<ceil(k/(float)TILE_SIZE); ph++)
{
Mds[ty][tx] = 0.0;
NdsOne[ty][tx] = 0.0;
NdsTwo[ty][tx] = 0.0;
__syncthreads();
if(Row < j && (ph * TILE_SIZE + ty) < k)
Mds[ty][tx] = M[Row*k + TILE_SIZE * ph + tx];
if(Col < l && (ph * TILE_SIZE + ty) < k)
NdsOne[ty][tx] = N[(ty + ph * TILE_SIZE) * l + Col];
if(Col + TILE_SIZE < l && (ph * TILE_SIZE + ty) < k)
NdsTwo[ty][tx] = N[(ty + ph * TILE_SIZE) * l + Col + TILE_SIZE];
__syncthreads();
#pragma unroll
for(int k=0; k<TILE_SIZE; k++)
{
PvalOne += Mds[ty][k] * NdsOne[k][tx];
PvalTwo += Mds[ty][k] * NdsTwo[k][tx];
}
__syncthreads();
}
if(Row < j && Col < l)
P[Row * l + Col] = PvalOne;
if(Row < j && Col + TILE_SIZE < l)
P[Row * l + Col + TILE_SIZE] = PvalTwo;
}
__global__ void vecxvec_kernel(const float* __restrict__ d_m, const float* __restrict__ d_x, float * __restrict__ d_p,
const unsigned int nRows, const unsigned int nCols)
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float xds[TILE_SIZE];
float pval = 0.0;
#pragma unroll
for(unsigned int m = 0; m < ((nCols + TILE_SIZE - 1)/TILE_SIZE); m++)
{
if((m * TILE_SIZE + threadIdx.x) < nCols)
{
xds[threadIdx.x] = d_x[threadIdx.x + m * TILE_SIZE];
} else xds[threadIdx.x] = 0.f;
__syncthreads();
#pragma unroll
for(unsigned int e = 0; e < TILE_SIZE; e++)
{
pval += d_m[tid + (e + TILE_SIZE * m) * nRows] * xds[e];
}
__syncthreads();
}
if (tid < nRows)
d_p[tid] = pval;
}
__global__ void matvec_kernel(const float* __restrict__ d_M, const float* __restrict__ d_x, float * __restrict__ d_p,
const unsigned int nRows, const unsigned int nCols)
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float xds[TILE_SIZE];
float pval = 0.0;
#pragma unroll
for(unsigned int m = 0; m < ((nCols + TILE_SIZE -1)/TILE_SIZE); m++)
{
if((m * TILE_SIZE + threadIdx.x) < nCols)
{
xds[threadIdx.x] = d_x[threadIdx.x + m * TILE_SIZE];
} else xds[threadIdx.x] = 0.f;
__syncthreads();
#pragma unroll
for(unsigned int e = 0; e < TILE_SIZE; e++)
{
pval += d_M[tid + (e + TILE_SIZE *m) * nRows] * xds[e];
}
__syncthreads();
}
if (tid < nRows)
d_p[tid] = pval;
}
__global__ void transpose_kernel(float *odata, float *idata, int width, int height)
{
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
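// The +1 padding on the second dimension avoids shared-memory bank conflicts when the tile is read back transposed.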
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
void matrixVector(float* h_Matrix, float* h_Vec, float* h_Out, int in, int out)
{
float *d_Out;
float *d_Vec;
float *d_Matrix;
int sizeMatrix = in * out * sizeof(float);
int sizeInVec = in * sizeof(float);
int sizeOutVec = out * sizeof(float);
CHECK_ERROR(hipMalloc((void **)&d_Matrix, sizeMatrix));
CHECK_ERROR(hipMalloc((void **)&d_Vec, sizeInVec));
CHECK_ERROR(hipMalloc((void **)&d_Out, sizeOutVec));
hipMemcpy(d_Matrix, h_Matrix, sizeMatrix, hipMemcpyHostToDevice);
hipMemcpy(d_Vec, h_Vec, sizeInVec, hipMemcpyHostToDevice);
dim3 dimGrid(ceil(in/32.0), ceil(out/32.0), 1);
dim3 dimBlock(32.0, 32.0, 1);
hipLaunchKernelGGL(( matvec_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_Matrix, d_Vec, d_Out, in, out);
hipMemcpy(h_Out, d_Out, sizeOutVec, hipMemcpyDeviceToHost);
hipFree(d_Out);
hipFree(d_Vec);
hipFree(d_Matrix);
}
void transpose( float* h_inMatrix, float* h_outMatrix, int in, int out)
{
float *d_inMatrix;
float *d_outMatrix;
int sizeMatrix = in * out * sizeof(float);
CHECK_ERROR(hipMalloc((void **)&d_inMatrix, sizeMatrix));
CHECK_ERROR(hipMalloc((void **)&d_outMatrix, sizeMatrix));
hipMemcpy(d_inMatrix, h_inMatrix, sizeMatrix, hipMemcpyHostToDevice);
dim3 dimGrid(ceil(in/32.0), ceil(out/32.0), 1);
dim3 dimBlock(32.0, 32.0, 1);
hipLaunchKernelGGL(( transpose_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_outMatrix, d_inMatrix, in, out);
hipMemcpy(h_outMatrix, d_outMatrix, sizeMatrix, hipMemcpyDeviceToHost);
hipFree(d_inMatrix);
hipFree(d_outMatrix);
}
| nnKernel.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>
#include "linear.h"
#include "../include/nnKernel.cuh"
#define TILE_SIZE 32
#define BLOCK_DIM 32 // tile edge used by transpose_kernel
#define CHECK_ERROR(call) { \
cudaError_t err = call; \
if (err != cudaSuccess) { \
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__); \
exit(err); \
} \
}
__global__
void matrixMult(const float * __restrict__ M, const float * __restrict__ N, float *P, int j, int k, int l)
{
__shared__ float Mds[TILE_SIZE][TILE_SIZE];
__shared__ float NdsOne[TILE_SIZE][TILE_SIZE];
__shared__ float NdsTwo[TILE_SIZE][TILE_SIZE];
float PvalOne = 0.0;
float PvalTwo = 0.0;
int bx = blockIdx.x * 2; int by = blockIdx.y;
int tx= threadIdx.x; int ty = threadIdx.y;
int Col = bx * blockDim.x + tx;
int Row = by * blockDim.y + ty;
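// Each thread produces two output elements, at columns Col and Col + TILE_SIZE, so one block covers a tile twice as wide as blockDim.x.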
#pragma unroll
for(int ph=0; ph<ceil(k/(float)TILE_SIZE); ph++)
{
Mds[ty][tx] = 0.0;
NdsOne[ty][tx] = 0.0;
NdsTwo[ty][tx] = 0.0;
__syncthreads();
if(Row < j && (ph * TILE_SIZE + ty) < k)
Mds[ty][tx] = M[Row*k + TILE_SIZE * ph + tx];
if(Col < l && (ph * TILE_SIZE + ty) < k)
NdsOne[ty][tx] = N[(ty + ph * TILE_SIZE) * l + Col];
if(Col + TILE_SIZE < l && (ph * TILE_SIZE + ty) < k)
NdsTwo[ty][tx] = N[(ty + ph * TILE_SIZE) * l + Col + TILE_SIZE];
__syncthreads();
#pragma unroll
for(int k=0; k<TILE_SIZE; k++)
{
PvalOne += Mds[ty][k] * NdsOne[k][tx];
PvalTwo += Mds[ty][k] * NdsTwo[k][tx];
}
__syncthreads();
}
if(Row < j && Col < l)
P[Row * l + Col] = PvalOne;
if(Row < j && Col + TILE_SIZE < l)
P[Row * l + Col + TILE_SIZE] = PvalTwo;
}
__global__ void vecxvec_kernel(const float* __restrict__ d_m, const float* __restrict__ d_x, float * __restrict__ d_p,
const unsigned int nRows, const unsigned int nCols)
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float xds[TILE_SIZE];
float pval = 0.0;
#pragma unroll
for(unsigned int m = 0; m < ((nCols + TILE_SIZE - 1)/TILE_SIZE); m++)
{
if((m * TILE_SIZE + threadIdx.x) < nCols)
{
xds[threadIdx.x] = d_x[threadIdx.x + m * TILE_SIZE];
} else xds[threadIdx.x] = 0.f;
__syncthreads();
#pragma unroll
for(unsigned int e = 0; e < TILE_SIZE; e++)
{
pval += d_m[tid + (e + TILE_SIZE * m) * nRows] * xds[e];
}
__syncthreads();
}
if (tid < nRows)
d_p[tid] = pval;
}
__global__ void matvec_kernel(const float* __restrict__ d_M, const float* __restrict__ d_x, float * __restrict__ d_p,
const unsigned int nRows, const unsigned int nCols)
{
const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float xds[TILE_SIZE];
float pval = 0.0;
#pragma unroll
for(unsigned int m = 0; m < ((nCols + TILE_SIZE -1)/TILE_SIZE); m++)
{
if((m * TILE_SIZE + threadIdx.x) < nCols)
{
xds[threadIdx.x] = d_x[threadIdx.x + m * TILE_SIZE];
} else xds[threadIdx.x] = 0.f;
__syncthreads();
#pragma unroll
for(unsigned int e = 0; e < TILE_SIZE; e++)
{
pval += d_M[tid + (e + TILE_SIZE *m) * nRows] * xds[e];
}
__syncthreads();
}
if (tid < nRows)
d_p[tid] = pval;
}
__global__ void transpose_kernel(float *odata, float *idata, int width, int height)
{
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
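// The +1 padding on the second dimension avoids shared-memory bank conflicts when the tile is read back transposed.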
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
void matrixVector(float* h_Matrix, float* h_Vec, float* h_Out, int in, int out)
{
float *d_Out;
float *d_Vec;
float *d_Matrix;
int sizeMatrix = in * out * sizeof(float);
int sizeInVec = in * sizeof(float);
int sizeOutVec = out * sizeof(float);
CHECK_ERROR(cudaMalloc((void **)&d_Matrix, sizeMatrix));
CHECK_ERROR(cudaMalloc((void **)&d_Vec, sizeInVec));
CHECK_ERROR(cudaMalloc((void **)&d_Out, sizeOutVec));
cudaMemcpy(d_Matrix, h_Matrix, sizeMatrix, cudaMemcpyHostToDevice);
cudaMemcpy(d_Vec, h_Vec, sizeInVec, cudaMemcpyHostToDevice);
dim3 dimGrid(ceil(in/32.0), ceil(out/32.0), 1);
dim3 dimBlock(32.0, 32.0, 1);
matvec_kernel<<<dimGrid, dimBlock>>>(d_Matrix, d_Vec, d_Out, in, out);
cudaMemcpy(h_Out, d_Out, sizeOutVec, cudaMemcpyDeviceToHost);
cudaFree(d_Out);
cudaFree(d_Vec);
cudaFree(d_Matrix);
}
void transpose( float* h_inMatrix, float* h_outMatrix, int in, int out)
{
float *d_inMatrix;
float *d_outMatrix;
int sizeMatrix = in * out * sizeof(float);
CHECK_ERROR(cudaMalloc((void **)&d_inMatrix, sizeMatrix));
CHECK_ERROR(cudaMalloc((void **)&d_outMatrix, sizeMatrix));
cudaMemcpy(d_inMatrix, h_inMatrix, sizeMatrix, cudaMemcpyHostToDevice);
dim3 dimGrid(ceil(in/32.0), ceil(out/32.0), 1);
dim3 dimBlock(32.0, 32.0, 1);
transpose_kernel<<<dimGrid, dimBlock>>>( d_outMatrix, d_inMatrix, in, out);
cudaMemcpy(h_outMatrix, d_outMatrix, sizeMatrix, cudaMemcpyDeviceToHost);
cudaFree(d_inMatrix);
cudaFree(d_outMatrix);
}
|
93ce42b82052f9d7c51b72ac678bcaa225e8f5bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <ctime>
using namespace std;
__global__ void AddInts(int *a, int *b, int count){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < count){
a[id] += b[id];
}
}
int main(){
srand(time(NULL));
int count = 1000;
int *h_a = new int[count];
int *h_b = new int[count];
for (int i = 0; i < count; i++){
h_a[i] = rand() % 1000;
h_b[i] = rand() % 1000;
}
//Check Arrays (first 5)
for (int i = 0; i < 5; i++){
cout << "[" << h_a[i] << "]" << " "<< "[" << h_b[i] << "] " << endl;
}
int *d_a;
int *d_b;
//Allocate variables Memory onto GPU
if (hipMalloc(&d_a, sizeof(int)*count) != hipSuccess){
cout << "Allocation of d_a failed" << endl;
delete[] h_a;
delete[] h_b;
return 0;
}
if (hipMalloc(&d_b, sizeof(int)*count) != hipSuccess){
cout << "Allocation of d_b failed" << endl;
delete[] h_a;
delete[] h_b;
hipFree(d_a);
return 0;
}
//Copy variables to allocated memory on GPU
if (hipMemcpy(d_a, h_a, sizeof(int)*count, hipMemcpyHostToDevice) != hipSuccess){
cout << "Could not copy variables to GPU" << endl;
delete[] h_a;
delete[] h_b;
hipFree(d_a);
hipFree(d_b);
return 0;
}
if (hipMemcpy(d_b, h_b, sizeof(int) *count, hipMemcpyHostToDevice) != hipSuccess){
cout << "Could not copy variables to GPU" << endl;
delete[] h_a;
delete[] h_b;
hipFree(d_a);
hipFree(d_b);
return 0;
}
//Launch Kernel
AddInts << < count / 256 + 1, 256 >> > (d_a, d_b, count);
//Copy results back from GPU to HOST
if (hipMemcpy(h_a, d_a, sizeof(int)*count, hipMemcpyDeviceToHost) != hipSuccess){
delete[] h_a;
delete[] h_b;
hipFree(d_a);
hipFree(d_b);
cout << "Could not copy variables back" << endl;
return 0;
}
//Print first 5 Additions
for (int i = 0; i < 5; i++){
cout << h_a[i] << endl;
}
hipFree(d_a);
hipFree(d_b);
//Delete variables
delete[] h_a;
delete[] h_b;
return 0;
} | 93ce42b82052f9d7c51b72ac678bcaa225e8f5bf.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <ctime>
using namespace std;
__global__ void AddInts(int *a, int *b, int count){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < count){
a[id] += b[id];
}
}
int main(){
srand(time(NULL));
int count = 1000;
int *h_a = new int[count];
int *h_b = new int[count];
for (int i = 0; i < count; i++){
h_a[i] = rand() % 1000;
h_b[i] = rand() % 1000;
}
//Check Arrays (first 5)
for (int i = 0; i < 5; i++){
cout << "[" << h_a[i] << "]" << " "<< "[" << h_b[i] << "] " << endl;
}
int *d_a;
int *d_b;
//Allocate variables Memory onto GPU
if (cudaMalloc(&d_a, sizeof(int)*count) != cudaSuccess){
cout << "Allocation of d_a failed" << endl;
delete[] h_a;
delete[] h_b;
return 0;
}
if (cudaMalloc(&d_b, sizeof(int)*count) != cudaSuccess){
cout << "Allocation of d_b failed" << endl;
delete[] h_a;
delete[] h_b;
cudaFree(d_a);
return 0;
}
//Copy variables to allocated memory on GPU
if (cudaMemcpy(d_a, h_a, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
cout << "Could not copy variables to GPU" << endl;
delete[] h_a;
delete[] h_b;
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
if (cudaMemcpy(d_b, h_b, sizeof(int) *count, cudaMemcpyHostToDevice) != cudaSuccess){
cout << "Could not copy variables to GPU" << endl;
delete[] h_a;
delete[] h_b;
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
//Launch Kernel
AddInts << < count / 256 + 1, 256 >> > (d_a, d_b, count);
//Copy results back from GPU to HOST
if (cudaMemcpy(h_a, d_a, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess){
delete[] h_a;
delete[] h_b;
cudaFree(d_a);
cudaFree(d_b);
cout << "Could not copy variables back" << endl;
return 0;
}
//Print first 5 Additions
for (int i = 0; i < 5; i++){
cout << h_a[i] << endl;
}
cudaFree(d_a);
cudaFree(d_b);
//Delete variables
delete[] h_a;
delete[] h_b;
return 0;
} |
2a252a925f5b4ee5a9995bde505c317516dd35a3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
#include "matrix.h"
// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"
// GPUmat
#include "GPUmat.hh"
// static paramaters
static int init = 0;
static GPUmat *gm;
#include "cudaKernels.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if (nrhs!=4)
mexErrMsgTxt("Wrong number of arguments");
if (init == 0) {
// Initialize function
// mexLock();
// load GPUmat
gm = gmGetGPUmat();
init = 1;
}
/* mex parameters are:
0 Source term (that this applies the B operator to)
1 Destination term (that this stores the result in)
2 Precondition coefficient (one double)
3 Precondition term (Accumulates successive scaled B operations
*/
// Get GPU array pointers
GPUtype srcArray = gm->gputype.getGPUtype(prhs[0]);
GPUtype dstArray = gm->gputype.getGPUtype(prhs[1]);
GPUtype accArray = gm->gputype.getGPUtype(prhs[3]);
// Get some control variables sorted out
const int *dims = gm->gputype.getSize(srcArray);
dim3 gridsize;
gridsize.x = dims[0]/EDGEDIM_BOP;
gridsize.y = dims[1]/EDGEDIM_BOP;
gridsize.z = 1;
if(gridsize.x * EDGEDIM_BOP < dims[0]) gridsize.x++;
if(gridsize.y * EDGEDIM_BOP < dims[1]) gridsize.y++;
dim3 blocksize; blocksize.x = blocksize.y = EDGEDIM_BOP+2;
blocksize.z = 1;
int nx = dims[0];
int ny = dims[1];
int nz = dims[2];
hipLaunchKernelGGL(( Laplacian_B_OperatorKernel), dim3(gridsize), dim3(blocksize), 0, 0, (double*)gm->gputype.getGPUptr(srcArray), (double*)gm->gputype.getGPUptr(dstArray), *mxGetPr(prhs[2]), (double*)gm->gputype.getGPUptr(accArray), nx, ny, nz, 4);
}
| 2a252a925f5b4ee5a9995bde505c317516dd35a3.cu | #include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif
#include "mex.h"
#include "matrix.h"
// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"
// GPUmat
#include "GPUmat.hh"
// static paramaters
static int init = 0;
static GPUmat *gm;
#include "cudaKernels.h"
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
// At least 2 arguments expected
// Input and result
if (nrhs!=4)
mexErrMsgTxt("Wrong number of arguments");
if (init == 0) {
// Initialize function
// mexLock();
// load GPUmat
gm = gmGetGPUmat();
init = 1;
}
/* mex parameters are:
0 Source term (that this applies the B operator to)
1 Destination term (that this stores the result in)
2 Precondition coefficient (one double)
3 Precondition term (Accumulates successive scaled B operations
*/
// Get GPU array pointers
GPUtype srcArray = gm->gputype.getGPUtype(prhs[0]);
GPUtype dstArray = gm->gputype.getGPUtype(prhs[1]);
GPUtype accArray = gm->gputype.getGPUtype(prhs[3]);
// Get some control variables sorted out
const int *dims = gm->gputype.getSize(srcArray);
dim3 gridsize;
gridsize.x = dims[0]/EDGEDIM_BOP;
gridsize.y = dims[1]/EDGEDIM_BOP;
gridsize.z = 1;
if(gridsize.x * EDGEDIM_BOP < dims[0]) gridsize.x++;
if(gridsize.y * EDGEDIM_BOP < dims[1]) gridsize.y++;
dim3 blocksize; blocksize.x = blocksize.y = EDGEDIM_BOP+2;
blocksize.z = 1;
int nx = dims[0];
int ny = dims[1];
int nz = dims[2];
Laplacian_B_OperatorKernel<<<gridsize, blocksize>>>((double*)gm->gputype.getGPUptr(srcArray), (double*)gm->gputype.getGPUptr(dstArray), *mxGetPr(prhs[2]), (double*)gm->gputype.getGPUptr(accArray), nx, ny, nz, 4);
}
|
5391f865c0b4ebf563eef10dbd545e92147e9e40.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cfloat>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <rocblas.h>
#include <cusolverDn.h>
#include "struct.h"
#include "constants.h"
void assignObjgrdStructMemory(long long &, fcndata &, double *);
void objgrd(double *, double *, double *, fcndata &);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
mxInitGPU();
fcndata fcnObj = {0};
mxGPUArray const *ctlGrpStk;
mxGPUArray const *cenIniMat, *difIniMat, *grpNdeVec, *grpIfoMat, *wgtGrpVec, *tgtNdeMat;
mxGPUArray *grdGrpVec;
ctlGrpStk = mxGPUCreateFromMxArray(prhs[ 0]);
cenIniMat = mxGPUCreateFromMxArray(prhs[ 1]);
difIniMat = mxGPUCreateFromMxArray(prhs[ 2]);
grpNdeVec = mxGPUCreateFromMxArray(prhs[ 3]);
grpIfoMat = mxGPUCreateFromMxArray(prhs[ 4]);
wgtGrpVec = mxGPUCreateFromMxArray(prhs[ 5]);
tgtNdeMat = mxGPUCreateFromMxArray(prhs[ 6]);
fcnObj.prm.knlOrder = mxGetScalar(prhs[ 7]);
fcnObj.prm.knlWidth = mxGetScalar(prhs[ 8]);
fcnObj.prm.knlEps = mxGetScalar(prhs[ 9]);
fcnObj.prm.timeStp = mxGetScalar(prhs[10]);
fcnObj.prm.timeNum = mxGetScalar(prhs[11]);
fcnObj.prm.tgtWgt = mxGetScalar(prhs[12]);
fcnObj.prm.rgdGrpNum = mxGPUGetNumberOfElements(wgtGrpVec);
mwSize const ndim = 1;
mwSize const dims[1] = {(mwSize) fcnObj.prm.rgdGrpNum * RGDDOF * (fcnObj.prm.timeNum - 1)};
grdGrpVec = mxGPUCreateGPUArray(ndim, dims, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
// ---
double *d_ctlGrpStk = (double *) mxGPUGetDataReadOnly(ctlGrpStk);
fcnObj.prm.d_cenIniMat = (double *) mxGPUGetDataReadOnly(cenIniMat);
fcnObj.prm.d_difIniMat = (double *) mxGPUGetDataReadOnly(difIniMat);
fcnObj.prm.d_grpNdeVec = (int *) mxGPUGetDataReadOnly(grpNdeVec);
fcnObj.prm.d_grpIfoMat = (int *) mxGPUGetDataReadOnly(grpIfoMat);
fcnObj.prm.d_wgtGrpVec = (double *) mxGPUGetDataReadOnly(wgtGrpVec);
fcnObj.tgt.d_tgtNdeMat = (double *) mxGPUGetDataReadOnly(tgtNdeMat);
double *d_grdGrpVec = (double *) mxGPUGetData(grdGrpVec);
fcnObj.prm.rgdNdeNum = mxGPUGetNumberOfElements(grpNdeVec);
// ---
int rgdGrpNum = fcnObj.prm.rgdGrpNum;
int rgdNdeNum = fcnObj.prm.rgdNdeNum;
int timeNum = fcnObj.prm.timeNum;
long long gpuAloDblMemCnt = rgdGrpNum * (timeNum - 1)
+ rgdNdeNum * ( rgdNdeNum * 2 + DIMNUM + DIMNUM * timeNum + DIMNUM * (timeNum - 1) * 2
+ RGDDOF * (timeNum - 1) + RGDDOF * timeNum)
+ rgdNdeNum * (DIMNUM + RGDDOF * (timeNum - 1) + RGDDOF * 5)
+ SUMBLKDIM;
double *gpuDblSpace;
hipError_t error = hipMalloc((void **) &gpuDblSpace, sizeof(double) * gpuAloDblMemCnt);
if ( error != hipSuccess )
mexErrMsgIdAndTxt("objgrd3D:hipMalloc", "Fail to allocate device memory.");
hipMalloc((void **) &(fcnObj.d_status), sizeof(int));
long long gpuAsgDblMemCnt;
assignObjgrdStructMemory(gpuAsgDblMemCnt, fcnObj, gpuDblSpace);
if ( gpuAsgDblMemCnt != gpuAloDblMemCnt )
{
mexErrMsgIdAndTxt("objgrd3D:memAssign",
"Assigned device double memory (%lld) mismatches the allocated memory (%lld).",
gpuAsgDblMemCnt, gpuAloDblMemCnt);
}
// ---
hipblasCreate(&(fcnObj.blasHdl));
hipsolverDnCreate(&(fcnObj.solvHdl));
hipsolverDnDpotrf_bufferSize(fcnObj.solvHdl, HIPBLAS_FILL_MODE_LOWER,
fcnObj.prm.rgdNdeNum, fcnObj.d_rgdKnlMat,
fcnObj.prm.rgdNdeNum, &(fcnObj.h_Lwork));
hipMalloc((void **) &(fcnObj.d_workspace), sizeof(double) * fcnObj.h_Lwork);
// ---
double h_objVal;
objgrd(&h_objVal, d_grdGrpVec, d_ctlGrpStk, fcnObj);
plhs[0] = mxCreateDoubleScalar(h_objVal);
plhs[1] = mxGPUCreateMxArrayOnGPU(grdGrpVec);
// ---
//
mxGPUDestroyGPUArray(ctlGrpStk);
mxGPUDestroyGPUArray(cenIniMat);
mxGPUDestroyGPUArray(difIniMat);
mxGPUDestroyGPUArray(grpNdeVec);
mxGPUDestroyGPUArray(grpIfoMat);
mxGPUDestroyGPUArray(wgtGrpVec);
mxGPUDestroyGPUArray(tgtNdeMat);
mxGPUDestroyGPUArray(grdGrpVec);
hipFree(gpuDblSpace);
hipFree(fcnObj.d_status);
hipFree(fcnObj.d_workspace);
hipblasDestroy(fcnObj.blasHdl);
hipsolverDnDestroy(fcnObj.solvHdl);
return;
}
| 5391f865c0b4ebf563eef10dbd545e92147e9e40.cu | #include <cstdio>
#include <cstdlib>
#include <cfloat>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "struct.h"
#include "constants.h"
void assignObjgrdStructMemory(long long &, fcndata &, double *);
void objgrd(double *, double *, double *, fcndata &);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
mxInitGPU();
fcndata fcnObj = {0};
mxGPUArray const *ctlGrpStk;
mxGPUArray const *cenIniMat, *difIniMat, *grpNdeVec, *grpIfoMat, *wgtGrpVec, *tgtNdeMat;
mxGPUArray *grdGrpVec;
ctlGrpStk = mxGPUCreateFromMxArray(prhs[ 0]);
cenIniMat = mxGPUCreateFromMxArray(prhs[ 1]);
difIniMat = mxGPUCreateFromMxArray(prhs[ 2]);
grpNdeVec = mxGPUCreateFromMxArray(prhs[ 3]);
grpIfoMat = mxGPUCreateFromMxArray(prhs[ 4]);
wgtGrpVec = mxGPUCreateFromMxArray(prhs[ 5]);
tgtNdeMat = mxGPUCreateFromMxArray(prhs[ 6]);
fcnObj.prm.knlOrder = mxGetScalar(prhs[ 7]);
fcnObj.prm.knlWidth = mxGetScalar(prhs[ 8]);
fcnObj.prm.knlEps = mxGetScalar(prhs[ 9]);
fcnObj.prm.timeStp = mxGetScalar(prhs[10]);
fcnObj.prm.timeNum = mxGetScalar(prhs[11]);
fcnObj.prm.tgtWgt = mxGetScalar(prhs[12]);
fcnObj.prm.rgdGrpNum = mxGPUGetNumberOfElements(wgtGrpVec);
mwSize const ndim = 1;
mwSize const dims[1] = {(mwSize) fcnObj.prm.rgdGrpNum * RGDDOF * (fcnObj.prm.timeNum - 1)};
grdGrpVec = mxGPUCreateGPUArray(ndim, dims, mxDOUBLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE);
// ---
double *d_ctlGrpStk = (double *) mxGPUGetDataReadOnly(ctlGrpStk);
fcnObj.prm.d_cenIniMat = (double *) mxGPUGetDataReadOnly(cenIniMat);
fcnObj.prm.d_difIniMat = (double *) mxGPUGetDataReadOnly(difIniMat);
fcnObj.prm.d_grpNdeVec = (int *) mxGPUGetDataReadOnly(grpNdeVec);
fcnObj.prm.d_grpIfoMat = (int *) mxGPUGetDataReadOnly(grpIfoMat);
fcnObj.prm.d_wgtGrpVec = (double *) mxGPUGetDataReadOnly(wgtGrpVec);
fcnObj.tgt.d_tgtNdeMat = (double *) mxGPUGetDataReadOnly(tgtNdeMat);
double *d_grdGrpVec = (double *) mxGPUGetData(grdGrpVec);
fcnObj.prm.rgdNdeNum = mxGPUGetNumberOfElements(grpNdeVec);
// ---
int rgdGrpNum = fcnObj.prm.rgdGrpNum;
int rgdNdeNum = fcnObj.prm.rgdNdeNum;
int timeNum = fcnObj.prm.timeNum;
long long gpuAloDblMemCnt = rgdGrpNum * (timeNum - 1)
+ rgdNdeNum * ( rgdNdeNum * 2 + DIMNUM + DIMNUM * timeNum + DIMNUM * (timeNum - 1) * 2
+ RGDDOF * (timeNum - 1) + RGDDOF * timeNum)
+ rgdNdeNum * (DIMNUM + RGDDOF * (timeNum - 1) + RGDDOF * 5)
+ SUMBLKDIM;
double *gpuDblSpace;
cudaError_t error = cudaMalloc((void **) &gpuDblSpace, sizeof(double) * gpuAloDblMemCnt);
if ( error != cudaSuccess )
mexErrMsgIdAndTxt("objgrd3D:cudaMalloc", "Fail to allocate device memory.");
cudaMalloc((void **) &(fcnObj.d_status), sizeof(int));
long long gpuAsgDblMemCnt;
assignObjgrdStructMemory(gpuAsgDblMemCnt, fcnObj, gpuDblSpace);
if ( gpuAsgDblMemCnt != gpuAloDblMemCnt )
{
mexErrMsgIdAndTxt("objgrd3D:memAssign",
"Assigned device double memory (%lld) mismatches the allocated memory (%lld).",
gpuAsgDblMemCnt, gpuAloDblMemCnt);
}
// ---
cublasCreate(&(fcnObj.blasHdl));
cusolverDnCreate(&(fcnObj.solvHdl));
cusolverDnDpotrf_bufferSize(fcnObj.solvHdl, CUBLAS_FILL_MODE_LOWER,
fcnObj.prm.rgdNdeNum, fcnObj.d_rgdKnlMat,
fcnObj.prm.rgdNdeNum, &(fcnObj.h_Lwork));
cudaMalloc((void **) &(fcnObj.d_workspace), sizeof(double) * fcnObj.h_Lwork);
// ---
double h_objVal;
objgrd(&h_objVal, d_grdGrpVec, d_ctlGrpStk, fcnObj);
plhs[0] = mxCreateDoubleScalar(h_objVal);
plhs[1] = mxGPUCreateMxArrayOnGPU(grdGrpVec);
// ---
//
mxGPUDestroyGPUArray(ctlGrpStk);
mxGPUDestroyGPUArray(cenIniMat);
mxGPUDestroyGPUArray(difIniMat);
mxGPUDestroyGPUArray(grpNdeVec);
mxGPUDestroyGPUArray(grpIfoMat);
mxGPUDestroyGPUArray(wgtGrpVec);
mxGPUDestroyGPUArray(tgtNdeMat);
mxGPUDestroyGPUArray(grdGrpVec);
cudaFree(gpuDblSpace);
cudaFree(fcnObj.d_status);
cudaFree(fcnObj.d_workspace);
cublasDestroy(fcnObj.blasHdl);
cusolverDnDestroy(fcnObj.solvHdl);
return;
}
|
e92d311eab5126f527f9fd27751f10c38605793e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <getopt.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
#define NUM_THREAD 256 // Number of thread blocks
#define print(x) printf("%d",x)
float *matrixMul_float_serial(float vector1[], float vector2[], int size);
float *matrixMul_float_parallel(float vector1[], float vector2[], int size, int thread_count);
float *matrixMul_float_cuda(float* vector1, float* vector2, int num);
double *matrixMul_double_serial(double vector1[], double vector2[], int size);
double *matrixMul_double_parallel(double vector1[], double vector2[], int size, int thread_count);
double *matrixMul_double_cuda(double* vector1, double* vector2, int num);
double doubleGen();
float floatGen();
void operations(int size, int parallel, int serial, int cuda, int verify, int thread_count);
void print_results_float( int size, double time_spent);
void print_results_double( int size, double time_spent);
double verifyVectord(double *vector1, double *vector2, int size);
float verifyVectorf(float *vector1, float *vector2, int size);
__global__ void matMul_CUDA_double(double *sum, int size, double *vector1, double *vector2){
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
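// One thread per output element: its row is idx / size and its column is idx % size.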
int k;
if(idx < size*size){
for(k=0; k< size; k++){
sum[idx] += (*(vector1+(idx-(idx % size)+k))) * (*(vector2+(k*size+(idx % size))));
}
}
}
__global__ void matMul_CUDA_float(float *sum, int size, float *vector1, float *vector2){
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
int k;
if(idx < size*size){
for(k=0; k< size; k++){
sum[idx] += (*(vector1+(idx-(idx % size)+k))) * (*(vector2+(k*size+(idx % size))));
}
}
}
int main(int argc, char **argv){
int parallel = 0;
int serial = 0;
int cuda = 0;
int verify = 0;
int thread_count = 2;
int c;
int size=10;
while((c = getopt(argc, argv, "scp:vn:")) != -1){
switch(c){
case 'p':
parallel = 1;
thread_count = atoi(optarg);
if(thread_count > 8 || thread_count < 2){
printf("Invalid Number of threads\nThread number is set to 2\n");
thread_count = 2;
}
break;
case 'n':
size = atoi(optarg);
if(size > 100000 || size < 1){
size = 10;
}
break;
case 's':
serial = 1;
break;
case 'c':
cuda = 1;
break;
case 'v':
verify = 1;
break;
case '?':
if(optopt == 'p'){
printf("Number of threads missing\nThread number set to 8");
thread_count = 8;
}else{
printf("Unknown option selected\nProgram exited\n");
return 0;
}
break;
default:
abort();
}
}
operations(size, parallel, serial, cuda, verify, thread_count);
}
void operations(int size, int parallel, int serial, int cuda, int verify, int thread_count){
int j;
int t_size;
clock_t begin, end;
double time_spent_serial, time_spent_parallel, time_spent_cuda;
srand(time(NULL));
t_size = size*size;
float *vector1;
vector1 = (float*) malloc(t_size * sizeof(float));
float *vector2;
vector2 = (float*) malloc(t_size * sizeof(float));
float *ans_fserial;
float *ans_fparallel;
float *ans_fcuda;
for(j=0; j < t_size; j++){
*(vector1+j) = floatGen();
*(vector2+j) = floatGen();
}
printf("===================================================================\n");
printf("\tVector Initialization is completed\n");
if(serial || verify){
printf("Run Serial\n");
begin = clock();
ans_fserial = matrixMul_float_serial(vector1,vector2,size);
end = clock();
time_spent_serial = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_float( size, time_spent_serial);
}
if(parallel){
printf("Run Parallel\n");
begin = clock();
ans_fparallel = matrixMul_float_parallel(vector1,vector2,size,thread_count);
end = clock();
time_spent_parallel = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_float( size, time_spent_parallel);
}
if(cuda){
printf("Run CUDA\n");
begin = clock();
ans_fcuda = matrixMul_float_cuda(vector1,vector2,size);
end = clock();
time_spent_cuda = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_float( size, time_spent_cuda);
}
if(verify){
float error;
double confident = 4.0 * size / 10000;
printf("===============================Single Precision====================================\n");
if(parallel){
error = verifyVectorf(ans_fserial, ans_fparallel, t_size);
if(error > confident ){
printf("======================Paralle vs Serial=================================\n");
printf("Significant difference between parallal with %d threads and serial\n", thread_count);
}else{
printf("======================Paralle vs Serial=================================\n");
printf("No significant difference between parallal with %d threads and serial\n", thread_count);
}
printf("Error : %.20f\n",error);
}
if(cuda){
error = verifyVectorf(ans_fserial, ans_fcuda, t_size);
if(error > confident ){
printf("======================Cuda vs Serial=================================\n");
printf("Significant difference between cuda and serial\n");
}else{
printf("======================Cuda vs Serial=================================\n");
printf("No significant difference between cuda and serial\n");
}
printf("Error : %.20f\n",error);
}
}
free(vector1);
free(vector2);
if(serial || verify)
free(ans_fserial);
if(parallel)
free(ans_fparallel);
if(cuda)
free(ans_fcuda);
double *vector3;
vector3 = (double*) malloc(t_size * sizeof(double));
double *vector4;
vector4 = (double*) malloc(t_size * sizeof(double));
double *ans_dserial;
double *ans_dparallel;
double *ans_dcuda;
for(j=0; j < t_size; j++){
*(vector3+j) = doubleGen();
*(vector4+j) = doubleGen();
}
if(serial || verify){
printf("Run Serial\n");
begin = clock();
ans_dserial = matrixMul_double_serial(vector3,vector4,size);
end = clock();
time_spent_serial = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_double( size, time_spent_serial);
}
if(parallel){
printf("Run Parallel\n");
begin = clock();
ans_dparallel = matrixMul_double_parallel(vector3,vector4,size,thread_count);
end = clock();
time_spent_parallel = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_double( size, time_spent_parallel);
}
if(cuda){
printf("Run CUDA\n");
begin = clock();
ans_dcuda = matrixMul_double_cuda(vector3,vector4,size);
end = clock();
time_spent_cuda = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_double( size, time_spent_cuda);
}
if(verify){
double error_d;
double confident_d = 4.0 * size / 10000;
printf("===============================Double Precision====================================\n");
if(parallel){
error_d = verifyVectord(ans_dserial, ans_dparallel, t_size);
if(error_d > confident_d ){
printf("======================Paralle vs Serial=================================\n");
printf("Significant difference between parallal with %d threads and serial\n", thread_count);
}else{
printf("======================Paralle vs Serial=================================\n");
printf("No significant difference between parallal with %d threads and serial\n", thread_count);
}
printf("Error : %.20lf\n",error_d);
}
if(cuda){
error_d = verifyVectord(ans_dserial, ans_dcuda, t_size);
if(error_d > confident_d ){
printf("======================Cuda vs Serial=================================\n");
printf("Significant difference between cuda and serial\n");
}else{
printf("======================Cuda vs Serial=================================\n");
printf("No significant difference between cuda and serial\n");
}
printf("Error : %.20lf\n",error_d);
}
}
free(vector3);
free(vector4);
if(serial || verify)
free(ans_dserial);
if(parallel)
free(ans_dparallel);
if(cuda)
free(ans_dcuda);
}
void print_results_float( int size, double time_spent){
printf("Single Precision Time Spent : %lf\n\n",time_spent);
}
void print_results_double( int size, double time_spent){
printf("Double Precision Time Spent : %lf\n\n",time_spent);
}
/*
* Verify the answer matrix
*/
float verifyVectorf(float *vector1, float *vector2, int size){
float error = 0;
int i;
for(i = 0; i<size; i++){
error += abs(vector1[i] - vector2[i]);
}
error /= size;
return error;
}
double verifyVectord(double *vector1, double *vector2, int size){
double error = 0;
int i;
for(i = 0; i<size; i++){
error += abs(vector1[i] - vector2[i]);
}
error /= size;
return error;
}
/*
* Sequential Matrix Multiplication
*/
float *matrixMul_float_serial(float* vector1, float* vector2, int size){
float sum = 0.0;
int i,j,k;
float* ans = (float*) malloc(size * size * sizeof(float));
for(i=0; i < size; i++){
for(j=0; j < size; j++){
sum = 0;
for(k=0; k< size; k++){
sum += (*(vector1+(i*size+k))) * (*(vector2+(k*size+j)));
}
ans[i*size+j] = sum;
}
}
return ans;
}
double *matrixMul_double_serial(double* vector1, double* vector2, int size){
double sum = 0.0;
int i,j,k;
double* ans = (double*) malloc(size * size * sizeof(double));
for(i=0; i < size; i++){
for(j=0; j < size; j++){
sum = 0;
for(k=0; k< size; k++){
sum += (*(vector1+(i*size+k))) * (*(vector2+(k*size+j)));
}
ans[i*size+j] = sum;
}
}
return ans;
}
/*
* OMP Thread parallel matrix multiplication
*/
float *matrixMul_float_parallel(float* vector1, float* vector2, int size, int thread_count){
float sum = 0.0;
int i,j,k;
float* ans = (float*) malloc(size * size * sizeof(float));
#pragma omp parallel num_threads(thread_count) private(i,j,k) shared(vector1, vector2, ans)
{
#pragma omp for schedule(static) reduction(+:sum)
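// reduction(+:sum) mainly gives each thread a private copy of sum; the combined value is never read after the loop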
for(i=0; i < size; i++){
for(j=0; j < size; j++){
sum = 0;
for(k=0; k< size; k++){
sum = sum + (*(vector1+(i*size+k))) * (*(vector2+(k*size+j)));
}
ans[i*size+j] = sum;
}
}
}
return ans;
}
double *matrixMul_double_parallel(double* vector1, double* vector2, int size, int thread_count){
double sum = 0.0;
int i,j,k;
double* ans = (double*) malloc(size * size * sizeof(double));
#pragma omp parallel num_threads(thread_count) private(i,j,k) shared(vector1, vector2, ans)
{
#pragma omp for schedule(static) reduction(+:sum)
for(i=0; i < size; i++){
for(j=0; j < size; j++){
sum = 0;
for(k=0; k< size; k++){
sum = sum + (*(vector1+(i*size+k))) * (*(vector2+(k*size+j)));
}
ans[i*size+j] = sum;
}
}
}
return ans;
}
/*
* CUDA GPU Matrix Multiplication
*/
float *matrixMul_float_cuda(float* vector1, float* vector2, int num){
int num_block = (num*num + NUM_THREAD - 1)/(NUM_THREAD); //print(358);
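// one thread per output element, rounded up so all num*num elements are covered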
size_t size = num*num*sizeof(float); //Array memory size.
float *sumHost, *sumDev; // Pointer to host & device arrays
float *vector1_device;
float *vector2_device;
sumHost = (float *)malloc(size); // Allocate array on host
hipMalloc((void **) &sumDev, size); // Allocate array on device
hipMalloc((void **) &vector1_device, size); // Allocate array on device
hipMalloc((void **) &vector2_device, size); // Allocate array on device
// Initialize array in device to 0
hipMemset(sumDev, 0, size);//print(370);
hipMemcpy(vector1_device, vector1, size, hipMemcpyHostToDevice); //print(371);
hipMemcpy(vector2_device, vector2, size, hipMemcpyHostToDevice); //print(372);
// Do calculation on device
hipLaunchKernelGGL(( matMul_CUDA_float) , dim3(num_block), dim3(NUM_THREAD), 0, 0, sumDev, num, vector1_device, vector2_device); // call CUDA kernel
// Retrieve result from device and store it in host array
hipMemcpy(sumHost, sumDev, size, hipMemcpyDeviceToHost);//print(378);
// Cleanup
hipFree(sumDev);
hipFree(vector1_device);
hipFree(vector2_device);
return sumHost;
}
double *matrixMul_double_cuda(double* vector1, double* vector2, int num){
int num_block = (num*num + NUM_THREAD - 1)/(NUM_THREAD);
size_t size = num*num*sizeof(double); //Array memory size.
double *sumHost, *sumDev; // Pointer to host & device arrays
double *vector1_device;
double *vector2_device;
sumHost = (double *)malloc(size); // Allocate array on host
hipMalloc((void **) &sumDev, size); // Allocate array on device
hipMalloc((void **) &vector1_device, size); // Allocate array on device
hipMalloc((void **) &vector2_device, size); // Allocate array on device
// Initialize array in device to 0
hipMemset(sumDev, 0, size);
hipMemcpy(vector1_device, vector1, size, hipMemcpyHostToDevice);
hipMemcpy(vector2_device, vector2, size, hipMemcpyHostToDevice);
// Do calculation on device
hipLaunchKernelGGL(( matMul_CUDA_double) , dim3(num_block), dim3(NUM_THREAD), 0, 0, sumDev, num, vector1_device, vector2_device); // call CUDA kernel
// Retrieve result from device and store it in host array
hipMemcpy(sumHost, sumDev, size, hipMemcpyDeviceToHost);
// Cleanup
hipFree(sumDev);
hipFree(vector1_device);
hipFree(vector2_device);
return sumHost;
}
/*
* Random Number generator
*/
float floatGen(){
float num ;
num = 1.0 * random() / RAND_MAX + 1.0;
return num;
}
double doubleGen(){
double num;
num = 1.0 * random() / RAND_MAX + 1.0;
return num;
}
| e92d311eab5126f527f9fd27751f10c38605793e.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>
#include <getopt.h>
#include <unistd.h>
#include <cuda.h>
#define NUM_THREAD 256 // Threads per block
#define print(x) printf("%d",x)
float *matrixMul_float_serial(float vector1[], float vector2[], int size);
float *matrixMul_float_parallel(float vector1[], float vector2[], int size, int thread_count);
float *matrixMul_float_cuda(float* vector1, float* vector2, int num);
double *matrixMul_double_serial(double vector1[], double vector2[], int size);
double *matrixMul_double_parallel(double vector1[], double vector2[], int size, int thread_count);
double *matrixMul_double_cuda(double* vector1, double* vector2, int num);
double doubleGen();
float floatGen();
void operations(int size, int parallel, int serial, int cuda, int verify, int thread_count);
void print_results_float( int size, double time_spent);
void print_results_double( int size, double time_spent);
double verifyVectord(double *vector1, double *vector2, int size);
float verifyVectorf(float *vector1, float *vector2, int size);
__global__ void matMul_CUDA_double(double *sum, int size, double *vector1, double *vector2){
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
int k;
if(idx < size*size){
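// idx addresses one output element: its row is idx / size and its column is idx % size;
// vector1 is read along that row and vector2 down that column.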
for(k=0; k< size; k++){
sum[idx] += (*(vector1+(idx-(idx % size)+k))) * (*(vector2+(k*size+(idx % size))));
}
}
}
__global__ void matMul_CUDA_float(float *sum, int size, float *vector1, float *vector2){
int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
int k;
if(idx < size*size){
for(k=0; k< size; k++){
sum[idx] += (*(vector1+(idx-(idx % size)+k))) * (*(vector2+(k*size+(idx % size))));
}
}
}
int main(int argc, char **argv){
int parallel = 0;
int serial = 0;
int cuda = 0;
int verify = 0;
int thread_count = 2;
int c;
int size=10;
while((c = getopt(argc, argv, "scp:vn:")) != -1){
switch(c){
case 'p':
parallel = 1;
thread_count = atoi(optarg);
if(thread_count > 8 || thread_count < 2){
printf("Invalid Number of threads\nThread number is set to 2\n");
thread_count = 2;
}
break;
case 'n':
size = atoi(optarg);
if(size > 100000 || size < 1){
size = 10;
}
break;
case 's':
serial = 1;
break;
case 'c':
cuda = 1;
break;
case 'v':
verify = 1;
break;
case '?':
if(optopt == 'p'){
printf("Number of threads missing\nThread number set to 8");
thread_count = 8;
}else{
printf("Unknown option selected\nProgram exited\n");
return 0;
}
break;
default:
abort();
}
}
operations(size, parallel, serial, cuda, verify, thread_count);
}
void operations(int size, int parallel, int serial, int cuda, int verify, int thread_count){
int j;
int t_size;
clock_t begin, end;
double time_spent_serial, time_spent_parallel, time_spent_cuda;
srand(time(NULL));
t_size = size*size;
float *vector1;
vector1 = (float*) malloc(t_size * sizeof(float));
float *vector2;
vector2 = (float*) malloc(t_size * sizeof(float));
float *ans_fserial;
float *ans_fparallel;
float *ans_fcuda;
for(j=0; j < t_size; j++){
*(vector1+j) = floatGen();
*(vector2+j) = floatGen();
}
printf("===================================================================\n");
printf("\tVector Initialization is completed\n");
if(serial || verify){
printf("Run Serial\n");
begin = clock();
ans_fserial = matrixMul_float_serial(vector1,vector2,size);
end = clock();
time_spent_serial = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_float( size, time_spent_serial);
}
if(parallel){
printf("Run Parallel\n");
begin = clock();
ans_fparallel = matrixMul_float_parallel(vector1,vector2,size,thread_count);
end = clock();
time_spent_parallel = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_float( size, time_spent_parallel);
}
if(cuda){
printf("Run CUDA\n");
begin = clock();
ans_fcuda = matrixMul_float_cuda(vector1,vector2,size);
end = clock();
time_spent_cuda = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_float( size, time_spent_cuda);
}
if(verify){
float error;
double confident = 4.0 * size / 10000;
printf("===============================Single Precision====================================\n");
if(parallel){
error = verifyVectorf(ans_fserial, ans_fparallel, t_size);
if(error > confident ){
printf("======================Paralle vs Serial=================================\n");
printf("Significant difference between parallal with %d threads and serial\n", thread_count);
}else{
printf("======================Paralle vs Serial=================================\n");
printf("No significant difference between parallal with %d threads and serial\n", thread_count);
}
printf("Error : %.20f\n",error);
}
if(cuda){
error = verifyVectorf(ans_fserial, ans_fcuda, t_size);
if(error > confident ){
printf("======================Cuda vs Serial=================================\n");
printf("Significant difference between cuda and serial\n");
}else{
printf("======================Cuda vs Serial=================================\n");
printf("No significant difference between cuda and serial\n");
}
printf("Error : %.20f\n",error);
}
}
free(vector1);
free(vector2);
if(serial || verify)
free(ans_fserial);
if(parallel)
free(ans_fparallel);
if(cuda)
free(ans_fcuda);
double *vector3;
vector3 = (double*) malloc(t_size * sizeof(double));
double *vector4;
vector4 = (double*) malloc(t_size * sizeof(double));
double *ans_dserial;
double *ans_dparallel;
double *ans_dcuda;
for(j=0; j < t_size; j++){
*(vector3+j) = doubleGen();
*(vector4+j) = doubleGen();
}
if(serial || verify){
printf("Run Serial\n");
begin = clock();
ans_dserial = matrixMul_double_serial(vector3,vector4,size);
end = clock();
time_spent_serial = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_double( size, time_spent_serial);
}
if(parallel){
printf("Run Parallel\n");
begin = clock();
ans_dparallel = matrixMul_double_parallel(vector3,vector4,size,thread_count);
end = clock();
time_spent_parallel = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_double( size, time_spent_parallel);
}
if(cuda){
printf("Run CUDA\n");
begin = clock();
ans_dcuda = matrixMul_double_cuda(vector3,vector4,size);
end = clock();
time_spent_cuda = (double)(end - begin)/ CLOCKS_PER_SEC;
print_results_double( size, time_spent_cuda);
}
if(verify){
double error_d;
double confident_d = 4.0 * size / 10000;
printf("===============================Double Precision====================================\n");
if(parallel){
error_d = verifyVectord(ans_dserial, ans_dparallel, t_size);
if(error_d > confident_d ){
printf("======================Paralle vs Serial=================================\n");
printf("Significant difference between parallal with %d threads and serial\n", thread_count);
}else{
printf("======================Paralle vs Serial=================================\n");
printf("No significant difference between parallal with %d threads and serial\n", thread_count);
}
printf("Error : %.20lf\n",error_d);
}
if(cuda){
error_d = verifyVectord(ans_dserial, ans_dcuda, t_size);
if(error_d > confident_d ){
printf("======================Cuda vs Serial=================================\n");
printf("Significant difference between cuda and serial\n");
}else{
printf("======================Cuda vs Serial=================================\n");
printf("No significant difference between cuda and serial\n");
}
printf("Error : %.20lf\n",error_d);
}
}
free(vector3);
free(vector4);
if(serial || verify)
free(ans_dserial);
if(parallel)
free(ans_dparallel);
if(cuda)
free(ans_dcuda);
}
void print_results_float( int size, double time_spent){
printf("Single Precision Time Spent : %lf\n\n",time_spent);
}
void print_results_double( int size, double time_spent){
printf("Double Precision Time Spent : %lf\n\n",time_spent);
}
/*
* Verify the answer matrix
*/
float verifyVectorf(float *vector1, float *vector2, int size){
float error = 0;
int i;
for(i = 0; i<size; i++){
error += abs(vector1[i] - vector2[i]);
}
error /= size;
return error;
}
double verifyVectord(double *vector1, double *vector2, int size){
double error = 0;
int i;
for(i = 0; i<size; i++){
error += abs(vector1[i] - vector2[i]);
}
error /= size;
return error;
}
/*
* Sequential Matrix Multiplication
*/
float *matrixMul_float_serial(float* vector1, float* vector2, int size){
float sum = 0.0;
int i,j,k;
float* ans = (float*) malloc(size * size * sizeof(float));
for(i=0; i < size; i++){
for(j=0; j < size; j++){
sum = 0;
for(k=0; k< size; k++){
sum += (*(vector1+(i*size+k))) * (*(vector2+(k*size+j)));
}
ans[i*size+j] = sum;
}
}
return ans;
}
double *matrixMul_double_serial(double* vector1, double* vector2, int size){
double sum = 0.0;
int i,j,k;
double* ans = (double*) malloc(size * size * sizeof(double));
for(i=0; i < size; i++){
for(j=0; j < size; j++){
sum = 0;
for(k=0; k< size; k++){
sum += (*(vector1+(i*size+k))) * (*(vector2+(k*size+j)));
}
ans[i*size+j] = sum;
}
}
return ans;
}
/*
* OMP Thread parallel matrix multiplication
*/
float *matrixMul_float_parallel(float* vector1, float* vector2, int size, int thread_count){
float sum = 0.0;
int i,j,k;
float* ans = (float*) malloc(size * size * sizeof(float));
#pragma omp parallel num_threads(thread_count) private(i,j,k) shared(vector1, vector2, ans)
{
#pragma omp for schedule(static) reduction(+:sum)
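// reduction(+:sum) mainly gives each thread a private copy of sum; the combined value is never read after the loop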
for(i=0; i < size; i++){
for(j=0; j < size; j++){
sum = 0;
for(k=0; k< size; k++){
sum = sum + (*(vector1+(i*size+k))) * (*(vector2+(k*size+j)));
}
ans[i*size+j] = sum;
}
}
}
return ans;
}
double *matrixMul_double_parallel(double* vector1, double* vector2, int size, int thread_count){
double sum = 0.0;
int i,j,k;
double* ans = (double*) malloc(size * size * sizeof(double));
#pragma omp parallel num_threads(thread_count) private(i,j,k) shared(vector1, vector2, ans)
{
#pragma omp for schedule(static) reduction(+:sum)
for(i=0; i < size; i++){
for(j=0; j < size; j++){
sum = 0;
for(k=0; k< size; k++){
sum = sum + (*(vector1+(i*size+k))) * (*(vector2+(k*size+j)));
}
ans[i*size+j] = sum;
}
}
}
return ans;
}
/*
* CUDA GPU Matrix Multiplication
*/
float *matrixMul_float_cuda(float* vector1, float* vector2, int num){
int num_block = (num*num + NUM_THREAD - 1)/(NUM_THREAD); //print(358);
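// one thread per output element, rounded up so all num*num elements are covered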
size_t size = num*num*sizeof(float); //Array memory size.
float *sumHost, *sumDev; // Pointer to host & device arrays
float *vector1_device;
float *vector2_device;
sumHost = (float *)malloc(size); // Allocate array on host
cudaMalloc((void **) &sumDev, size); // Allocate array on device
cudaMalloc((void **) &vector1_device, size); // Allocate array on device
cudaMalloc((void **) &vector2_device, size); // Allocate array on device
// Initialize array in device to 0
cudaMemset(sumDev, 0, size);//print(370);
cudaMemcpy(vector1_device, vector1, size, cudaMemcpyHostToDevice); //print(371);
cudaMemcpy(vector2_device, vector2, size, cudaMemcpyHostToDevice); //print(372);
// Do calculation on device
matMul_CUDA_float <<<num_block, NUM_THREAD>>> (sumDev, num, vector1_device, vector2_device); // call CUDA kernel
// Retrieve result from device and store it in host array
cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost);//print(378);
// Cleanup
cudaFree(sumDev);
cudaFree(vector1_device);
cudaFree(vector2_device);
return sumHost;
}
double *matrixMul_double_cuda(double* vector1, double* vector2, int num){
int num_block = (num*num + NUM_THREAD - 1)/(NUM_THREAD);
size_t size = num*num*sizeof(double); //Array memory size.
double *sumHost, *sumDev; // Pointer to host & device arrays
double *vector1_device;
double *vector2_device;
sumHost = (double *)malloc(size); // Allocate array on host
cudaMalloc((void **) &sumDev, size); // Allocate array on device
cudaMalloc((void **) &vector1_device, size); // Allocate array on device
cudaMalloc((void **) &vector2_device, size); // Allocate array on device
// Initialize array in device to 0
cudaMemset(sumDev, 0, size);
cudaMemcpy(vector1_device, vector1, size, cudaMemcpyHostToDevice);
cudaMemcpy(vector2_device, vector2, size, cudaMemcpyHostToDevice);
// Do calculation on device
matMul_CUDA_double <<<num_block, NUM_THREAD>>> (sumDev, num, vector1_device, vector2_device); // call CUDA kernel
// Retrieve result from device and store it in host array
cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(sumDev);
cudaFree(vector1_device);
cudaFree(vector2_device);
return sumHost;
}
/*
* Random Number generator
*/
float floatGen(){
float num ;
num = 1.0 * random() / RAND_MAX + 1.0;
return num;
}
double doubleGen(){
double num;
num = 1.0 * random() / RAND_MAX + 1.0;
return num;
}
|
72bf6a7f210b19840b3a71ec81a910f7f9b613c0.hip | // !!! This is a file automatically generated by hipify!!!
/**
* covariance.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define GPU_DEVICE 0
/* Problem size */
#define M (2048 * 6)
#define N (2048 * 6)
/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 64
#define DIM_THREAD_BLOCK_KERNEL_2_Y 32
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 256
#define DIM_THREAD_BLOCK_KERNEL_3_Y 1
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#define FLOAT_N 3214212.01
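// NOTE: the column means below are divided by FLOAT_N (a fixed PolyBench constant) rather than the
// actual row count; the CPU and GPU paths use the same constant, so their comparison is still valid.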
#define EPS 0.005
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE* data, DATA_TYPE* data_gpu)
{
int i, j;
for (i = 1; i < (M+1); i++)
{
for (j = 1; j < (N+1); j++)
{
data[i*(N+1) + j] = ((DATA_TYPE) i*j) / M;
data_gpu[i*(N+1) + j] = ((DATA_TYPE) i*j) / M;
}
}
}
void covariance(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean)
{
int i, j, j1,j2;
/* Determine mean of column vectors of input data matrix */
for (j = 1; j < (M+1); j++)
{
mean[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= FLOAT_N;
}
/* Center the column vectors. */
for (i = 1; i < (N+1); i++)
{
for (j = 1; j < (M+1); j++)
{
data[i*(M+1) + j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
for (j1 = 1; j1 < (M+1); j1++)
{
for (j2 = j1; j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for (i = 1; i < N+1; i++)
{
symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2];
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
}
void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=1; i < (M+1); i++)
{
for (j=1; j < (N+1); j++)
{
if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
return;
}
__global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
mean[j] = 0.0;
int i;
for(i = 1; i < (N+1); i++)
{
mean[j] += data[i * (M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
}
__global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1)))
{
data[i * (M+1) + j] -= mean[j];
}
}
__global__ void covar_kernel(DATA_TYPE *symmat, DATA_TYPE *data)
{
int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i, j2;
if ((j1 >= 1) && (j1 < (M+1)))
{
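// each thread owns one row j1 and fills the upper triangle from the diagonal outward,
// mirroring each value into the lower triangle; later rows therefore do less work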
for (j2 = j1; j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for(i = 1; i < (N+1); i++)
{
symmat[j1 * (M+1) + j2] += data[i *(M+1) + j1] * data[i *(M+1) + j2];
}
symmat[j2 * (M+1) + j1] = symmat[j1 * (M+1) + j2];
}
}
}
void covarianceCuda(DATA_TYPE* data_gpu, DATA_TYPE* symmat_gpu, DATA_TYPE* mean_gpu)
{
double t_start, t_end;
dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
dim3 grid1((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_1_X), 1);
dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
dim3 grid2((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_2_X), (size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_2_Y));
dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
dim3 grid3((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_3_X), 1);
t_start = rtclock();
hipLaunchKernelGGL(( mean_kernel), dim3(grid1), dim3(block1), 0, 0, mean_gpu,data_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( reduce_kernel), dim3(grid2), dim3(block2), 0, 0, mean_gpu,data_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( covar_kernel), dim3(grid3), dim3(block3), 0, 0, symmat_gpu,data_gpu);
hipDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
}
int main()
{
double t_start, t_end;
DATA_TYPE* data;
DATA_TYPE* symmat;
DATA_TYPE* mean;
DATA_TYPE *data_gpu;
DATA_TYPE *mean_gpu;
DATA_TYPE *symmat_gpu;
data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
symmat = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE));
mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
hipMallocManaged(&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
hipMallocManaged(&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (M+1));
hipMallocManaged(&mean_gpu, sizeof(DATA_TYPE) * (M+1));
init_arrays(data, data_gpu);
GPU_argv_init();
covarianceCuda(data_gpu, symmat_gpu, mean_gpu);
t_start = rtclock();
covariance(data, symmat, mean);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(symmat, symmat_gpu);
free(data);
free(symmat);
free(mean);
hipFree(data_gpu);
hipFree(symmat_gpu);
hipFree(mean_gpu);
return 0;
}
| 72bf6a7f210b19840b3a71ec81a910f7f9b613c0.cu | /**
* covariance.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define GPU_DEVICE 0
/* Problem size */
#define M (2048 * 6)
#define N (2048 * 6)
/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1
/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 64
#define DIM_THREAD_BLOCK_KERNEL_2_Y 32
/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 256
#define DIM_THREAD_BLOCK_KERNEL_3_Y 1
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#define FLOAT_N 3214212.01
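// NOTE: the column means below are divided by FLOAT_N (a fixed PolyBench constant) rather than the
// actual row count; the CPU and GPU paths use the same constant, so their comparison is still valid.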
#define EPS 0.005
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
void init_arrays(DATA_TYPE* data, DATA_TYPE* data_gpu)
{
int i, j;
for (i = 1; i < (M+1); i++)
{
for (j = 1; j < (N+1); j++)
{
data[i*(N+1) + j] = ((DATA_TYPE) i*j) / M;
data_gpu[i*(N+1) + j] = ((DATA_TYPE) i*j) / M;
}
}
}
void covariance(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean)
{
int i, j, j1,j2;
/* Determine mean of column vectors of input data matrix */
for (j = 1; j < (M+1); j++)
{
mean[j] = 0.0;
for (i = 1; i < (N+1); i++)
{
mean[j] += data[i*(M+1) + j];
}
mean[j] /= FLOAT_N;
}
/* Center the column vectors. */
for (i = 1; i < (N+1); i++)
{
for (j = 1; j < (M+1); j++)
{
data[i*(M+1) + j] -= mean[j];
}
}
/* Calculate the m * m covariance matrix. */
for (j1 = 1; j1 < (M+1); j1++)
{
for (j2 = j1; j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for (i = 1; i < N+1; i++)
{
symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2];
}
symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
}
}
}
void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu)
{
int i,j,fail;
fail = 0;
for (i=1; i < (M+1); i++)
{
for (j=1; j < (N+1); j++)
{
if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
return;
}
__global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
if ((j >= 1) && (j < (M+1)))
{
mean[j] = 0.0;
int i;
for(i = 1; i < (N+1); i++)
{
mean[j] += data[i * (M+1) + j];
}
mean[j] /= (DATA_TYPE)FLOAT_N;
}
}
__global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *data)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i = blockIdx.y * blockDim.y + threadIdx.y + 1;
if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1)))
{
data[i * (M+1) + j] -= mean[j];
}
}
__global__ void covar_kernel(DATA_TYPE *symmat, DATA_TYPE *data)
{
int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1;
int i, j2;
if ((j1 >= 1) && (j1 < (M+1)))
{
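// each thread owns one row j1 and fills the upper triangle from the diagonal outward,
// mirroring each value into the lower triangle; later rows therefore do less work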
for (j2 = j1; j2 < (M+1); j2++)
{
symmat[j1*(M+1) + j2] = 0.0;
for(i = 1; i < (N+1); i++)
{
symmat[j1 * (M+1) + j2] += data[i *(M+1) + j1] * data[i *(M+1) + j2];
}
symmat[j2 * (M+1) + j1] = symmat[j1 * (M+1) + j2];
}
}
}
void covarianceCuda(DATA_TYPE* data_gpu, DATA_TYPE* symmat_gpu, DATA_TYPE* mean_gpu)
{
double t_start, t_end;
dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
dim3 grid1((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_1_X), 1);
dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
dim3 grid2((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_2_X), (size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_2_Y));
dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
dim3 grid3((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_3_X), 1);
t_start = rtclock();
mean_kernel<<<grid1, block1>>>(mean_gpu,data_gpu);
cudaDeviceSynchronize();
reduce_kernel<<<grid2, block2>>>(mean_gpu,data_gpu);
cudaDeviceSynchronize();
covar_kernel<<<grid3, block3>>>(symmat_gpu,data_gpu);
cudaDeviceSynchronize();
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
}
int main()
{
double t_start, t_end;
DATA_TYPE* data;
DATA_TYPE* symmat;
DATA_TYPE* mean;
DATA_TYPE *data_gpu;
DATA_TYPE *mean_gpu;
DATA_TYPE *symmat_gpu;
data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
symmat = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE));
mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
cudaMallocManaged(&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
cudaMallocManaged(&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (M+1));
cudaMallocManaged(&mean_gpu, sizeof(DATA_TYPE) * (M+1));
init_arrays(data, data_gpu);
GPU_argv_init();
covarianceCuda(data_gpu, symmat_gpu, mean_gpu);
t_start = rtclock();
covariance(data, symmat, mean);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(symmat, symmat_gpu);
free(data);
free(symmat);
free(mean);
cudaFree(data_gpu);
cudaFree(symmat_gpu);
cudaFree(mean_gpu);
return 0;
}
|
b3d3b57d2ecea2d4c0f9624a94d8f835df51ed05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
using namespace cv;
#define BLOCK_WIDTH 32
/*-------------------------------------------*/
/* Utils */
/*-------------------------------------------*/
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
struct CpuTimer {
int64 start;
int64 stop;
CpuTimer()
{
}
void Start()
{
start = cv::getTickCount();
}
void Stop()
{
stop = cv::getTickCount();
}
float Elapsed()
{
return ((stop - start) / cv::getTickFrequency()) * 1000;
}
};
// Sum all pixels of an image with depth 3 (3 channels)
double sumImagePixels(cv::Mat input) {
cv::Scalar sum = cv::sum(input);
return sum[0] + sum[1] + sum[2];
}
//std::sort(durations.begin(), durations.end());
float average(float data[]) {
float accum = 0;
for (int i = 10; i < 90; ++i)
accum += data[i];
return accum / 80;
}
float standardDeviation(float data[])
{
float sum = 0.0, mean, standardDeviation = 0.0;
int i;
for (i = 10; i < 90; ++i)
{
sum += data[i];
}
mean = sum / 80;
for (i = 10; i < 90; ++i)
standardDeviation += pow(data[i] - mean, 2);
return sqrt(standardDeviation / 80);
}
string getFileName(string s, int maskWidth) {
std::stringstream ss;
ss << s << maskWidth << ".jpg";
return ss.str();
}
void printLastCudaError() {
// make the host block until the device is finished with foo
hipDeviceSynchronize();
// check for error
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
}
else {
printf("No CUDA error \n");
}
}
/*-------------------------------------------*/
/* CPU "kernel"
/*-------------------------------------------*/
void convolutionCPU(const unsigned char *input, unsigned char *output, int width, int height,
int step, int channels, const float * mask, int maskWidth)
{
for (int row = 0; row < height; ++row)
for (int col = 0; col < width; ++col)
{
int rowStart = row - maskWidth / 2;
int colStart = col - maskWidth / 2;
for (int currentChannel = 0; currentChannel < channels; ++currentChannel)
{
float sum = 0;
for (int currentMaskRow = 0; currentMaskRow < maskWidth; ++currentMaskRow)
{
for (int currentMaskCol = 0; currentMaskCol < maskWidth; ++currentMaskCol)
{
int currentRow = rowStart + currentMaskRow;
int currentCol = colStart + currentMaskCol;
// Verify we have a valid image pixel
if (currentRow > -1 && currentRow < height && currentCol > -1 && currentCol < width)
sum += input[currentRow * step + currentCol * channels + currentChannel] *
mask[currentMaskRow * maskWidth + currentMaskCol];
}
}
//Make sure pixel values are in the range 0-255
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
output[row * step + col * channels + currentChannel] = static_cast<unsigned char>(sum);
}
}
}
/*-------------------------------------------*/
/* GPU (CUDA) kernels */
/*-------------------------------------------*/
__global__
void convolutionGPUKernelSharedMem(const unsigned char *input, unsigned char *output, int width, int height,
int step, int channels, const float * __restrict__ mask, int maskWidth, int tileWidth)
{
__shared__ unsigned char Ns[BLOCK_WIDTH][BLOCK_WIDTH];
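// The block has BLOCK_WIDTH x BLOCK_WIDTH threads, but only tileWidth x tileWidth of them
// (tileWidth = BLOCK_WIDTH - maskWidth + 1) write outputs; the extra threads only help load
// the input tile plus its halo into shared memory.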
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y*tileWidth + ty;
int col_o = blockIdx.x*tileWidth + tx;
int row_i = row_o - maskWidth / 2;
int col_i = col_o - maskWidth / 2;
// Compute the output value for each channel
for (int k = 0; k < channels; ++k) {
// Load image into shared memory.
// All threads are involved in this operation
if ((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < width))
{
Ns[ty][tx] = input[row_i * step + col_i * channels + k];
}
else {
Ns[ty][tx] = 0.0f;
}
// Wait for the loading to finish
__syncthreads();
// Compute the output value.
// Note that some threads don't take part in this
if (ty < tileWidth && tx < tileWidth)
{
float sum = 0.0f;
for (int i = 0; i < maskWidth; i++)
{
for (int j = 0; j < maskWidth; j++)
{
sum += mask[i * maskWidth + j] * Ns[i + ty][j + tx];
}
}
if (row_o < height && col_o < width) {
// Normalize output value
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
output[row_o*step + col_o * channels + k] = static_cast<unsigned char>(sum);
}
}
// Wait for computation to finish for this channel
// before moving to another channel
__syncthreads();
}
}
__global__
void convolutionGPUKernelGlobalMem(const unsigned char *input, unsigned char *output,
int width, int height, int step, int channels, const float * mask, int maskWidth)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height)
{
int rowStart = row - maskWidth / 2;
int colStart = col - maskWidth / 2;
for (int currentChannel = 0; currentChannel < channels; ++currentChannel)
{
float sum = 0;
for (int currentMaskRow = 0; currentMaskRow < maskWidth; ++currentMaskRow)
{
for (int currentMaskCol = 0; currentMaskCol < maskWidth; ++currentMaskCol)
{
int currentRow = rowStart + currentMaskRow;
int currentCol = colStart + currentMaskCol;
// Verify we have a valid image pixel
if (currentRow > -1 && currentRow < height && currentCol > -1 && currentCol < width)
{
sum += input[currentRow * step + currentCol * channels + currentChannel] *
mask[currentMaskRow * maskWidth + currentMaskCol];
}
}
}
//Make sure pixel values are in the range 0-255
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
output[row * step + col * channels + currentChannel] = static_cast<unsigned char>(sum);
}
}
}
/*-------------------------------------------*/
/* Test - Testing kernels defined above */
/*-------------------------------------------*/
void convolutionGPU_shared_mem_test(const cv::Mat& input, cv::Mat& output,
const float * mask, const int maskWidth)
{
const int inputBytes = input.step * input.rows;
const int outputBytes = output.step * output.rows;
const int maskBytes = sizeof(float) * maskWidth * maskWidth;
unsigned char *d_output, *d_input;
float * d_mask;
const unsigned char * inputMat = input.ptr();
unsigned char * outputMat = output.ptr();
int width = input.cols;
int height = input.rows;
int step = input.step;
int channels = input.channels();
//Specify a reasonable block size
dim3 block(BLOCK_WIDTH, BLOCK_WIDTH);
//Calculate the tile width which depends on the mask size
int tileWidth = BLOCK_WIDTH - maskWidth + 1;
//Calculate grid size to cover the whole image
const dim3 grid((output.cols - 1) / tileWidth + 1, (output.rows - 1) / tileWidth + 1);
float durations[100];
for (int i = 0; i < 100; ++i) {
GpuTimer timer;
//Allocate device memory
hipMalloc<unsigned char>(&d_input, inputBytes);
hipMalloc<unsigned char>(&d_output, outputBytes);
hipMalloc<float>(&d_mask, maskBytes);
//Copy input image to device
hipMemcpy(d_input, inputMat, inputBytes, hipMemcpyHostToDevice);
hipMemcpy(d_mask, mask, maskBytes, hipMemcpyHostToDevice);
timer.Start();
convolutionGPUKernelSharedMem << <grid, block >> > (d_input, d_output, width, height,
step, channels, d_mask, maskWidth, tileWidth);
timer.Stop();
//printLastCudaError();
//Copy input image to device
hipMemcpy(outputMat, d_output, outputBytes, hipMemcpyDeviceToHost);
durations[i] = timer.Elapsed();
}
std::sort(begin(durations), end(durations));
printf("CUDA Shared Memory average time: %g\n", average(durations));
printf("CUDA Shared Memory standard deviation: %g\n", standardDeviation(durations));
hipFree(d_input);
hipFree(d_output);
hipFree(d_mask);
}
void convolutionGPU_global_mem_test(const cv::Mat& input, cv::Mat& output,
const float * mask, const int maskWidth)
{
const int inputBytes = input.step * input.rows;
const int outputBytes = output.step * output.rows;
const int maskBytes = sizeof(float) * maskWidth * maskWidth;
unsigned char *d_output, *d_input;
float * d_mask;
const unsigned char * inputMat = input.ptr();
unsigned char * outputMat = output.ptr();
int width = input.cols;
int height = input.rows;
int step = input.step;
int channels = input.channels();
//Specify a reasonable block size
const dim3 block(16, 16);
//Calculate grid size to cover the whole image
const dim3 grid((output.cols + block.x - 1) / block.x,
(output.rows + block.y - 1) / block.y);
float durations[100];
for (int i = 0; i < 100; ++i) {
GpuTimer timer;
//Allocate device memory
hipMalloc<unsigned char>(&d_input, inputBytes);
hipMalloc<unsigned char>(&d_output, outputBytes);
hipMalloc<float>(&d_mask, maskBytes);
//Copy input image to device
hipMemcpy(d_input, inputMat, inputBytes, hipMemcpyHostToDevice);
hipMemcpy(d_mask, mask, maskBytes, hipMemcpyHostToDevice);
timer.Start();
convolutionGPUKernelGlobalMem << <grid, block >> > (d_input, d_output, width, height,
step, channels, d_mask, maskWidth);
timer.Stop();
//Copy input image to device
hipMemcpy(outputMat, d_output, outputBytes, hipMemcpyDeviceToHost);
durations[i] = timer.Elapsed();
}
std::sort(begin(durations), end(durations));
printf("CUDA Global Memory average time: %g\n", average(durations));
printf("CUDA Global Memory standard deviation: %g\n", standardDeviation(durations));
hipFree(d_input);
hipFree(d_output);
hipFree(d_mask);
}
void convolutionCPU_test(const cv::Mat& input, cv::Mat& output,const float * mask, int maskWidth)
{
const unsigned char * inputMat = input.ptr();
unsigned char * outputMat = output.ptr();
int width = input.cols;
int height = input.rows;
int step = input.step;
int channels = input.channels();
float durations[100];
for (int i = 0; i < 100; ++i) {
CpuTimer timer;
timer.Start();
convolutionCPU(inputMat, outputMat, width, height, step, channels, mask, maskWidth);
timer.Stop();
durations[i] = timer.Elapsed();
}
std::sort(begin(durations), end(durations));
printf("CPU average time: %g\n", average(durations));
printf("CPU standard deviation: %g\n", standardDeviation(durations));
namedWindow("OutputCPU", 1);
imshow("OutputCPU", output);
}
int main()
{
string inputPath = "GiantLobster.jpg";
printf("Init\n");
//Load window;
Mat input = cv::imread(inputPath, CV_LOAD_IMAGE_COLOR);
const float mask3x3[] = { -1, 0, 1,
-2, 0, 2,
-1, 0, 1 };
const float mask5x5[] = { -1, -2, 0, 2, 1,
-4, -8, 0, 8, 4,
-6, -12, 0, 12, 6,
-4, -8, 0, 8, 4,
-1, -2, 0, 2, 1 };
const float mask7x7[] = { 1, 1, 1, 0, -1, -1, -1,
1, 2, 2, 0, -2, -2, -1,
1, 2, 3, 0, -3, -2, -1,
1, 2, 3, 0, -3, -2, -1,
1, 2, 3, 0, -3, -2, -1,
1, 2, 2, 0, -2, -2, -1,
1, 1, 1, 0, -1, -1, -1 };
Size newSize(input.size().width, input.size().height);
Mat outputCPU(newSize, input.type());
Mat outputGPUGlobalMem(newSize, input.type());
Mat outputGPUSharedMem(newSize, input.type());
const float * mask = mask3x3;
const int maskWidth = 3;
printf("\n ---Running benchmarking. Please wait...---\n");
printf("Mask width: %d \n", maskWidth);
printf("** Running on CPU...\n");
convolutionCPU_test(input, outputCPU, mask, maskWidth);
printf("** Running on GPU: global memory\n");
convolutionGPU_global_mem_test(input, outputGPUGlobalMem, mask, maskWidth);
printf("** Running on GPU: shared memory\n");
convolutionGPU_shared_mem_test(input, outputGPUSharedMem, mask, maskWidth);
printf("\n ---Benchmarking Done---\n");
//--
// Show original and resulting images
namedWindow("Original Image", 1);
imshow("Original Image", input);
namedWindow("Convolution CPU", 1);
imshow("Convolution CPU", outputCPU);
imwrite(getFileName("./Convolution_CPU.jpg", maskWidth), outputCPU);
namedWindow("Convoluton CUDA Global Memory", 1);
imshow("Convoluton CUDA Global Memory", outputGPUGlobalMem);
imwrite(getFileName("./Convolution_CUDA_Global_Memory.jpg", maskWidth), outputGPUGlobalMem);
//imwrite(outputPath, output);
namedWindow("Convoluton CUDA Shared Memory", 1);
imshow("Convoluton CUDA Shared Memory", outputGPUSharedMem);
std::stringstream ss;
imwrite(getFileName("./Convolution_CUDA_Shared_Memory", maskWidth), outputGPUGlobalMem);
//--
// Print resulting images sum
double sumCPUGlobalMem = sumImagePixels(outputCPU - outputGPUGlobalMem);
printf("Resulting sum (cpu - cuda_global_mem): %g\n", sumCPUGlobalMem);
double sumSharedMem = sumImagePixels(outputCPU - outputGPUSharedMem);
printf("Resulting sum (cpu - cuda_shared_mem): %g\n", sumSharedMem);
//wait for the user to press any key:
waitKey(0);
return 0;
} | b3d3b57d2ecea2d4c0f9624a94d8f835df51ed05.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<iostream>
#include <stdio.h>
#include <algorithm>
#include <cmath>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
using namespace cv;
#define BLOCK_WIDTH 32
/*-------------------------------------------*/
/* Utils */
/*-------------------------------------------*/
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
struct CpuTimer {
int64 start;
int64 stop;
CpuTimer()
{
}
void Start()
{
start = cv::getTickCount();
}
void Stop()
{
stop = cv::getTickCount();
}
float Elapsed()
{
return ((stop - start) / cv::getTickFrequency()) * 1000;
}
};
// Sum all pixels of an image with depth 3 (3 channels)
double sumImagePixels(cv::Mat input) {
cv::Scalar sum = cv::sum(input);
return sum[0] + sum[1] + sum[2];
}
//std::sort(durations.begin(), durations.end());
float average(float data[]) {
float accum = 0;
for (int i = 10; i < 90; ++i)
accum += data[i];
return accum / 80;
}
float standardDeviation(float data[])
{
float sum = 0.0, mean, standardDeviation = 0.0;
int i;
for (i = 10; i < 90; ++i)
{
sum += data[i];
}
mean = sum / 80;
for (i = 10; i < 90; ++i)
standardDeviation += pow(data[i] - mean, 2);
return sqrt(standardDeviation / 80);
}
string getFileName(string s, int maskWidth) {
std::stringstream ss;
ss << s << maskWidth << ".jpg";
return ss.str();
}
void printLastCudaError() {
// make the host block until the device is finished with foo
cudaDeviceSynchronize();
// check for error
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
}
else {
printf("No CUDA error \n");
}
}
/*-------------------------------------------*/
/* CPU "kernel"
/*-------------------------------------------*/
void convolutionCPU(const unsigned char *input, unsigned char *output, int width, int height,
int step, int channels, const float * mask, int maskWidth)
{
for (int row = 0; row < height; ++row)
for (int col = 0; col < width; ++col)
{
int rowStart = row - maskWidth / 2;
int colStart = col - maskWidth / 2;
for (int currentChannel = 0; currentChannel < channels; ++currentChannel)
{
float sum = 0;
for (int currentMaskRow = 0; currentMaskRow < maskWidth; ++currentMaskRow)
{
for (int currentMaskCol = 0; currentMaskCol < maskWidth; ++currentMaskCol)
{
int currentRow = rowStart + currentMaskRow;
int currentCol = colStart + currentMaskCol;
// Verify we have a valid image pixel
if (currentRow > -1 && currentRow < height && currentCol > -1 && currentCol < width)
sum += input[currentRow * step + currentCol * channels + currentChannel] *
mask[currentMaskRow * maskWidth + currentMaskCol];
}
}
//Make sure pixel values are in the range 0-255
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
output[row * step + col * channels + currentChannel] = static_cast<unsigned char>(sum);
}
}
}
/*-------------------------------------------*/
/* GPU (CUDA) kernels */
/*-------------------------------------------*/
__global__
void convolutionGPUKernelSharedMem(const unsigned char *input, unsigned char *output, int width, int height,
int step, int channels, const float * __restrict__ mask, int maskWidth, int tileWidth)
{
__shared__ unsigned char Ns[BLOCK_WIDTH][BLOCK_WIDTH];
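// The block has BLOCK_WIDTH x BLOCK_WIDTH threads, but only tileWidth x tileWidth of them
// (tileWidth = BLOCK_WIDTH - maskWidth + 1) write outputs; the extra threads only help load
// the input tile plus its halo into shared memory.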
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y*tileWidth + ty;
int col_o = blockIdx.x*tileWidth + tx;
int row_i = row_o - maskWidth / 2;
int col_i = col_o - maskWidth / 2;
// Compute the output value for each channel
for (int k = 0; k < channels; ++k) {
// Load image into shared memory.
// All threads are involved in this operation
if ((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < width))
{
Ns[ty][tx] = input[row_i * step + col_i * channels + k];
}
else {
Ns[ty][tx] = 0.0f;
}
// Wait for the loading to finish
__syncthreads();
// Compute the output value.
// Note that some threads don't take part in this
if (ty < tileWidth && tx < tileWidth)
{
float sum = 0.0f;
for (int i = 0; i < maskWidth; i++)
{
for (int j = 0; j < maskWidth; j++)
{
sum += mask[i * maskWidth + j] * Ns[i + ty][j + tx];
}
}
if (row_o < height && col_o < width) {
// Normalize output value
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
output[row_o*step + col_o * channels + k] = static_cast<unsigned char>(sum);
}
}
// Wait for computation to finish for this channel
// before moving to another channel
__syncthreads();
}
}
__global__
void convolutionGPUKernelGlobalMem(const unsigned char *input, unsigned char *output,
int width, int height, int step, int channels, const float * mask, int maskWidth)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height)
{
int rowStart = row - maskWidth / 2;
int colStart = col - maskWidth / 2;
for (int currentChannel = 0; currentChannel < channels; ++currentChannel)
{
float sum = 0;
for (int currentMaskRow = 0; currentMaskRow < maskWidth; ++currentMaskRow)
{
for (int currentMaskCol = 0; currentMaskCol < maskWidth; ++currentMaskCol)
{
int currentRow = rowStart + currentMaskRow;
int currentCol = colStart + currentMaskCol;
// Verify we have a valid image pixel
if (currentRow > -1 && currentRow < height && currentCol > -1 && currentCol < width)
{
sum += input[currentRow * step + currentCol * channels + currentChannel] *
mask[currentMaskRow * maskWidth + currentMaskCol];
}
}
}
//Make sure pixel values are in the range 0-255
if (sum < 0) sum = 0;
if (sum > 255) sum = 255;
output[row * step + col * channels + currentChannel] = static_cast<unsigned char>(sum);
}
}
}
/*-------------------------------------------*/
/* Test - Testing kernels defined above */
/*-------------------------------------------*/
void convolutionGPU_shared_mem_test(const cv::Mat& input, cv::Mat& output,
const float * mask, const int maskWidth)
{
const int inputBytes = input.step * input.rows;
const int outputBytes = output.step * output.rows;
const int maskBytes = sizeof(float) * maskWidth * maskWidth;
unsigned char *d_output, *d_input;
float * d_mask;
const unsigned char * inputMat = input.ptr();
unsigned char * outputMat = output.ptr();
int width = input.cols;
int height = input.rows;
int step = input.step;
int channels = input.channels();
//Specify a reasonable block size
dim3 block(BLOCK_WIDTH, BLOCK_WIDTH);
//Calculate the tile width which depends on the mask size
int tileWidth = BLOCK_WIDTH - maskWidth + 1;
//Calculate grid size to cover the whole image
const dim3 grid((output.cols - 1) / tileWidth + 1, (output.rows - 1) / tileWidth + 1);
float durations[100];
for (int i = 0; i < 100; ++i) {
GpuTimer timer;
//Allocate device memory
cudaMalloc<unsigned char>(&d_input, inputBytes);
cudaMalloc<unsigned char>(&d_output, outputBytes);
cudaMalloc<float>(&d_mask, maskBytes);
//Copy input image to device
cudaMemcpy(d_input, inputMat, inputBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_mask, mask, maskBytes, cudaMemcpyHostToDevice);
timer.Start();
convolutionGPUKernelSharedMem << <grid, block >> > (d_input, d_output, width, height,
step, channels, d_mask, maskWidth, tileWidth);
timer.Stop();
//printLastCudaError();
//Copy input image to device
cudaMemcpy(outputMat, d_output, outputBytes, cudaMemcpyDeviceToHost);
durations[i] = timer.Elapsed();
}
std::sort(begin(durations), end(durations));
printf("CUDA Shared Memory average time: %g\n", average(durations));
printf("CUDA Shared Memory standard deviation: %g\n", standardDeviation(durations));
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_mask);
}
void convolutionGPU_global_mem_test(const cv::Mat& input, cv::Mat& output,
const float * mask, const int maskWidth)
{
const int inputBytes = input.step * input.rows;
const int outputBytes = output.step * output.rows;
const int maskBytes = sizeof(float) * maskWidth * maskWidth;
unsigned char *d_output, *d_input;
float * d_mask;
const unsigned char * inputMat = input.ptr();
unsigned char * outputMat = output.ptr();
int width = input.cols;
int height = input.rows;
int step = input.step;
int channels = input.channels();
//Specify a reasonable block size
const dim3 block(16, 16);
//Calculate grid size to cover the whole image
const dim3 grid((output.cols + block.x - 1) / block.x,
(output.rows + block.y - 1) / block.y);
float durations[100];
for (int i = 0; i < 100; ++i) {
GpuTimer timer;
//Allocate device memory
cudaMalloc<unsigned char>(&d_input, inputBytes);
cudaMalloc<unsigned char>(&d_output, outputBytes);
cudaMalloc<float>(&d_mask, maskBytes);
//Copy input image to device
cudaMemcpy(d_input, inputMat, inputBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_mask, mask, maskBytes, cudaMemcpyHostToDevice);
timer.Start();
convolutionGPUKernelGlobalMem << <grid, block >> > (d_input, d_output, width, height,
step, channels, d_mask, maskWidth);
timer.Stop();
//Copy input image to device
cudaMemcpy(outputMat, d_output, outputBytes, cudaMemcpyDeviceToHost);
durations[i] = timer.Elapsed();
}
std::sort(begin(durations), end(durations));
printf("CUDA Global Memory average time: %g\n", average(durations));
printf("CUDA Global Memory standard deviation: %g\n", standardDeviation(durations));
cudaFree(d_input);
cudaFree(d_output);
cudaFree(d_mask);
}
void convolutionCPU_test(const cv::Mat& input, cv::Mat& output,const float * mask, int maskWidth)
{
const unsigned char * inputMat = input.ptr();
unsigned char * outputMat = output.ptr();
int width = input.cols;
int height = input.rows;
int step = input.step;
int channels = input.channels();
float durations[100];
for (int i = 0; i < 100; ++i) {
CpuTimer timer;
timer.Start();
convolutionCPU(inputMat, outputMat, width, height, step, channels, mask, maskWidth);
timer.Stop();
durations[i] = timer.Elapsed();
}
std::sort(begin(durations), end(durations));
printf("CPU average time: %g\n", average(durations));
printf("CPU standard deviation: %g\n", standardDeviation(durations));
namedWindow("OutputCPU", 1);
imshow("OutputCPU", output);
}
int main()
{
string inputPath = "GiantLobster.jpg";
printf("Init\n");
//Load window;
Mat input = cv::imread(inputPath, CV_LOAD_IMAGE_COLOR);
const float mask3x3[] = { -1, 0, 1,
-2, 0, 2,
-1, 0, 1 };
const float mask5x5[] = { -1, -2, 0, 2, 1,
-4, -8, 0, 8, 4,
-6, -12, 0, 12, 6,
-4, -8, 0, 8, 4,
-1, -2, 0, 2, 1 };
const float mask7x7[] = { 1, 1, 1, 0, -1, -1, -1,
1, 2, 2, 0, -2, -2, -1,
1, 2, 3, 0, -3, -2, -1,
1, 2, 3, 0, -3, -2, -1,
1, 2, 3, 0, -3, -2, -1,
1, 2, 2, 0, -2, -2, -1,
1, 1, 1, 0, -1, -1, -1 };
Size newSize(input.size().width, input.size().height);
Mat outputCPU(newSize, input.type());
Mat outputGPUGlobalMem(newSize, input.type());
Mat outputGPUSharedMem(newSize, input.type());
const float * mask = mask3x3;
const int maskWidth = 3;
printf("\n ---Running benchmarking. Please wait...---\n");
printf("Mask width: %d \n", maskWidth);
printf("** Running on CPU...\n");
convolutionCPU_test(input, outputCPU, mask, maskWidth);
printf("** Running on GPU: global memory\n");
convolutionGPU_global_mem_test(input, outputGPUGlobalMem, mask, maskWidth);
printf("** Running on GPU: shared memory\n");
convolutionGPU_shared_mem_test(input, outputGPUSharedMem, mask, maskWidth);
printf("\n ---Benchmarking Done---\n");
//--
// Show original and resulting images
namedWindow("Original Image", 1);
imshow("Original Image", input);
namedWindow("Convolution CPU", 1);
imshow("Convolution CPU", outputCPU);
imwrite(getFileName("./Convolution_CPU.jpg", maskWidth), outputCPU);
namedWindow("Convoluton CUDA Global Memory", 1);
imshow("Convoluton CUDA Global Memory", outputGPUGlobalMem);
imwrite(getFileName("./Convolution_CUDA_Global_Memory.jpg", maskWidth), outputGPUGlobalMem);
//imwrite(outputPath, output);
namedWindow("Convoluton CUDA Shared Memory", 1);
imshow("Convoluton CUDA Shared Memory", outputGPUSharedMem);
	imwrite(getFileName("./Convolution_CUDA_Shared_Memory.jpg", maskWidth), outputGPUSharedMem);
//--
// Print resulting images sum
double sumCPUGlobalMem = sumImagePixels(outputCPU - outputGPUGlobalMem);
printf("Resulting sum (cpu - cuda_global_mem): %g\n", sumCPUGlobalMem);
double sumSharedMem = sumImagePixels(outputCPU - outputGPUSharedMem);
printf("Resulting sum (cpu - cuda_shared_mem): %g\n", sumSharedMem);
//wait for the user to press any key:
waitKey(0);
return 0;
} |
a733878c6b10e573fcec293ff0a8df5edb9db953.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_t1;
int xdim0_update_halo_kernel1_t1_h = -1;
__constant__ int ydim0_update_halo_kernel1_t1;
int ydim0_update_halo_kernel1_t1_h = -1;
__constant__ int xdim1_update_halo_kernel1_t1;
int xdim1_update_halo_kernel1_t1_h = -1;
__constant__ int ydim1_update_halo_kernel1_t1;
int ydim1_update_halo_kernel1_t1_h = -1;
__constant__ int xdim2_update_halo_kernel1_t1;
int xdim2_update_halo_kernel1_t1_h = -1;
__constant__ int ydim2_update_halo_kernel1_t1;
int ydim2_update_halo_kernel1_t1_h = -1;
__constant__ int xdim3_update_halo_kernel1_t1;
int xdim3_update_halo_kernel1_t1_h = -1;
__constant__ int ydim3_update_halo_kernel1_t1;
int ydim3_update_halo_kernel1_t1_h = -1;
__constant__ int xdim4_update_halo_kernel1_t1;
int xdim4_update_halo_kernel1_t1_h = -1;
__constant__ int ydim4_update_halo_kernel1_t1;
int ydim4_update_halo_kernel1_t1_h = -1;
__constant__ int xdim5_update_halo_kernel1_t1;
int xdim5_update_halo_kernel1_t1_h = -1;
__constant__ int ydim5_update_halo_kernel1_t1;
int ydim5_update_halo_kernel1_t1_h = -1;
__constant__ int xdim6_update_halo_kernel1_t1;
int xdim6_update_halo_kernel1_t1_h = -1;
__constant__ int ydim6_update_halo_kernel1_t1;
int ydim6_update_halo_kernel1_t1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel1_t1 * (y) + \
xdim0_update_halo_kernel1_t1 * ydim0_update_halo_kernel1_t1 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel1_t1 * (y) + \
xdim1_update_halo_kernel1_t1 * ydim1_update_halo_kernel1_t1 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_update_halo_kernel1_t1 * (y) + \
xdim2_update_halo_kernel1_t1 * ydim2_update_halo_kernel1_t1 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_update_halo_kernel1_t1 * (y) + \
xdim3_update_halo_kernel1_t1 * ydim3_update_halo_kernel1_t1 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_update_halo_kernel1_t1 * (y) + \
xdim4_update_halo_kernel1_t1 * ydim4_update_halo_kernel1_t1 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_update_halo_kernel1_t1 * (y) + \
xdim5_update_halo_kernel1_t1 * ydim5_update_halo_kernel1_t1 * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_update_halo_kernel1_t1 * (y) + \
xdim6_update_halo_kernel1_t1 * ydim6_update_halo_kernel1_t1 * (z))
// user function
__device__
inline void
update_halo_kernel1_t1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed, const int *fields) {
if (fields[FIELD_DENSITY0] == 1)
density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, -1, 0)];
if (fields[FIELD_DENSITY1] == 1)
density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, -1, 0)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, -1, 0)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, -1, 0)];
if (fields[FIELD_PRESSURE] == 1)
pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, -1, 0)];
if (fields[FIELD_VISCOSITY] == 1)
viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, -1, 0)];
if (fields[FIELD_SOUNDSPEED] == 1)
soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, -1, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void
ops_update_halo_kernel1_t1(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, const int *__restrict arg7,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim0_update_halo_kernel1_t1 *
ydim0_update_halo_kernel1_t1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim1_update_halo_kernel1_t1 *
ydim1_update_halo_kernel1_t1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim2_update_halo_kernel1_t1 *
ydim2_update_halo_kernel1_t1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim3_update_halo_kernel1_t1 *
ydim3_update_halo_kernel1_t1;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim4_update_halo_kernel1_t1 *
ydim4_update_halo_kernel1_t1;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim5_update_halo_kernel1_t1 *
ydim5_update_halo_kernel1_t1;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim6_update_halo_kernel1_t1 *
ydim6_update_halo_kernel1_t1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 8, range, 59))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(59, "update_halo_kernel1_t1");
OPS_kernels[59].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_t1_h ||
ydim0 != ydim0_update_halo_kernel1_t1_h ||
xdim1 != xdim1_update_halo_kernel1_t1_h ||
ydim1 != ydim1_update_halo_kernel1_t1_h ||
xdim2 != xdim2_update_halo_kernel1_t1_h ||
ydim2 != ydim2_update_halo_kernel1_t1_h ||
xdim3 != xdim3_update_halo_kernel1_t1_h ||
ydim3 != ydim3_update_halo_kernel1_t1_h ||
xdim4 != xdim4_update_halo_kernel1_t1_h ||
ydim4 != ydim4_update_halo_kernel1_t1_h ||
xdim5 != xdim5_update_halo_kernel1_t1_h ||
ydim5 != ydim5_update_halo_kernel1_t1_h ||
xdim6 != xdim6_update_halo_kernel1_t1_h ||
ydim6 != ydim6_update_halo_kernel1_t1_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_t1_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel1_t1, &ydim0, sizeof(int));
ydim0_update_halo_kernel1_t1_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_t1_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel1_t1, &ydim1, sizeof(int));
ydim1_update_halo_kernel1_t1_h = ydim1;
hipMemcpyToSymbol(xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_t1_h = xdim2;
hipMemcpyToSymbol(ydim2_update_halo_kernel1_t1, &ydim2, sizeof(int));
ydim2_update_halo_kernel1_t1_h = ydim2;
hipMemcpyToSymbol(xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_t1_h = xdim3;
hipMemcpyToSymbol(ydim3_update_halo_kernel1_t1, &ydim3, sizeof(int));
ydim3_update_halo_kernel1_t1_h = ydim3;
hipMemcpyToSymbol(xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_t1_h = xdim4;
hipMemcpyToSymbol(ydim4_update_halo_kernel1_t1, &ydim4, sizeof(int));
ydim4_update_halo_kernel1_t1_h = ydim4;
hipMemcpyToSymbol(xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_t1_h = xdim5;
hipMemcpyToSymbol(ydim5_update_halo_kernel1_t1, &ydim5, sizeof(int));
ydim5_update_halo_kernel1_t1_h = ydim5;
hipMemcpyToSymbol(xdim6_update_halo_kernel1_t1, &xdim6, sizeof(int));
xdim6_update_halo_kernel1_t1_h = xdim6;
hipMemcpyToSymbol(ydim6_update_halo_kernel1_t1, &ydim6, sizeof(int));
ydim6_update_halo_kernel1_t1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[8];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] -
args[6].dat->base[1] - d_m[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] -
d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[59].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel1_t1), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[59].time += t1 - t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[59].mpi_time += t2 - t1;
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
| a733878c6b10e573fcec293ff0a8df5edb9db953.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_t1;
int xdim0_update_halo_kernel1_t1_h = -1;
__constant__ int ydim0_update_halo_kernel1_t1;
int ydim0_update_halo_kernel1_t1_h = -1;
__constant__ int xdim1_update_halo_kernel1_t1;
int xdim1_update_halo_kernel1_t1_h = -1;
__constant__ int ydim1_update_halo_kernel1_t1;
int ydim1_update_halo_kernel1_t1_h = -1;
__constant__ int xdim2_update_halo_kernel1_t1;
int xdim2_update_halo_kernel1_t1_h = -1;
__constant__ int ydim2_update_halo_kernel1_t1;
int ydim2_update_halo_kernel1_t1_h = -1;
__constant__ int xdim3_update_halo_kernel1_t1;
int xdim3_update_halo_kernel1_t1_h = -1;
__constant__ int ydim3_update_halo_kernel1_t1;
int ydim3_update_halo_kernel1_t1_h = -1;
__constant__ int xdim4_update_halo_kernel1_t1;
int xdim4_update_halo_kernel1_t1_h = -1;
__constant__ int ydim4_update_halo_kernel1_t1;
int ydim4_update_halo_kernel1_t1_h = -1;
__constant__ int xdim5_update_halo_kernel1_t1;
int xdim5_update_halo_kernel1_t1_h = -1;
__constant__ int ydim5_update_halo_kernel1_t1;
int ydim5_update_halo_kernel1_t1_h = -1;
__constant__ int xdim6_update_halo_kernel1_t1;
int xdim6_update_halo_kernel1_t1_h = -1;
__constant__ int ydim6_update_halo_kernel1_t1;
int ydim6_update_halo_kernel1_t1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel1_t1 * (y) + \
xdim0_update_halo_kernel1_t1 * ydim0_update_halo_kernel1_t1 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel1_t1 * (y) + \
xdim1_update_halo_kernel1_t1 * ydim1_update_halo_kernel1_t1 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_update_halo_kernel1_t1 * (y) + \
xdim2_update_halo_kernel1_t1 * ydim2_update_halo_kernel1_t1 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_update_halo_kernel1_t1 * (y) + \
xdim3_update_halo_kernel1_t1 * ydim3_update_halo_kernel1_t1 * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_update_halo_kernel1_t1 * (y) + \
xdim4_update_halo_kernel1_t1 * ydim4_update_halo_kernel1_t1 * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_update_halo_kernel1_t1 * (y) + \
xdim5_update_halo_kernel1_t1 * ydim5_update_halo_kernel1_t1 * (z))
#define OPS_ACC6(x, y, z) \
(x + xdim6_update_halo_kernel1_t1 * (y) + \
xdim6_update_halo_kernel1_t1 * ydim6_update_halo_kernel1_t1 * (z))
// user function
__device__
inline void
update_halo_kernel1_t1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed, const int *fields) {
if (fields[FIELD_DENSITY0] == 1)
density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, -1, 0)];
if (fields[FIELD_DENSITY1] == 1)
density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, -1, 0)];
if (fields[FIELD_ENERGY0] == 1)
energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, -1, 0)];
if (fields[FIELD_ENERGY1] == 1)
energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, -1, 0)];
if (fields[FIELD_PRESSURE] == 1)
pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, -1, 0)];
if (fields[FIELD_VISCOSITY] == 1)
viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, -1, 0)];
if (fields[FIELD_SOUNDSPEED] == 1)
soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, -1, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void
ops_update_halo_kernel1_t1(double *__restrict arg0, double *__restrict arg1,
double *__restrict arg2, double *__restrict arg3,
double *__restrict arg4, double *__restrict arg5,
double *__restrict arg6, const int *__restrict arg7,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim0_update_halo_kernel1_t1 *
ydim0_update_halo_kernel1_t1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim1_update_halo_kernel1_t1 *
ydim1_update_halo_kernel1_t1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim2_update_halo_kernel1_t1 *
ydim2_update_halo_kernel1_t1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim3_update_halo_kernel1_t1 *
ydim3_update_halo_kernel1_t1;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim4_update_halo_kernel1_t1 *
ydim4_update_halo_kernel1_t1;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim5_update_halo_kernel1_t1 *
ydim5_update_halo_kernel1_t1;
arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_t1 +
idx_z * 1 * 1 * xdim6_update_halo_kernel1_t1 *
ydim6_update_halo_kernel1_t1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5, ops_arg arg6,
ops_arg arg7) {
// Timing
double t1, t2, c1, c2;
ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 8, range, 59))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(59, "update_halo_kernel1_t1");
OPS_kernels[59].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_t1_h ||
ydim0 != ydim0_update_halo_kernel1_t1_h ||
xdim1 != xdim1_update_halo_kernel1_t1_h ||
ydim1 != ydim1_update_halo_kernel1_t1_h ||
xdim2 != xdim2_update_halo_kernel1_t1_h ||
ydim2 != ydim2_update_halo_kernel1_t1_h ||
xdim3 != xdim3_update_halo_kernel1_t1_h ||
ydim3 != ydim3_update_halo_kernel1_t1_h ||
xdim4 != xdim4_update_halo_kernel1_t1_h ||
ydim4 != ydim4_update_halo_kernel1_t1_h ||
xdim5 != xdim5_update_halo_kernel1_t1_h ||
ydim5 != ydim5_update_halo_kernel1_t1_h ||
xdim6 != xdim6_update_halo_kernel1_t1_h ||
ydim6 != ydim6_update_halo_kernel1_t1_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int));
xdim0_update_halo_kernel1_t1_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel1_t1, &ydim0, sizeof(int));
ydim0_update_halo_kernel1_t1_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int));
xdim1_update_halo_kernel1_t1_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel1_t1, &ydim1, sizeof(int));
ydim1_update_halo_kernel1_t1_h = ydim1;
cudaMemcpyToSymbol(xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int));
xdim2_update_halo_kernel1_t1_h = xdim2;
cudaMemcpyToSymbol(ydim2_update_halo_kernel1_t1, &ydim2, sizeof(int));
ydim2_update_halo_kernel1_t1_h = ydim2;
cudaMemcpyToSymbol(xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int));
xdim3_update_halo_kernel1_t1_h = xdim3;
cudaMemcpyToSymbol(ydim3_update_halo_kernel1_t1, &ydim3, sizeof(int));
ydim3_update_halo_kernel1_t1_h = ydim3;
cudaMemcpyToSymbol(xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int));
xdim4_update_halo_kernel1_t1_h = xdim4;
cudaMemcpyToSymbol(ydim4_update_halo_kernel1_t1, &ydim4, sizeof(int));
ydim4_update_halo_kernel1_t1_h = ydim4;
cudaMemcpyToSymbol(xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int));
xdim5_update_halo_kernel1_t1_h = xdim5;
cudaMemcpyToSymbol(ydim5_update_halo_kernel1_t1, &ydim5, sizeof(int));
ydim5_update_halo_kernel1_t1_h = ydim5;
cudaMemcpyToSymbol(xdim6_update_halo_kernel1_t1, &xdim6, sizeof(int));
xdim6_update_halo_kernel1_t1_h = xdim6;
cudaMemcpyToSymbol(ydim6_update_halo_kernel1_t1, &ydim6, sizeof(int));
ydim6_update_halo_kernel1_t1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[8];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[4].dat->d_m[d];
#endif
int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] -
args[4].dat->base[0] - d_m[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] -
args[4].dat->base[1] - d_m[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] -
d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[5].dat->d_m[d];
#endif
int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] -
args[5].dat->base[0] - d_m[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] -
args[5].dat->base[1] - d_m[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] -
d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[6].dat->d_m[d];
#endif
int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] -
args[6].dat->base[0] - d_m[0]);
base6 = base6 +
dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] -
args[6].dat->base[1] - d_m[1]);
base6 = base6 +
dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] -
d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args, 8, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[59].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel1_t1<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (int *)arg7.data_d,
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[59].time += t1 - t2;
}
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
ops_set_halo_dirtybit3(&args[3], range);
ops_set_halo_dirtybit3(&args[4], range);
ops_set_halo_dirtybit3(&args[5], range);
ops_set_halo_dirtybit3(&args[6], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[59].mpi_time += t2 - t1;
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[59].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
|
24cfee6c116757658fc625a8ca329980e393b55d.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathPairwise.hip"
#else
#include <ATen/NamedTensorUtils.h>
static int THCTensor_(equalImpl)(THCState *state, THCTensor *self_, THCTensor *src_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (!THCTensor_(isSameSizeAs(state, self_, src_))) {
return 0;
}
// This is not as efficient as TH, but the basic idea: create a buffer that stores
// 1 if the two tensors are equal at a position, otherwise 0. If the minimum value
// in this buffer is 1, the two tensors are equal, otherwise they are not
// Both tensors are empty
if(THTensor_(nElement)(self_) == 0) return true;
THCudaByteTensor *buf = THCudaByteTensor_newWithSize(state, self_->sizes(), {});
if (!THC_pointwiseApply3<uint8_t, scalar_t, scalar_t>(state, buf, self_, src_, TensorEQOp<scalar_t, unsigned char>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
unsigned char min = THCudaByteTensor_minall(state, buf);
THCudaByteTensor_free(state, buf);
return min != 0;
}
int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_) {
if (!at::namedinference::are_names_equal(self_, src_)) {
return 0;
}
at::NoNamesGuard guard;
return THCTensor_(equalImpl)(state, self_, src_);
}
void THCTensor_(bitand)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitand only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(bitor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(value != ScalarConvert<int, scalar_t>::to(0), 3, "divide by zero");
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, value));
#elif defined(THC_REAL_IS_HALF)
return THError("lshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, -value));
#elif defined(THC_REAL_IS_HALF)
return THError("rshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
#endif
}
void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(triu)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(!src_->is_empty() && src_->dim() == 2, 1, "expected a matrix");
if (self_ != src_)
THCTensor_(resizeAs)(state, self_, src_);
int64_t stride0 = self_->stride(0);
int64_t stride1 = self_->stride(1);
scalar_t *start = THCTensor_(data)(state, self_);
TensorTriOp<scalar_t, 1> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
#endif
#endif
| 24cfee6c116757658fc625a8ca329980e393b55d.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathPairwise.cu"
#else
#include <ATen/NamedTensorUtils.h>
static int THCTensor_(equalImpl)(THCState *state, THCTensor *self_, THCTensor *src_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (!THCTensor_(isSameSizeAs(state, self_, src_))) {
return 0;
}
// This is not as efficient as TH, but the basic idea: create a buffer that stores
// 1 if the two tensors are equal at a position, otherwise 0. If the minimum value
// in this buffer is 1, the two tensors are equal, otherwise they are not
// Both tensors are empty
if(THTensor_(nElement)(self_) == 0) return true;
THCudaByteTensor *buf = THCudaByteTensor_newWithSize(state, self_->sizes(), {});
if (!THC_pointwiseApply3<uint8_t, scalar_t, scalar_t>(state, buf, self_, src_, TensorEQOp<scalar_t, unsigned char>())) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
unsigned char min = THCudaByteTensor_minall(state, buf);
THCudaByteTensor_free(state, buf);
return min != 0;
}
int THCTensor_(equal)(THCState *state, THCTensor *self_, THCTensor *src_) {
if (!at::namedinference::are_names_equal(self_, src_)) {
return 0;
}
at::NoNamesGuard guard;
return THCTensor_(equalImpl)(state, self_, src_);
}
void THCTensor_(bitand)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitand only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitAndConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(bitor)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
return THError("bitor only supported for integer type tensors");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorBitOrConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
#if !defined(THC_REAL_IS_BOOL)
void THCTensor_(mul)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorMulConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(div)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(value != ScalarConvert<int, scalar_t>::to(0), 3, "divide by zero");
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorDivConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(lshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, value));
#elif defined(THC_REAL_IS_HALF)
return THError("lshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorLShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(rshift)(THCState* state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
THCTensor_(mul)(state, self_, src_, pow(2, -value));
#elif defined(THC_REAL_IS_HALF)
return THError("rshift not supported for torch.CudaHalfTensor");
#else
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRShiftConstantOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
#endif
}
void THCTensor_(fmod)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorFmodOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(remainder)(THCState *state, THCTensor *self_, THCTensor *src_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCTensor_(resizeAs)(state, self_, src_);
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, TensorRemainderOp<scalar_t>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(triu)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
THArgCheck(!src_->is_empty() && src_->dim() == 2, 1, "expected a matrix");
if (self_ != src_)
THCTensor_(resizeAs)(state, self_, src_);
int64_t stride0 = self_->stride(0);
int64_t stride1 = self_->stride(1);
scalar_t *start = THCTensor_(data)(state, self_);
TensorTriOp<scalar_t, 1> op(start, stride0, stride1, k);
if (self_ == src_) {
if (!THC_pointwiseApply1<scalar_t>(state, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src_, op)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
#endif
#endif
|
648a58cfd8d2bcb34c6c7250588f413cc7744a29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Use 1024 threads per block, which requires cuda sm_2x or above
const int CUDA_NUM_THREADS = 1024;
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
// Kernel for fast unfold+copy
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu)
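// im2col turns the convolution into a single GEMM: every ksize x ksize patch of
// every input channel is copied into one column of data_col, producing a
// (channels*ksize*ksize) x (height_col*width_col) matrix. Each thread below
// handles one (channel, output row, output column) triple and writes its patch,
// substituting zero for reads that fall into the padding region.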
__global__ void im2col_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize, const int pad,
const int stride, const int height_col, const int width_col,
float* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
index /= width_col;
int h_out = index % height_col;
int channel_in = index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
data_col += (channel_out * height_col + h_out) * width_col + w_out;
data_im += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[i * width + j] : 0;
data_col += height_col * width_col;
}
}
}
}
void im2col(const float* data_im, const int channels,
const int height, const int width, const int ksize, const int pad,
const int stride, float* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
// Launch
hipLaunchKernelGGL(( im2col_kernel) , dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, ksize,
pad, stride,
height_col, width_col, data_col
);
}
__global__ void col2im_kernel(const int n, const float* data_col,
const int height, const int width, const int channels, const int ksize,
const int pad, const int stride, const int height_col, const int width_col,
float* data_im) {
CUDA_KERNEL_LOOP(index, n) {
float val = 0;
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int w_col_end = min(w / stride + 1, width_col);
int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int h_col_end = min(h / stride + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * ksize * ksize + (h - h_col * stride) * ksize + (w - w_col * stride);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
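    // Expanding data_col[(c_col * height_col + h_col) * width_col + w_col] with
    // c_col = c * ksize * ksize + (h - h_col * stride) * ksize + (w - w_col * stride)
    // and collecting the h_col and w_col terms gives a fixed offset plus one
    // coefficient per axis, so the loops below reduce to simple index arithmetic.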
int offset = (c * ksize * ksize + h * ksize + w) * height_col * width_col;
int coeff_h_col = (1 - stride * ksize * height_col) * width_col;
int coeff_w_col = (1 - stride * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
void col2im(const float* data_col, const int channels,
const int height, const int width, const int ksize, const int pad,
const int stride, float* data_im) {
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im_kernel) , dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, ksize, pad, stride,
height_col, width_col, data_im
);
}
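// Forward pass: for each batch element the convolution is computed as
// output = weight * im2col(input) + bias, i.e. one im2col unfold followed by a
// single GEMM per sample; 'columns' is the unfold scratch buffer and 'ones' is
// a vector of ones used to broadcast the bias with an additional GEMM.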
static int cunn_SpatialConvolutionMM_updateOutput(lua_State *L) {
// Input
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
// Params:
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
int padding = luaT_getfieldcheckint(L, 1, "padding");
THCudaTensor *weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *bias = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "bias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padding - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padding - kH) / dH + 1;
luaL_argcheck(L, kW == kH, 1, "filters must be square (kW == kH)");
luaL_argcheck(L, dW == dH, 1, "stride must be square (dW == dH)");
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(ones, outputHeight, outputWidth);
THCudaTensor_fill(ones, 1);
}
// Helpers
THCudaTensor *input_n = THCudaTensor_new();
THCudaTensor *output_n = THCudaTensor_new();
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
    // Matrix multiply per output:
THCudaTensor_select(input_n, input, 0, elt);
THCudaTensor_select(output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = outputHeight * outputWidth;
long k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
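    // The BLAS gemm here expects column-major storage while these buffers are
    // row-major, so each product C = A*B is issued as C^T = B^T * A^T: a
    // row-major matrix reinterpreted as column-major is its transpose, which is
    // why the operand order and leading dimensions look swapped relative to the
    // math. The same trick applies to the other gemm calls in this file.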
hipblasSgemm(
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(ones), k_,
THCudaTensor_data(bias), k_,
0,
THCudaTensor_data(output_n), n_
);
// Extract columns:
im2col(
THCudaTensor_data(input_n),
nInputPlane, inputHeight, inputWidth, kW, padding, dW,
THCudaTensor_data(columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[0];
long n = columns->size[1];
long k = weight->size[1];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
hipblasSgemm(
'n', 'n',
n, m, k,
1,
THCudaTensor_data(columns), n,
THCudaTensor_data(weight), k,
1,
THCudaTensor_data(output_n), n
);
THCublasCheck();
}
// Free
THCudaTensor_free(input_n);
THCudaTensor_free(output_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(output, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(input, nInputPlane, inputHeight, inputWidth);
}
// return output
return 1;
}
static int cunn_SpatialConvolutionMM_updateGradInput(lua_State *L) {
// Inputs
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
int padding = luaT_getfieldcheckint(L, 1, "padding");
THCudaTensor *weight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *gradColumns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padding - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padding - kH) / dH + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCudaTensor_resize2d(gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new();
THCudaTensor *gradInput_n = THCudaTensor_new();
THCudaTensor *gradOutput_n = THCudaTensor_new();
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per sample:
THCudaTensor_select(input_n, input, 0, elt);
THCudaTensor_select(gradInput_n, gradInput, 0, elt);
THCudaTensor_select(gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[1];
long n = gradColumns->size[1];
long k = weight->size[0];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
hipblasSgemm(
'n', 't',
n, m, k,
1,
THCudaTensor_data(gradOutput_n), n,
THCudaTensor_data(weight), m,
0,
THCudaTensor_data(gradColumns), n
);
THCublasCheck();
// Unpack columns back into input:
col2im(
THCudaTensor_data(gradColumns),
nInputPlane, inputHeight, inputWidth, kW, padding, dW,
THCudaTensor_data(gradInput_n)
);
}
// Free
THCudaTensor_free(input_n);
THCudaTensor_free(gradInput_n);
THCudaTensor_free(gradOutput_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(input, nInputPlane, inputHeight, inputWidth);
THCudaTensor_resize3d(gradInput, nInputPlane, inputHeight, inputWidth);
}
// Return gradInput
return 1;
}
static int cunn_SpatialConvolutionMM_accGradParameters(lua_State *L) {
// Inputs
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
int padding = luaT_getfieldcheckint(L, 1, "padding");
float scale = luaL_optnumber(L, 4, 1);
THCudaTensor *gradWeight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradWeight", "torch.CudaTensor");
THCudaTensor *gradBias = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradBias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padding - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padding - kH) / dH + 1;
luaL_argcheck(L, kW == kH, 1, "filters must be square (kW == kH)");
luaL_argcheck(L, dW == dH, 1, "stride must be square (dW == dH)");
// Batch size + input planes
long batchSize = input->size[0];
// Define a buffer of ones, for bias accumulation
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(ones, outputHeight, outputWidth);
THCudaTensor_fill(ones, 1);
}
// Resize temporary columns
THCudaTensor_resize2d(columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new();
THCudaTensor *gradOutput_n = THCudaTensor_new();
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCudaTensor_select(input_n, input, 0, elt);
THCudaTensor_select(gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im2col(
THCudaTensor_data(input_n),
nInputPlane, inputHeight, inputWidth, kW, padding, dW,
THCudaTensor_data(columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = gradWeight->size[0];
long n = gradWeight->size[1];
long k = columns->size[1];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
hipblasSgemm(
't', 'n',
n, m, k,
scale,
THCudaTensor_data(columns), k,
THCudaTensor_data(gradOutput_n), k,
1,
THCudaTensor_data(gradWeight), n
);
THCublasCheck();
// Do Bias:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = 1;
long k_ = outputHeight * outputWidth;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
hipblasSgemm(
'n', 'n',
n_, m_, k_,
scale,
THCudaTensor_data(ones), n_,
THCudaTensor_data(gradOutput_n), k_,
1,
THCudaTensor_data(gradBias), n_
);
}
// Free
THCudaTensor_free(input_n);
THCudaTensor_free(gradOutput_n);
// Resize
if (batch == 0) {
THCudaTensor_resize3d(gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(input, nInputPlane, inputHeight, inputWidth);
}
// Return nothing
return 0;
}
static const struct luaL_Reg cunn_SpatialConvolutionMM__ [] = {
{"SpatialConvolutionMM_updateOutput", cunn_SpatialConvolutionMM_updateOutput},
{"SpatialConvolutionMM_updateGradInput", cunn_SpatialConvolutionMM_updateGradInput},
{"SpatialConvolutionMM_accGradParameters", cunn_SpatialConvolutionMM_accGradParameters},
{NULL, NULL}
};
static void cunn_SpatialConvolutionMM_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SpatialConvolutionMM__, "nn");
lua_pop(L,1);
}
| 648a58cfd8d2bcb34c6c7250588f413cc7744a29.cu | // CUDA: grid stride looping
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
// Use 1024 threads per block, which requires cuda sm_2x or above
const int CUDA_NUM_THREADS = 1024;
// CUDA: number of blocks for threads.
inline int GET_BLOCKS(const int N) {
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
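// A minimal usage sketch of the two helpers above (illustrative only; the
// kernel below is not part of this module and its name/arguments are made up):
// each thread strides over the index range, so any n is covered by
// GET_BLOCKS(n) blocks of CUDA_NUM_THREADS threads.
__global__ void example_scale_kernel(const int n, float* data, const float alpha) {
CUDA_KERNEL_LOOP(index, n) {
data[index] *= alpha; // grid-stride loop: works even if the grid is capped
}
}
// Launched as: example_scale_kernel<<<GET_BLOCKS(n), CUDA_NUM_THREADS>>>(n, data, 2.0f);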
// Kernel for fast unfold+copy
// (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu)
__global__ void im2col_kernel(const int n, const float* data_im,
const int height, const int width, const int ksize, const int pad,
const int stride, const int height_col, const int width_col,
float* data_col) {
CUDA_KERNEL_LOOP(index, n) {
int w_out = index % width_col;
index /= width_col;
int h_out = index % height_col;
int channel_in = index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
data_col += (channel_out * height_col + h_out) * width_col + w_out;
data_im += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
*data_col = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im[i * width + j] : 0;
data_col += height_col * width_col;
}
}
}
}
void im2col(const float* data_im, const int channels,
const int height, const int width, const int ksize, const int pad,
const int stride, float* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
// Launch
im2col_kernel <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>> (
num_kernels, data_im, height, width, ksize,
pad, stride,
height_col, width_col, data_col
);
}
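// Worked example of the output-size formula above (illustrative numbers only):
// height = width = 224, ksize = 3, pad = 1, stride = 1 gives
// height_col = (224 + 2*1 - 3) / 1 + 1 = 224 (and likewise width_col = 224),
// so num_kernels = channels * 224 * 224 copy threads are launched.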
__global__ void col2im_kernel(const int n, const float* data_col,
const int height, const int width, const int channels, const int ksize,
const int pad, const int stride, const int height_col, const int width_col,
float* data_im) {
CUDA_KERNEL_LOOP(index, n) {
float val = 0;
int w = index % width + pad;
int h = (index / width) % height + pad;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int w_col_end = min(w / stride + 1, width_col);
int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int h_col_end = min(h / stride + 1, height_col);
/*
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * ksize * ksize + (h - h_col * stride) * ksize + (w - w_col * stride);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
*/
// equivalent implementation
int offset = (c * ksize * ksize + h * ksize + w) * height_col * width_col;
int coeff_h_col = (1 - stride * ksize * height_col) * width_col;
int coeff_w_col = (1 - stride * height_col * width_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
}
}
data_im[index] = val;
}
}
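// Sanity check of the "equivalent implementation" above: expanding the
// commented-out indexing
//   (c_col * height_col + h_col) * width_col + w_col
// with c_col = c*ksize*ksize + (h - h_col*stride)*ksize + (w - w_col*stride)
// yields
//   (c*ksize*ksize + h*ksize + w) * height_col * width_col
//   + h_col * (1 - stride*ksize*height_col) * width_col
//   + w_col * (1 - stride*height_col*width_col),
// which is exactly offset + h_col*coeff_h_col + w_col*coeff_w_col.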
void col2im(const float* data_col, const int channels,
const int height, const int width, const int ksize, const int pad,
const int stride, float* data_im) {
int height_col = (height + 2 * pad - ksize) / stride + 1;
int width_col = (width + 2 * pad - ksize) / stride + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
col2im_kernel <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>> (
num_kernels, data_col, height, width, channels, ksize, pad, stride,
height_col, width_col, data_im
);
}
static int cunn_SpatialConvolutionMM_updateOutput(lua_State *L) {
// Input
THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor");
// Params:
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
int padding = luaT_getfieldcheckint(L, 1, "padding");
THCudaTensor *weight = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *bias = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "bias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padding - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padding - kH) / dH + 1;
luaL_argcheck(L, kW == kH, 1, "filters must be square (kW == kH)");
luaL_argcheck(L, dW == dH, 1, "stride must be square (dW == dH)");
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(output, batchSize, nOutputPlane, outputHeight, outputWidth);
// Resize temporary columns
THCudaTensor_resize2d(columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets increased,
// and always contains ones.
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(ones, outputHeight, outputWidth);
THCudaTensor_fill(ones, 1);
}
// Helpers
THCudaTensor *input_n = THCudaTensor_new();
THCudaTensor *output_n = THCudaTensor_new();
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCudaTensor_select(input_n, input, 0, elt);
THCudaTensor_select(output_n, output, 0, elt);
// Do Bias first:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = outputHeight * outputWidth;
long k_ = 1;
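// Note: with k_ = 1 this GEMM is a rank-1 product of the ones vector (n_ x 1)
// and the bias vector (1 x m_): it pre-fills each output plane with its bias
// value (beta = 0 here), and the convolution GEMM below accumulates onto it
// with beta = 1.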
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
cublasSgemm(
't', 'n',
n_, m_, k_,
1,
THCudaTensor_data(ones), k_,
THCudaTensor_data(bias), k_,
0,
THCudaTensor_data(output_n), n_
);
// Extract columns:
im2col(
THCudaTensor_data(input_n),
nInputPlane, inputHeight, inputWidth, kW, padding, dW,
THCudaTensor_data(columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[0];
long n = columns->size[1];
long k = weight->size[1];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
cublasSgemm(
'n', 'n',
n, m, k,
1,
THCudaTensor_data(columns), n,
THCudaTensor_data(weight), k,
1,
THCudaTensor_data(output_n), n
);
THCublasCheck();
}
// Free
THCudaTensor_free(input_n);
THCudaTensor_free(output_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(output, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(input, nInputPlane, inputHeight, inputWidth);
}
// return output
return 1;
}
static int cunn_SpatialConvolutionMM_updateGradInput(lua_State *L) {
// Inputs
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
int padding = luaT_getfieldcheckint(L, 1, "padding");
THCudaTensor *weight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *gradColumns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padding - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padding - kH) / dH + 1;
// Batch size + input planes
long batchSize = input->size[0];
// Resize output
THCudaTensor_resize4d(gradInput, batchSize, nInputPlane, inputHeight, inputWidth);
// Resize temporary columns
THCudaTensor_resize2d(gradColumns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new();
THCudaTensor *gradInput_n = THCudaTensor_new();
THCudaTensor *gradOutput_n = THCudaTensor_new();
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per sample:
THCudaTensor_select(input_n, input, 0, elt);
THCudaTensor_select(gradInput_n, gradInput, 0, elt);
THCudaTensor_select(gradOutput_n, gradOutput, 0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = weight->size[1];
long n = gradColumns->size[1];
long k = weight->size[0];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
cublasSgemm(
'n', 't',
n, m, k,
1,
THCudaTensor_data(gradOutput_n), n,
THCudaTensor_data(weight), m,
0,
THCudaTensor_data(gradColumns), n
);
THCublasCheck();
// Unpack columns back into input:
col2im(
THCudaTensor_data(gradColumns),
nInputPlane, inputHeight, inputWidth, kW, padding, dW,
THCudaTensor_data(gradInput_n)
);
}
// Free
THCudaTensor_free(input_n);
THCudaTensor_free(gradInput_n);
THCudaTensor_free(gradOutput_n);
// Resize output
if (batch == 0) {
THCudaTensor_resize3d(gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(input, nInputPlane, inputHeight, inputWidth);
THCudaTensor_resize3d(gradInput, nInputPlane, inputHeight, inputWidth);
}
// Return gradInput
return 1;
}
static int cunn_SpatialConvolutionMM_accGradParameters(lua_State *L) {
// Inputs
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
// Params
int dW = luaT_getfieldcheckint(L, 1, "dW");
int dH = luaT_getfieldcheckint(L, 1, "dH");
int kW = luaT_getfieldcheckint(L, 1, "kW");
int kH = luaT_getfieldcheckint(L, 1, "kH");
int nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
int padding = luaT_getfieldcheckint(L, 1, "padding");
float scale = luaL_optnumber(L, 4, 1);
THCudaTensor *gradWeight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradWeight", "torch.CudaTensor");
THCudaTensor *gradBias = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradBias", "torch.CudaTensor");
THCudaTensor *columns = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "finput", "torch.CudaTensor");
THCudaTensor *ones = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "fgradInput", "torch.CudaTensor");
luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch mode) tensor is expected");
int batch = 1;
if (input->nDimension == 3) {
// Force batch
batch = 0;
THCudaTensor_resize4d(input, 1, input->size[0], input->size[1], input->size[2]);
THCudaTensor_resize4d(gradOutput, 1, gradOutput->size[0], gradOutput->size[1], gradOutput->size[2]);
}
long inputWidth = input->size[3];
long inputHeight = input->size[2];
long outputWidth = (inputWidth + 2*padding - kW) / dW + 1;
long outputHeight = (inputHeight + 2*padding - kH) / dH + 1;
luaL_argcheck(L, kW == kH, 1, "filters must be square (kW == kH)");
luaL_argcheck(L, dW == dH, 1, "stride must be square (dW == dH)");
// Batch size + input planes
long batchSize = input->size[0];
// Define a buffer of ones, for bias accumulation
if (ones->nDimension != 2 || ones->size[0]*ones->size[1] < outputHeight*outputWidth) {
// Resize plane and fill with ones...
THCudaTensor_resize2d(ones, outputHeight, outputWidth);
THCudaTensor_fill(ones, 1);
}
// Resize temporary columns
THCudaTensor_resize2d(columns, nInputPlane*kW*kH, outputHeight*outputWidth);
// Helpers
THCudaTensor *input_n = THCudaTensor_new();
THCudaTensor *gradOutput_n = THCudaTensor_new();
// For each elt in batch, do:
for (int elt = 0; elt < batchSize; elt ++) {
// Matrix multiply per output:
THCudaTensor_select(input_n, input, 0, elt);
THCudaTensor_select(gradOutput_n, gradOutput, 0, elt);
// Extract columns:
im2col(
THCudaTensor_data(input_n),
nInputPlane, inputHeight, inputWidth, kW, padding, dW,
THCudaTensor_data(columns)
);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m = gradWeight->size[0];
long n = gradWeight->size[1];
long k = columns->size[1];
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
cublasSgemm(
't', 'n',
n, m, k,
scale,
THCudaTensor_data(columns), k,
THCudaTensor_data(gradOutput_n), k,
1,
THCudaTensor_data(gradWeight), n
);
THCublasCheck();
// Do Bias:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
long m_ = nOutputPlane;
long n_ = 1;
long k_ = outputHeight * outputWidth;
// Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices)
cublasSgemm(
'n', 'n',
n_, m_, k_,
scale,
THCudaTensor_data(ones), n_,
THCudaTensor_data(gradOutput_n), k_,
1,
THCudaTensor_data(gradBias), n_
);
}
// Free
THCudaTensor_free(input_n);
THCudaTensor_free(gradOutput_n);
// Resize
if (batch == 0) {
THCudaTensor_resize3d(gradOutput, nOutputPlane, outputHeight, outputWidth);
THCudaTensor_resize3d(input, nInputPlane, inputHeight, inputWidth);
}
// Return nothing
return 0;
}
static const struct luaL_Reg cunn_SpatialConvolutionMM__ [] = {
{"SpatialConvolutionMM_updateOutput", cunn_SpatialConvolutionMM_updateOutput},
{"SpatialConvolutionMM_updateGradInput", cunn_SpatialConvolutionMM_updateGradInput},
{"SpatialConvolutionMM_accGradParameters", cunn_SpatialConvolutionMM_accGradParameters},
{NULL, NULL}
};
static void cunn_SpatialConvolutionMM_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_SpatialConvolutionMM__, "nn");
lua_pop(L,1);
}
|
5307647d0c81e4e276e5426ccf84c4826349bc93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define DATA_SIZE 10
__global__ void quicksort(int *data,int left,int right){
unsigned int thread_id = threadIdx.x;
// The body below is an unfinished stub: the locals are never used and the
// loop does no work, so the kernel leaves the data unsorted.
int pivot;
int min, max;
int i;
for (i = 0; i < DATA_SIZE; i++) {
}
}
__device__ void ave(int *data,int size){
}
__host__ void launch_quicksort(int *data,int count){
hipLaunchKernelGGL(( quicksort), dim3(1),dim3(1), 0, 0, data,0,count-1);
}
int main(int argc,char *argv[]){
int list[DATA_SIZE];
int *dev_c;
int i;
hipError_t err;
srand(1);
for(i=0;i<DATA_SIZE;i++)
list[i]=rand()%20;
err=hipMalloc((void **)&dev_c,sizeof(int)*DATA_SIZE);
if(err != hipSuccess){
printf("hipMalloc Error\n");
}
// Copy the unsorted data to the device before launching the kernel.
err = hipMemcpy(dev_c, list, sizeof(int)*DATA_SIZE, hipMemcpyHostToDevice);
if (err != hipSuccess) {
printf("hipMemcpy H2D Error\n");
}
launch_quicksort(dev_c, DATA_SIZE);
// Copy the (in principle sorted) data back to the host.
err = hipMemcpy(list, dev_c, sizeof(int)*DATA_SIZE, hipMemcpyDeviceToHost);
if (err != hipSuccess) {
printf("hipMemcpy D2H Error\n");
}
for (i = 0; i < DATA_SIZE; i++)
printf("%d\n", list[i]);
hipFree(dev_c);
return 0;
}
| 5307647d0c81e4e276e5426ccf84c4826349bc93.cu | #include <stdio.h>
#include <stdlib.h>
#define DATA_SIZE 10
__global__ void quicksort(int *data,int left,int right){
unsigned int thread_id = threadIdx.x;
// The body below is an unfinished stub: the locals are never used and the
// loop does no work, so the kernel leaves the data unsorted.
int pivot;
int min, max;
int i;
for (i = 0; i < DATA_SIZE; i++) {
}
}
__device__ void ave(int *data,int size){
}
__host__ void launch_quicksort(int *data,int count){
quicksort<<<1,1>>>(data,0,count-1);
}
int main(int argc,char *argv[]){
int list[DATA_SIZE];
int *dev_c;
int i;
cudaError_t err;
srand(1);
for(i=0;i<DATA_SIZE;i++)
list[i]=rand()%20;
err=cudaMalloc((void **)&dev_c,sizeof(int)*DATA_SIZE);
if(err != cudaSuccess){
printf("cudaMalloc Error\n");
}
// Copy the unsorted data to the device before launching the kernel.
err = cudaMemcpy(dev_c, list, sizeof(int)*DATA_SIZE, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("cudaMemcpy H2D Error\n");
}
launch_quicksort(dev_c, DATA_SIZE);
// Copy the (in principle sorted) data back to the host.
err = cudaMemcpy(list, dev_c, sizeof(int)*DATA_SIZE, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
printf("cudaMemcpy D2H Error\n");
}
for (i = 0; i < DATA_SIZE; i++)
printf("%d\n", list[i]);
cudaFree(dev_c);
return 0;
}
|
f9efa59e1efa9cd7f842162ea3766f9afcf7cee2.hip | // !!! This is a file automatically generated by hipify!!!
// workaround issue between gcc >= 4.7 and cuda 5.5
#if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7)
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
#endif
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "opencv2/core/cuda.hpp"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime_api.h"
#include <iostream>
#include <vector>
#include <fstream>
using namespace std;
#include <boost/timer.hpp>
// for sophus
#include <sophus/se3.hpp>
using Sophus::SE3d;
using Sophus::SE3f;
// for eigen
#include <Eigen/Core>
#include <Eigen/Geometry>
using namespace Eigen;
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "common.h"
#include "Timer.h"
using cv::cuda::GpuMat;
using cv::cuda::HostMem;
using cv::Mat;
using cv::cuda::PtrStepSz;
#include "generate_ply.h"
__device__ __forceinline__ EigenVector3 px2cam_gpu(const EigenVector2 px) {
return EigenVector3(
(px(0, 0) - cx) / fx,
(px(1, 0) - cy) / fy,
1
);
}
// camera frame to pixel coordinates
__device__ __forceinline__ EigenVector2 cam2px_gpu(const EigenVector3 p_cam) {
return EigenVector2(
p_cam(0, 0) * fx / p_cam(2, 0) + cx,
p_cam(1, 0) * fy / p_cam(2, 0) + cy
);
}
__device__ __forceinline__ bool inside_gpu(const EigenVector2 &pt) {
return pt(0, 0) >= boarder && pt(1, 0) >= boarder
&& pt(0, 0) + boarder < width && pt(1, 0) + boarder <= height;
}
__device__ __forceinline__ FLOAT_T NCC_gpu(const PtrStepSz<uint8_t> &ref, const PtrStepSz<uint8_t> &curr, const EigenVector2 &pt_ref, const EigenVector2 &pt_curr);
__device__ __forceinline__ FLOAT_T getBilinearInterpolatedValue_gpu(const PtrStepSz<uint8_t> &img, const EigenVector2 &pt) {
//uchar *d = &img.data[int(pt(1, 0)) * img.step + int(pt(0, 0))];
const uint8_t* pixel_ptr = &img(floor(pt(1, 0)),floor(pt(0, 0)));//usage of PtrStepSz.
const uint8_t* next_ptr = &img(floor(pt(1, 0)+1) , floor(pt(0, 0)) );
FLOAT_T xx = pt(0, 0) - floor(pt(0, 0));
FLOAT_T yy = pt(1, 0) - floor(pt(1, 0));
return ((1 - xx) * (1 - yy) * FLOAT_T(pixel_ptr[0]) +
xx * (1 - yy) * FLOAT_T(pixel_ptr[1]) +
(1 - xx) * yy * FLOAT_T(next_ptr[0]) +
xx * yy * FLOAT_T(next_ptr[1])) / 255.0;
}
__device__ bool epipolarSearch_gpu(
const GpuMat &ref,
const GpuMat &curr,
const SE3_T &T_C_R,
const EigenVector2 &pt_ref,
const FLOAT_T &depth_mu,
const FLOAT_T &depth_cov,
EigenVector2 &pt_curr,
EigenVector2 &epipolar_direction
);
/**
 * Update the depth filter
 * @param pt_ref point in the reference image
 * @param pt_curr point in the current image
 * @param T_C_R pose
 * @param epipolar_direction epipolar direction
 * @param depth depth mean
 * @param depth_cov2 depth variance
 * @return whether the update succeeded
 */
__device__ bool updateDepthFilter_gpu(
const EigenVector2 &pt_ref,
const EigenVector2 &pt_curr,
const SE3_T &T_C_R,
const EigenVector2 &epipolar_direction,
PtrStepSz<FLOAT_T> &depth,
PtrStepSz<FLOAT_T> &depth_cov2
);
__device__ FLOAT_T NCC_gpu(
const PtrStepSz<uint8_t> &ref, const PtrStepSz<uint8_t> &curr,
const EigenVector2 &pt_ref, const EigenVector2 &pt_curr) {
// Zero-mean normalized cross-correlation
// First compute the means
FLOAT_T mean_ref = 0, mean_curr = 0;
//thrust::device_vector<double> values_ref(2*ncc_window_size+1),values_curr(2*ncc_window_size+1);//vector<double> values_ref, values_curr; //
const int TotalSize = (2*ncc_window_size+1)*(2*ncc_window_size+1);
FLOAT_T values_ref[TotalSize];
FLOAT_T values_curr[TotalSize];
int index = 0;
for (int x = -ncc_window_size; x <= ncc_window_size; x++)
{
for (int y = -ncc_window_size; y <= ncc_window_size; y++)
{
uint8_t pixel_val = ref((int)(pt_ref(1, 0)+y),(int)(pt_ref(0, 0)+x));//method to get val directly by PtrStepSz.
FLOAT_T value_ref = FLOAT_T(pixel_val)/255.0;
//double value_ref = double(ref.ptr<uchar>(int(y + pt_ref(1, 0)))[int(x + pt_ref(0, 0))]) / 255.0;
mean_ref += value_ref;
FLOAT_T value_curr = getBilinearInterpolatedValue_gpu(curr, pt_curr + EigenVector2(x, y));
mean_curr += value_curr;
//values_ref.push_back(value_ref);
//values_curr.push_back(value_curr);
values_ref[index] = value_ref;
values_curr[index] = value_curr;
index++;
}
}
mean_ref /= ncc_area;
mean_curr /= ncc_area;
// Zero mean NCC
FLOAT_T numerator = 0, demoniator1 = 0, demoniator2 = 0;
for (int i = 0; i < TotalSize; i++) {
FLOAT_T n = (values_ref[i] - mean_ref) * (values_curr[i] - mean_curr);
numerator += n;
demoniator1 += (values_ref[i] - mean_ref) * (values_ref[i] - mean_ref);
demoniator2 += (values_curr[i] - mean_curr) * (values_curr[i] - mean_curr);
}
return numerator / sqrt(demoniator1 * demoniator2 + 1e-10); // guard against a zero denominator
}
__device__ bool epipolarSearch_gpu(
const PtrStepSz<uint8_t> &ref, const PtrStepSz<uint8_t> &curr,
const SE3_T &T_C_R, const EigenVector2 &pt_ref,
const FLOAT_T &depth_mu, const FLOAT_T &depth_cov,
EigenVector2 &pt_curr, EigenVector2 &epipolar_direction,PtrStepSz<FLOAT_T> debug_mat) {
EigenVector3 f_ref = px2cam_gpu(pt_ref);
f_ref.normalize();
EigenVector3 P_ref = f_ref * depth_mu; // 3D point P in the reference frame
EigenVector2 px_mean_curr = cam2px_gpu(T_C_R * P_ref); // pixel projected at the mean depth
FLOAT_T d_min = depth_mu - 3 * depth_cov, d_max = depth_mu + 3 * depth_cov;
if (d_min < 0.1) d_min = 0.1;
EigenVector2 px_min_curr = cam2px_gpu(T_C_R * (f_ref * d_min)); // pixel projected at the minimum depth
EigenVector2 px_max_curr = cam2px_gpu(T_C_R * (f_ref * d_max)); // pixel projected at the maximum depth
EigenVector2 epipolar_line = px_max_curr - px_min_curr; // epipolar line (as a segment)
epipolar_direction = epipolar_line; // epipolar direction
epipolar_direction.normalize();
FLOAT_T half_length = 0.5 * epipolar_line.norm(); // half length of the epipolar segment
if (half_length > 100) half_length = 100; // do not search too far
// Uncomment to visualize the epipolar segment
// showEpipolarLine( ref, curr, pt_ref, px_min_curr, px_max_curr );
// Search along the epipolar line, centered on the mean-depth projection, half_length to each side
FLOAT_T best_ncc = -1.0;
EigenVector2 best_px_curr;
for (FLOAT_T l = -half_length; l <= half_length; l += 0.7) { // l+=sqrt(2)
EigenVector2 px_curr = px_mean_curr + l * epipolar_direction; // candidate matching point
if (!inside_gpu(px_curr))
{
continue;
}
// Compute the NCC between the candidate point and the reference patch
FLOAT_T ncc = NCC_gpu(ref, curr, pt_ref, px_curr);
if (ncc > best_ncc)
{
best_ncc = ncc;
best_px_curr = px_curr;
}
}
debug_mat((int)pt_ref(1,0),(int)pt_ref(0,0)) = best_ncc;
if (best_ncc < 0.90f) // only trust matches with a very high NCC //0.85
{
return false;
}
pt_curr = best_px_curr;
return true;
}
__device__ bool updateDepthFilter_gpu(
const EigenVector2 &pt_ref,
const EigenVector2 &pt_curr,
const SE3_T &T_C_R,
const EigenVector2 &epipolar_direction,
PtrStepSz<FLOAT_T> &depth,
PtrStepSz<FLOAT_T> &depth_cov2
)
{
// Compute the depth by triangulation
SE3_T T_R_C = T_C_R.inverse();
EigenVector3 f_ref = px2cam_gpu(pt_ref);
f_ref.normalize();
EigenVector3 f_curr = px2cam_gpu(pt_curr);
f_curr.normalize();
// Solve the equation
// d_ref * f_ref = d_cur * ( R_RC * f_cur ) + t_RC
// f2 = R_RC * f_cur
// which turns into the following 2x2 matrix equation
// => [ f_ref^T f_ref, -f_ref^T f2 ] [d_ref] [f_ref^T t]
// [ f_2^T f_ref, -f2^T f2 ] [d_cur] = [f2^T t ]
EigenVector3 t = T_R_C.translation();
EigenVector3 f2 = T_R_C.so3() * f_curr;
EigenVector2 b = EigenVector2(t.dot(f_ref), t.dot(f2));
EigenMatrix2 A;
A(0, 0) = f_ref.dot(f_ref);
A(0, 1) = -f_ref.dot(f2);
A(1, 0) = -A(0, 1);
A(1, 1) = -f2.dot(f2);
EigenMatrix2 A_inverse;
A_inverse(0,0) = A(1,1);
A_inverse(0,1) = -A(0,1);
A_inverse(1,0) = -A(1,0);
A_inverse(1,1) = A(0,0);
A_inverse*= 1.0/(A(1,1)*A(0,0)-A(0,1)*A(1,0));
//Vector2d ans = A.inverse() * b; //manually solve equation.
EigenVector2 ans = A_inverse * b;
EigenVector3 xm = ans[0] * f_ref; // result on the ref side
EigenVector3 xn = t + ans[1] * f2; // result on the cur side
EigenVector3 p_esti = (xm + xn) / 2.0; // position of P: average of the two
FLOAT_T depth_estimation = p_esti.norm(); // depth value
// Compute the uncertainty (assuming one pixel of matching error)
EigenVector3 p = f_ref * depth_estimation;
EigenVector3 a = p - t;
FLOAT_T t_norm = t.norm();
FLOAT_T a_norm = a.norm();
FLOAT_T alpha = acos(f_ref.dot(t) / t_norm);
FLOAT_T beta = acos(-a.dot(t) / (a_norm * t_norm));
EigenVector3 f_curr_prime = px2cam_gpu(pt_curr + epipolar_direction);
f_curr_prime.normalize();
FLOAT_T beta_prime = acos(f_curr_prime.dot(-t) / t_norm);
FLOAT_T gamma = M_PI - alpha - beta_prime;
FLOAT_T p_prime = t_norm * sin(beta_prime) / sin(gamma);
FLOAT_T d_cov = p_prime - depth_estimation;
FLOAT_T d_cov2 = d_cov * d_cov;
// Gaussian fusion
FLOAT_T mu = depth((int)pt_ref(1, 0),(int)pt_ref(0, 0));
FLOAT_T sigma2 = depth_cov2((int)pt_ref(1, 0),(int)pt_ref(0, 0));
FLOAT_T mu_fuse = (d_cov2 * mu + sigma2 * depth_estimation) / (sigma2 + d_cov2);
FLOAT_T sigma_fuse2 = (sigma2 * d_cov2) / (sigma2 + d_cov2);
depth((int)pt_ref(1, 0),(int)pt_ref(0, 0)) = mu_fuse;
depth_cov2((int)pt_ref(1, 0),(int)pt_ref(0, 0)) = sigma_fuse2;
return true;
}
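// Numerical illustration of the Gaussian fusion above (made-up values): with a
// prior mu = 3.0, sigma2 = 3.0 and an observation depth_estimation = 2.0 with
// d_cov2 = 0.5, mu_fuse = (0.5*3.0 + 3.0*2.0) / 3.5 ~= 2.14 and
// sigma_fuse2 = (3.0*0.5) / 3.5 ~= 0.43: the estimate moves toward the new
// observation and the variance shrinks.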
__global__ void update_kernel(PtrStepSz<uint8_t> ref, PtrStepSz<uint8_t> curr, SE3_T T_C_R, PtrStepSz<FLOAT_T> depth, PtrStepSz<FLOAT_T> depth_cov2,
PtrStepSz<FLOAT_T> debug_mat)
//Here we cannot use any const ref.Just PtrStepSz<Type>.
{
int y = threadIdx.x+blockIdx.x*blockDim.x;
int x = threadIdx.y+blockIdx.y*blockDim.y;
// if(x == 0&&y == 0)
// {
// printf("grid_size:%d,%d\n",gridDim.x,gridDim.y);
// }
if((x >= boarder&& x < width - boarder) && (y>=boarder&&y<height-boarder))
{
// Process this pixel
if (depth_cov2(y,x) < min_cov || depth_cov2(y,x) > max_cov) // depth has already converged or diverged
{
//return;
goto return_to_cpu;
}
// Search along the epipolar line for a match of (x,y)
EigenVector2 pt_curr;
EigenVector2 epipolar_direction;
bool ret = epipolarSearch_gpu(
ref,
curr,
T_C_R,
EigenVector2(x, y),
depth(y,x),
sqrt(depth_cov2(y,x)),
pt_curr,
epipolar_direction,debug_mat
);
//__syncthreads();
if (ret == true) // a match was found
{
// Uncomment to visualize the match
//showEpipolarMatch(ref, curr, EigenVector2(x, y), pt_curr);
//debug_mat(y,x) = 255;
// Match succeeded: update the depth map
updateDepthFilter_gpu(EigenVector2(x, y), pt_curr, T_C_R, epipolar_direction, depth, depth_cov2);
}
//__syncthreads();
}
else
{
;
}
return_to_cpu:
__syncthreads();
return;
}
void initGpuMats(const Mat& ref)
{
// HostMem ref_gpu_host(HostMem::SHARED),curr_gpu_host(HostMem::SHARED),depth_gpu_host(HostMem::SHARED),depth_cov2_gpu_host(HostMem::SHARED),debug_gpu_host(HostMem::SHARED);
// ref_gpu_host.create(ref.rows,ref.cols,CV_8U);
// ref_gpu=ref_gpu_host.createGpuMatHeader();
// curr_gpu_host.create(ref.rows,ref.cols,CV_8U);
// curr_gpu=curr_gpu_host.createGpuMatHeader();
// depth_gpu_host.create(ref.rows,ref.cols,CV_FLOAT_TYPE);
// depth_gpu=depth_gpu_host.createGpuMatHeader();
// depth_cov2_gpu_host.create(ref.rows,ref.cols,CV_FLOAT_TYPE);
// depth_cov2_gpu=depth_cov2_gpu_host.createGpuMatHeader();//make these gpumats static.
// debug_gpu_host.create(ref.rows,ref.cols,CV_FLOAT_TYPE);
// debug_mat_gpu=debug_gpu_host.createGpuMatHeader();
}
void update_kernel_wrapper_cpu( Mat& ref,Mat& curr,SE3_T T_C_R,Mat& depth,Mat& depth_cov2)
{
GpuMat ref_gpu,curr_gpu,depth_gpu,depth_cov2_gpu;//make these gpumats static.
GpuMat debug_mat_gpu;
ScopeTimer t_kernel("kernel");
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
cv::Mat debug_mat(ref.rows,ref.cols,CV_FLOAT_TYPE,1);
cv::cuda::registerPageLocked(ref);
cv::cuda::registerPageLocked(curr);
cv::cuda::registerPageLocked(depth);
cv::cuda::registerPageLocked(depth_cov2);
cv::cuda::registerPageLocked(debug_mat);
t_kernel.watch("page_locked");
ref_gpu.upload(ref);
curr_gpu.upload(curr);
depth_gpu.upload(depth);
depth_cov2_gpu.upload(depth_cov2);
debug_mat_gpu.upload(debug_mat);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
float timeCost_cuda;
hipEventElapsedTime(&timeCost_cuda,start,stop);
cout<<"time to upload:"<<timeCost_cuda<<endl;
hipEventDestroy(start);
hipEventDestroy(stop);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
t_kernel.watch("uploaded");
const int MAX_THREAD_SQRT = 16;
dim3 threads(MAX_THREAD_SQRT, MAX_THREAD_SQRT);
dim3 grids((ref.rows + MAX_THREAD_SQRT - 1)/MAX_THREAD_SQRT, (ref.cols + MAX_THREAD_SQRT - 1)/ MAX_THREAD_SQRT);
hipLaunchKernelGGL(( update_kernel), dim3(grids), dim3(threads), 0, 0, ref_gpu,curr_gpu,T_C_R,depth_gpu,depth_cov2_gpu,debug_mat_gpu);
t_kernel.watch("kernel func finished");
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&timeCost_cuda,start,stop);
cout<<"time to call kernel:"<<timeCost_cuda<<endl;
depth_gpu.download(depth);
t_kernel.watch("downloaded depth");
depth_cov2_gpu.download(depth_cov2);
t_kernel.watch("downloaded depth cov2");
debug_mat_gpu.download(debug_mat);
t_kernel.watch("downloaded debug mat");
cv::imshow("debug_ncc_val",debug_mat);
cv::waitKey(1);
cv::cuda::unregisterPageLocked(ref);
cv::cuda::unregisterPageLocked(curr);
cv::cuda::unregisterPageLocked(depth);
cv::cuda::unregisterPageLocked(depth_cov2);
cv::cuda::unregisterPageLocked(debug_mat);
t_kernel.watch("unregistered");
// ref_gpu.release();
// curr_gpu.release();
// depth_gpu.release();
// depth_cov2_gpu.release();
// debug_mat_gpu.release();
}
//#include "happly/happly.h"
//typedef std::array<double,3> P3d_PLY_T;
//inline void getXYZbyUVDepth(//const double& fx,const double& fy,const double& cx,const double& cy,
// const double &depth,
// const double& u,const double& v,
// double& x,double& y,double& z)
//{
// Eigen::Vector3d v_((x-cx)/fx,(y-cy)/fy,1);
// v_.normalize();
// v_*=depth;
// x = v_[0];
// y = v_[1];
// z = v_[2];
//}
//void dump_ply(const Mat& depth_estimate,const Mat& depth_cov2,const Mat& original_img,std::string ply_output_path)
//{
// //write a PLY.
// vector<P3d_PLY_T> vVertices;
// for (int v = 0; v < original_img.rows; v++)
// {
// for (int u = 0; u < original_img.cols; u++)
// {
// if(depth_cov2.at<FLOAT_T>(v,u) <= min_cov*15)//.
// {
// P3d_PLY_T pt;
// double x_3d,y_3d,z_3d,depth_3d;
// depth_3d = depth_estimate.at<FLOAT_T>(v,u);
// if(z_3d >1000||z_3d<0)
// {
// continue;
// }
// getXYZbyUVDepth(depth_3d,
// u,v,
// x_3d,y_3d,z_3d);
// if(isnan(x_3d)||isnan(y_3d)||isnan(z_3d) ||
// isinf(x_3d)||isinf(y_3d)||isinf(z_3d) )
// {
// continue;
// }
// pt[0] = x_3d*100;pt[1] = y_3d*100;pt[2] = z_3d*100;
// vVertices.push_back(pt);
// cout<<"uv:"<<u<<","<<v<<endl;
// }
// }
// }
// happly::PLYData plyOut;
// // Add mesh data (elements are created automatically)
// //LOG(INFO)<<"Writing ply with "<<vVertices.size()<<"vertices and "<<vTriangleIDs.size()<<"triangles."<<endl;
// plyOut.addVertexPositions(vVertices);
// //plyOut.addVertexColors(vVertexColors);
// //plyOut.addFaceIndices(vTriangleIDs);
// // Write the object to file
// plyOut.write("mesh.ply", happly::DataFormat::ASCII);
// //pcl::io::savePLYFile("mesh.ply",output_cloud);
//}
int main(int argc, char **argv) {
if (argc != 2) {
cout << "Usage: dense_mapping path_to_test_dataset" << endl;
return -1;
}
hipSetDevice(0);
hipProfilerStart();
// Read data from the dataset
vector<string> color_image_files;
vector<SE3_T> poses_TWC;
Mat ref_depth;
//bool ret = readDatasetFiles(argv[1], color_image_files, poses_TWC, ref_depth);
bool ret = readGeneratedDatasetFiles(argv[1], color_image_files, poses_TWC, ref_depth);
if (ret == false) {
cout << "Reading image files failed!" << endl;
return -1;
}
cout << "read total " << color_image_files.size() << " files." << endl;
// The first image
Mat ref = cv::imread(color_image_files[0], 0); // gray-scale image
SE3_T pose_ref_TWC = poses_TWC[0];
FLOAT_T init_depth = 3.0; // initial depth value
FLOAT_T init_cov2 = 3.0; // initial variance value
Mat depth(height, width, CV_FLOAT_TYPE, init_depth); // depth map
Mat depth_cov2(height, width, CV_FLOAT_TYPE, init_cov2); // depth variance map
initGpuMats(depth);
//for (int index = 1; index < color_image_files.size(); index++) {
for (int index = 1; index < 50; index++) {
cout << "*** loop " << index << " ***" << endl;
Mat curr = cv::imread(color_image_files[index], 0);
if (curr.data == nullptr) continue;
SE3_T pose_curr_TWC = poses_TWC[index];
SE3_T pose_T_C_R = pose_curr_TWC.inverse() * pose_ref_TWC; // T_C_W * T_W_R = T_C_R
update_kernel_wrapper_cpu(ref, curr, pose_T_C_R, depth, depth_cov2);
//evaludateDepth(ref_depth, depth);
plotDepth(ref_depth, depth);
imshow("image", curr);
waitKey(100);
}
cout << "estimation returns, saving depth map ..." << endl;
imwrite("depth.png", depth);
imwrite("depth_cov2.png",depth_cov2);
dump_ply(depth,depth_cov2,ref,"mesh.ply");
cout << "done." << endl;
hipProfilerStop();
return 0;
}
| f9efa59e1efa9cd7f842162ea3766f9afcf7cee2.cu | // workaround issue between gcc >= 4.7 and cuda 5.5
#if (defined __GNUC__) && (__GNUC__>4 || __GNUC_MINOR__>=7)
#undef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_USE_INT128
#endif
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include "opencv2/core/cuda.hpp"
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda_profiler_api.h"
#include <iostream>
#include <vector>
#include <fstream>
using namespace std;
#include <boost/timer.hpp>
// for sophus
#include <sophus/se3.hpp>
using Sophus::SE3d;
using Sophus::SE3f;
// for eigen
#include <Eigen/Core>
#include <Eigen/Geometry>
using namespace Eigen;
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "common.h"
#include "Timer.h"
using cv::cuda::GpuMat;
using cv::cuda::HostMem;
using cv::Mat;
using cv::cuda::PtrStepSz;
#include "generate_ply.h"
__device__ __forceinline__ EigenVector3 px2cam_gpu(const EigenVector2 px) {
return EigenVector3(
(px(0, 0) - cx) / fx,
(px(1, 0) - cy) / fy,
1
);
}
// camera frame to pixel coordinates
__device__ __forceinline__ EigenVector2 cam2px_gpu(const EigenVector3 p_cam) {
return EigenVector2(
p_cam(0, 0) * fx / p_cam(2, 0) + cx,
p_cam(1, 0) * fy / p_cam(2, 0) + cy
);
}
__device__ __forceinline__ bool inside_gpu(const EigenVector2 &pt) {
return pt(0, 0) >= boarder && pt(1, 0) >= boarder
&& pt(0, 0) + boarder < width && pt(1, 0) + boarder <= height;
}
__device__ __forceinline__ FLOAT_T NCC_gpu(const PtrStepSz<uint8_t> &ref, const PtrStepSz<uint8_t> &curr, const EigenVector2 &pt_ref, const EigenVector2 &pt_curr);
__device__ __forceinline__ FLOAT_T getBilinearInterpolatedValue_gpu(const PtrStepSz<uint8_t> &img, const EigenVector2 &pt) {
//uchar *d = &img.data[int(pt(1, 0)) * img.step + int(pt(0, 0))];
const uint8_t* pixel_ptr = &img(floor(pt(1, 0)),floor(pt(0, 0)));//usage of PtrStepSz.
const uint8_t* next_ptr = &img(floor(pt(1, 0)+1) , floor(pt(0, 0)) );
FLOAT_T xx = pt(0, 0) - floor(pt(0, 0));
FLOAT_T yy = pt(1, 0) - floor(pt(1, 0));
return ((1 - xx) * (1 - yy) * FLOAT_T(pixel_ptr[0]) +
xx * (1 - yy) * FLOAT_T(pixel_ptr[1]) +
(1 - xx) * yy * FLOAT_T(next_ptr[0]) +
xx * yy * FLOAT_T(next_ptr[1])) / 255.0;
}
__device__ bool epipolarSearch_gpu(
const GpuMat &ref,
const GpuMat &curr,
const SE3_T &T_C_R,
const EigenVector2 &pt_ref,
const FLOAT_T &depth_mu,
const FLOAT_T &depth_cov,
EigenVector2 &pt_curr,
EigenVector2 &epipolar_direction
);
/**
* 更新深度滤波器
* @param pt_ref 参考图像点
* @param pt_curr 当前图像点
* @param T_C_R 位姿
* @param epipolar_direction 极线方向
* @param depth 深度均值
* @param depth_cov2 深度方向
* @return 是否成功
*/
__device__ bool updateDepthFilter_gpu(
const EigenVector2 &pt_ref,
const EigenVector2 &pt_curr,
const SE3_T &T_C_R,
const EigenVector2 &epipolar_direction,
PtrStepSz<FLOAT_T> &depth,
PtrStepSz<FLOAT_T> &depth_cov2
);
__device__ FLOAT_T NCC_gpu(
const PtrStepSz<uint8_t> &ref, const PtrStepSz<uint8_t> &curr,
const EigenVector2 &pt_ref, const EigenVector2 &pt_curr) {
// Zero-mean normalized cross-correlation
// First compute the means
FLOAT_T mean_ref = 0, mean_curr = 0;
//thrust::device_vector<double> values_ref(2*ncc_window_size+1),values_curr(2*ncc_window_size+1);//vector<double> values_ref, values_curr; // means of the reference and current frames
const int TotalSize = (2*ncc_window_size+1)*(2*ncc_window_size+1);
FLOAT_T values_ref[TotalSize];
FLOAT_T values_curr[TotalSize];
int index = 0;
for (int x = -ncc_window_size; x <= ncc_window_size; x++)
{
for (int y = -ncc_window_size; y <= ncc_window_size; y++)
{
uint8_t pixel_val = ref((int)(pt_ref(1, 0)+y),(int)(pt_ref(0, 0)+x));//method to get val directly by PtrStepSz.
FLOAT_T value_ref = FLOAT_T(pixel_val)/255.0;
//double value_ref = double(ref.ptr<uchar>(int(y + pt_ref(1, 0)))[int(x + pt_ref(0, 0))]) / 255.0;
mean_ref += value_ref;
FLOAT_T value_curr = getBilinearInterpolatedValue_gpu(curr, pt_curr + EigenVector2(x, y));
mean_curr += value_curr;
//values_ref.push_back(value_ref);
//values_curr.push_back(value_curr);
values_ref[index] = value_ref;
values_curr[index] = value_curr;
index++;
}
}
mean_ref /= ncc_area;
mean_curr /= ncc_area;
// Compute the zero-mean NCC
FLOAT_T numerator = 0, demoniator1 = 0, demoniator2 = 0;
for (int i = 0; i < TotalSize; i++) {
FLOAT_T n = (values_ref[i] - mean_ref) * (values_curr[i] - mean_curr);
numerator += n;
demoniator1 += (values_ref[i] - mean_ref) * (values_ref[i] - mean_ref);
demoniator2 += (values_curr[i] - mean_curr) * (values_curr[i] - mean_curr);
}
return numerator / sqrt(demoniator1 * demoniator2 + 1e-10); // guard against a zero denominator
}
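// For reference, the value returned above is the zero-mean NCC
//   ZNCC = sum((I_ref - mean_ref) * (I_cur - mean_cur))
//        / sqrt(sum((I_ref - mean_ref)^2) * sum((I_cur - mean_cur)^2)),
// which lies in [-1, 1]; values close to 1 mean the two patches match up to an
// affine brightness change.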
__device__ bool epipolarSearch_gpu(
const PtrStepSz<uint8_t> &ref, const PtrStepSz<uint8_t> &curr,
const SE3_T &T_C_R, const EigenVector2 &pt_ref,
const FLOAT_T &depth_mu, const FLOAT_T &depth_cov,
EigenVector2 &pt_curr, EigenVector2 &epipolar_direction,PtrStepSz<FLOAT_T> debug_mat) {
EigenVector3 f_ref = px2cam_gpu(pt_ref);
f_ref.normalize();
EigenVector3 P_ref = f_ref * depth_mu; // P vector in the reference frame
EigenVector2 px_mean_curr = cam2px_gpu(T_C_R * P_ref); // pixel projected at the mean depth
FLOAT_T d_min = depth_mu - 3 * depth_cov, d_max = depth_mu + 3 * depth_cov;
if (d_min < 0.1) d_min = 0.1;
EigenVector2 px_min_curr = cam2px_gpu(T_C_R * (f_ref * d_min)); // pixel projected at the minimum depth
EigenVector2 px_max_curr = cam2px_gpu(T_C_R * (f_ref * d_max)); // pixel projected at the maximum depth
EigenVector2 epipolar_line = px_max_curr - px_min_curr; // epipolar line (as a segment)
epipolar_direction = epipolar_line; // epipolar direction
epipolar_direction.normalize();
FLOAT_T half_length = 0.5 * epipolar_line.norm(); // half length of the epipolar segment
if (half_length > 100) half_length = 100; // we do not want to search too far
// Uncomment to visualize the epipolar line (segment)
// showEpipolarLine( ref, curr, pt_ref, px_min_curr, px_max_curr );
// Search along the epipolar line, centered on the mean-depth point, half length to each side
FLOAT_T best_ncc = -1.0;
EigenVector2 best_px_curr;
for (FLOAT_T l = -half_length; l <= half_length; l += 0.7) { // l+=sqrt(2)
EigenVector2 px_curr = px_mean_curr + l * epipolar_direction; // candidate matching point
if (!inside_gpu(px_curr))
{
continue;
}
// Compute the NCC between the candidate point and the reference frame
FLOAT_T ncc = NCC_gpu(ref, curr, pt_ref, px_curr);
if (ncc > best_ncc)
{
best_ncc = ncc;
best_px_curr = px_curr;
}
}
debug_mat((int)pt_ref(1,0),(int)pt_ref(0,0)) = best_ncc;
if (best_ncc < 0.90f) // only trust matches with a very high NCC //0.85
{
return false;
}
pt_curr = best_px_curr;
return true;
}
__device__ bool updateDepthFilter_gpu(
const EigenVector2 &pt_ref,
const EigenVector2 &pt_curr,
const SE3_T &T_C_R,
const EigenVector2 &epipolar_direction,
PtrStepSz<FLOAT_T> &depth,
PtrStepSz<FLOAT_T> &depth_cov2
)
{
// (not sure anyone still reads this section)
// Compute the depth by triangulation
SE3_T T_R_C = T_C_R.inverse();
EigenVector3 f_ref = px2cam_gpu(pt_ref);
f_ref.normalize();
EigenVector3 f_curr = px2cam_gpu(pt_curr);
f_curr.normalize();
// 方程
// d_ref * f_ref = d_cur * ( R_RC * f_cur ) + t_RC
// f2 = R_RC * f_cur
// 转化成下面这个矩阵方程组
// => [ f_ref^T f_ref, -f_ref^T f2 ] [d_ref] [f_ref^T t]
// [ f_2^T f_ref, -f2^T f2 ] [d_cur] = [f2^T t ]
EigenVector3 t = T_R_C.translation();
EigenVector3 f2 = T_R_C.so3() * f_curr;
EigenVector2 b = EigenVector2(t.dot(f_ref), t.dot(f2));
EigenMatrix2 A;
A(0, 0) = f_ref.dot(f_ref);
A(0, 1) = -f_ref.dot(f2);
A(1, 0) = -A(0, 1);
A(1, 1) = -f2.dot(f2);
EigenMatrix2 A_inverse;
A_inverse(0,0) = A(1,1);
A_inverse(0,1) = -A(0,1);
A_inverse(1,0) = -A(1,0);
A_inverse(1,1) = A(0,0);
A_inverse*= 1.0/(A(1,1)*A(0,0)-A(0,1)*A(1,0));
//Vector2d ans = A.inverse() * b; //manually solve equation.
EigenVector2 ans = A_inverse * b;
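// The inverse above is the closed-form 2x2 formula (adjugate / determinant):
// A^-1 = [ A11 -A01 ; -A10 A00 ] / (A00*A11 - A01*A10), written out by hand
// presumably so that Eigen's .inverse() is not needed inside device code.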
EigenVector3 xm = ans[0] * f_ref; // result on the ref side
EigenVector3 xn = t + ans[1] * f2; // result on the cur side
EigenVector3 p_esti = (xm + xn) / 2.0; // position of P: average of the two
FLOAT_T depth_estimation = p_esti.norm(); // depth value
// Compute the uncertainty (assuming one pixel of matching error)
EigenVector3 p = f_ref * depth_estimation;
EigenVector3 a = p - t;
FLOAT_T t_norm = t.norm();
FLOAT_T a_norm = a.norm();
FLOAT_T alpha = acos(f_ref.dot(t) / t_norm);
FLOAT_T beta = acos(-a.dot(t) / (a_norm * t_norm));
EigenVector3 f_curr_prime = px2cam_gpu(pt_curr + epipolar_direction);
f_curr_prime.normalize();
FLOAT_T beta_prime = acos(f_curr_prime.dot(-t) / t_norm);
FLOAT_T gamma = M_PI - alpha - beta_prime;
FLOAT_T p_prime = t_norm * sin(beta_prime) / sin(gamma);
FLOAT_T d_cov = p_prime - depth_estimation;
FLOAT_T d_cov2 = d_cov * d_cov;
// Gaussian fusion
FLOAT_T mu = depth((int)pt_ref(1, 0),(int)pt_ref(0, 0));
FLOAT_T sigma2 = depth_cov2((int)pt_ref(1, 0),(int)pt_ref(0, 0));
FLOAT_T mu_fuse = (d_cov2 * mu + sigma2 * depth_estimation) / (sigma2 + d_cov2);
FLOAT_T sigma_fuse2 = (sigma2 * d_cov2) / (sigma2 + d_cov2);
depth((int)pt_ref(1, 0),(int)pt_ref(0, 0)) = mu_fuse;
depth_cov2((int)pt_ref(1, 0),(int)pt_ref(0, 0)) = sigma_fuse2;
return true;
}
__global__ void update_kernel(PtrStepSz<uint8_t> ref, PtrStepSz<uint8_t> curr, SE3_T T_C_R, PtrStepSz<FLOAT_T> depth, PtrStepSz<FLOAT_T> depth_cov2,
PtrStepSz<FLOAT_T> debug_mat)
//Here we cannot use any const ref.Just PtrStepSz<Type>.
{
int y = threadIdx.x+blockIdx.x*blockDim.x;
int x = threadIdx.y+blockIdx.y*blockDim.y;
// if(x == 0&&y == 0)
// {
// printf("grid_size:%d,%d\n",gridDim.x,gridDim.y);
// }
if((x >= boarder&& x < width - boarder) && (y>=boarder&&y<height-boarder))
{
// Iterate over each pixel
if (depth_cov2(y,x) < min_cov || depth_cov2(y,x) > max_cov) // depth has already converged or diverged
{
//return;
goto return_to_cpu;
}
// Search along the epipolar line for a match of (x,y)
EigenVector2 pt_curr;
EigenVector2 epipolar_direction;
bool ret = epipolarSearch_gpu(
ref,
curr,
T_C_R,
EigenVector2(x, y),
depth(y,x),
sqrt(depth_cov2(y,x)),
pt_curr,
epipolar_direction,debug_mat
);
//__syncthreads();
if (ret == true) // a match was found
{
// Uncomment to visualize the match
//showEpipolarMatch(ref, curr, EigenVector2(x, y), pt_curr);
//debug_mat(y,x) = 255;
// Match succeeded: update the depth map
updateDepthFilter_gpu(EigenVector2(x, y), pt_curr, T_C_R, epipolar_direction, depth, depth_cov2);
}
//__syncthreads();
}
else
{
;
}
return_to_cpu:
__syncthreads();
return;
}
void initGpuMats(const Mat& ref)
{
// HostMem ref_gpu_host(HostMem::SHARED),curr_gpu_host(HostMem::SHARED),depth_gpu_host(HostMem::SHARED),depth_cov2_gpu_host(HostMem::SHARED),debug_gpu_host(HostMem::SHARED);
// ref_gpu_host.create(ref.rows,ref.cols,CV_8U);
// ref_gpu=ref_gpu_host.createGpuMatHeader();
// curr_gpu_host.create(ref.rows,ref.cols,CV_8U);
// curr_gpu=curr_gpu_host.createGpuMatHeader();
// depth_gpu_host.create(ref.rows,ref.cols,CV_FLOAT_TYPE);
// depth_gpu=depth_gpu_host.createGpuMatHeader();
// depth_cov2_gpu_host.create(ref.rows,ref.cols,CV_FLOAT_TYPE);
// depth_cov2_gpu=depth_cov2_gpu_host.createGpuMatHeader();//make these gpumats static.
// debug_gpu_host.create(ref.rows,ref.cols,CV_FLOAT_TYPE);
// debug_mat_gpu=debug_gpu_host.createGpuMatHeader();
}
void update_kernel_wrapper_cpu( Mat& ref,Mat& curr,SE3_T T_C_R,Mat& depth,Mat& depth_cov2)
{
GpuMat ref_gpu,curr_gpu,depth_gpu,depth_cov2_gpu;//make these gpumats static.
GpuMat debug_mat_gpu;
ScopeTimer t_kernel("kernel");
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
cv::Mat debug_mat(ref.rows,ref.cols,CV_FLOAT_TYPE,1);
cv::cuda::registerPageLocked(ref);
cv::cuda::registerPageLocked(curr);
cv::cuda::registerPageLocked(depth);
cv::cuda::registerPageLocked(depth_cov2);
cv::cuda::registerPageLocked(debug_mat);
t_kernel.watch("page_locked");
ref_gpu.upload(ref);
curr_gpu.upload(curr);
depth_gpu.upload(depth);
depth_cov2_gpu.upload(depth_cov2);
debug_mat_gpu.upload(debug_mat);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float timeCost_cuda;
cudaEventElapsedTime(&timeCost_cuda,start,stop);
cout<<"time to upload:"<<timeCost_cuda<<endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
t_kernel.watch("uploaded");
const int MAX_THREAD_SQRT = 16;
dim3 threads(MAX_THREAD_SQRT, MAX_THREAD_SQRT);
dim3 grids((ref.rows + MAX_THREAD_SQRT - 1)/MAX_THREAD_SQRT, (ref.cols + MAX_THREAD_SQRT - 1)/ MAX_THREAD_SQRT);
update_kernel<<<grids, threads>>>(ref_gpu,curr_gpu,T_C_R,depth_gpu,depth_cov2_gpu,debug_mat_gpu);
t_kernel.watch("kernel func finished");
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&timeCost_cuda,start,stop);
cout<<"time to call kernel:"<<timeCost_cuda<<endl;
depth_gpu.download(depth);
t_kernel.watch("downloaded depth");
depth_cov2_gpu.download(depth_cov2);
t_kernel.watch("downloaded depth cov2");
debug_mat_gpu.download(debug_mat);
t_kernel.watch("downloaded debug mat");
cv::imshow("debug_ncc_val",debug_mat);
cv::waitKey(1);
cv::cuda::unregisterPageLocked(ref);
cv::cuda::unregisterPageLocked(curr);
cv::cuda::unregisterPageLocked(depth);
cv::cuda::unregisterPageLocked(depth_cov2);
cv::cuda::unregisterPageLocked(debug_mat);
t_kernel.watch("unregistered");
// ref_gpu.release();
// curr_gpu.release();
// depth_gpu.release();
// depth_cov2_gpu.release();
// debug_mat_gpu.release();
}
//#include "happly/happly.h"
//typedef std::array<double,3> P3d_PLY_T;
//inline void getXYZbyUVDepth(//const double& fx,const double& fy,const double& cx,const double& cy,
// const double &depth,
// const double& u,const double& v,
// double& x,double& y,double& z)
//{
// Eigen::Vector3d v_((x-cx)/fx,(y-cy)/fy,1);
// v_.normalize();
// v_*=depth;
// x = v_[0];
// y = v_[1];
// z = v_[2];
//}
//void dump_ply(const Mat& depth_estimate,const Mat& depth_cov2,const Mat& original_img,std::string ply_output_path)
//{
// //write a PLY.
// vector<P3d_PLY_T> vVertices;
// for (int v = 0; v < original_img.rows; v++)
// {
// for (int u = 0; u < original_img.cols; u++)
// {
// if(depth_cov2.at<FLOAT_T>(v,u) <= min_cov*15)//填充成功的点.
// {
// P3d_PLY_T pt;
// double x_3d,y_3d,z_3d,depth_3d;
// depth_3d = depth_estimate.at<FLOAT_T>(v,u);
// if(z_3d >1000||z_3d<0)
// {
// continue;
// }
// getXYZbyUVDepth(depth_3d,
// u,v,
// x_3d,y_3d,z_3d);
// if(isnan(x_3d)||isnan(y_3d)||isnan(z_3d) ||
// isinf(x_3d)||isinf(y_3d)||isinf(z_3d) )
// {
// continue;
// }
// pt[0] = x_3d*100;pt[1] = y_3d*100;pt[2] = z_3d*100;
// vVertices.push_back(pt);
// cout<<"uv:"<<u<<","<<v<<endl;
// }
// }
// }
// happly::PLYData plyOut;
// // Add mesh data (elements are created automatically)
// //LOG(INFO)<<"Writing ply with "<<vVertices.size()<<"vertices and "<<vTriangleIDs.size()<<"triangles."<<endl;
// plyOut.addVertexPositions(vVertices);
// //plyOut.addVertexColors(vVertexColors);
// //plyOut.addFaceIndices(vTriangleIDs);
// // Write the object to file
// plyOut.write("mesh.ply", happly::DataFormat::ASCII);
// //pcl::io::savePLYFile("mesh.ply",output_cloud);
//}
int main(int argc, char **argv) {
if (argc != 2) {
cout << "Usage: dense_mapping path_to_test_dataset" << endl;
return -1;
}
cudaSetDevice(0);
cudaProfilerStart();
// Read data from the dataset
vector<string> color_image_files;
vector<SE3_T> poses_TWC;
Mat ref_depth;
//bool ret = readDatasetFiles(argv[1], color_image_files, poses_TWC, ref_depth);
bool ret = readGeneratedDatasetFiles(argv[1], color_image_files, poses_TWC, ref_depth);
if (ret == false) {
cout << "Reading image files failed!" << endl;
return -1;
}
cout << "read total " << color_image_files.size() << " files." << endl;
// The first image
Mat ref = cv::imread(color_image_files[0], 0); // gray-scale image
SE3_T pose_ref_TWC = poses_TWC[0];
FLOAT_T init_depth = 3.0; // initial depth value
FLOAT_T init_cov2 = 3.0; // initial variance value
Mat depth(height, width, CV_FLOAT_TYPE, init_depth); // depth map
Mat depth_cov2(height, width, CV_FLOAT_TYPE, init_cov2); // depth variance map
initGpuMats(depth);
//for (int index = 1; index < color_image_files.size(); index++) {
for (int index = 1; index < 50; index++) {
cout << "*** loop " << index << " ***" << endl;
Mat curr = cv::imread(color_image_files[index], 0);
if (curr.data == nullptr) continue;
SE3_T pose_curr_TWC = poses_TWC[index];
SE3_T pose_T_C_R = pose_curr_TWC.inverse() * pose_ref_TWC; // coordinate transform: T_C_W * T_W_R = T_C_R
update_kernel_wrapper_cpu(ref, curr, pose_T_C_R, depth, depth_cov2);
//evaludateDepth(ref_depth, depth);
plotDepth(ref_depth, depth);
imshow("image", curr);
waitKey(100);
}
cout << "estimation returns, saving depth map ..." << endl;
imwrite("depth.png", depth);
imwrite("depth_cov2.png",depth_cov2);
dump_ply(depth,depth_cov2,ref,"mesh.ply");
cout << "done." << endl;
cudaProfilerStop();
return 0;
}
|
8ec3d94da86402cf6d1a87d02ffd243ac1296efa.hip | // !!! This is a file automatically generated by hipify!!!
/*
Compiling with nvcc:
nvcc texture.cu -o texture -std=c++11
./texture
*/
// Creating a texture object
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
using namespace std;
// For reference only: the texture descriptor layout. The type hipTextureDesc
// is already defined by the HIP runtime headers, so it must not be re-declared
// here; the definition is kept below as a comment.
// struct hipTextureDesc
// {
// enum hipTextureAddressMode addressMode[3];
// enum hipTextureFilterMode filterMode;
// enum hipTextureReadMode readMode;
// int sRGB;
// int normalizedCoords;
// unsigned int maxAnisotropy;
// enum hipTextureFilterMode mipmapFilterMode;
// float mipmapLevelBias;
// float minMipmapLevelClamp;
// float maxMipmapLevelClamp;
// };
#define N 1024
// texture object is a kernel argument
__global__ void kernel(hipTextureObject_t tex) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
float x = tex1Dfetch<float>(tex, i);
// Do something with x
}
int main() {
// declare and allocate memory
float *buffer;
hipMalloc(&buffer, N*sizeof(float));
// create texture object
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = buffer;
resDesc.res.linear.desc.f = hipChannelFormatKindFloat;
resDesc.res.linear.sizeInBytes = N*sizeof(float);
hipTextureDesc texDesc; // must be non-const: it is zeroed and filled in below
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = hipReadModeElementType;
// create texture object: we only have to do this once!
hipTextureObject_t tex;
hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
hipLaunchKernelGGL(kernel, dim3((N + 255) / 256), dim3(256), 0, 0, tex); // pass texture as argument; launch just enough threads to cover the N elements
// destroy texture object
hipDestroyTextureObject(tex);
hipFree(buffer);
} | 8ec3d94da86402cf6d1a87d02ffd243ac1296efa.cu | /*
Compiling with nvcc:
nvcc texture.cu -o texture -std=c++11
./texture
*/
// Creating a CUDA texture object
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
using namespace std;
// For reference only: the CUDA texture descriptor layout. The type
// cudaTextureDesc is already defined by the CUDA runtime headers
// (driver_types.h), so it must not be re-declared here; the definition is
// kept below as a comment.
// struct cudaTextureDesc
// {
// enum cudaTextureAddressMode addressMode[3];
// enum cudaTextureFilterMode filterMode;
// enum cudaTextureReadMode readMode;
// int sRGB;
// int normalizedCoords;
// unsigned int maxAnisotropy;
// enum cudaTextureFilterMode mipmapFilterMode;
// float mipmapLevelBias;
// float minMipmapLevelClamp;
// float maxMipmapLevelClamp;
// };
#define N 1024
// texture object is a kernel argument
__global__ void kernel(cudaTextureObject_t tex) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
float x = tex1Dfetch<float>(tex, i);
// Do something with x
}
int main() {
// declare and allocate memory
float *buffer;
cudaMalloc(&buffer, N*sizeof(float));
// create texture object
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = buffer;
resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
resDesc.res.linear.desc.x = 32; // bits per channel
resDesc.res.linear.sizeInBytes = N*sizeof(float);
cudaTextureDesc texDesc; // non-const: it is zeroed and configured below
memset(&texDesc, 0, sizeof(texDesc));
texDesc.readMode = cudaReadModeElementType;
// create texture object: we only have to do this once!
cudaTextureObject_t tex;
cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
kernel <<<512, 512>>>(tex); // pass texture as argument
// destroy texture object
cudaDestroyTextureObject(tex);
cudaFree(buffer);
} |
54643f4590ecbf4c4378338a4b8cb9d7992407b4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ocustorage/coarray_internal.h"
namespace ocu {
void
TransferRequestQ::process1d()
{
#ifdef OCU_OMP
omp_set_lock(&_lock);
#endif
for (int i=0; i < _q1d.size(); i++) {
TransferRequest1D d = _q1d[i];
// handle the request
switch(d.cmd) {
case TRANSFER_HOST_TO_DEVICE:
xfer_hregion1d_to_dregion1d(d.dst, d.src, d.num_bytes);
break;
case TRANSFER_DEVICE_TO_HOST:
xfer_dregion1d_to_hregion1d(d.dst, d.src, d.num_bytes);
break;
default:
printf("[ERROR] ransferRequestQ::process1d - invalid cmd %d\n", (unsigned int)d.cmd);
}
}
_q1d.clear();
#ifdef OCU_OMP
omp_unset_lock(&_lock);
#endif
}
void
TransferRequestQ::process3d()
{
#ifdef OCU_OMP
omp_set_lock(&_lock);
#endif
for (int i=0; i < _q3d.size(); i++) {
TransferRequest3D d = _q3d[i];
// handle the request
switch(d.cmd) {
case TRANSFER_HOSTBUFFER_TO_DEVICE:
xfer_hbuffer_to_dregion3d(d.dst, d.host_buffer, d.device_buffer, d.method);
break;
case TRANSFER_DEVICE_TO_HOSTBUFFER:
xfer_dregion3d_to_hbuffer(d.host_buffer, d.device_buffer, d.src, d.method);
break;
default:
printf("[ERROR] ransferRequestQ::process3d - invalid cmd %d\n", (unsigned int)d.cmd);
}
}
_q3d.clear();
#ifdef OCU_OMP
omp_unset_lock(&_lock);
#endif
}
void
TransferRequestQ::processalloc()
{
#ifdef OCU_OMP
omp_set_lock(&_lock);
#endif
for (int i=0; i < _qalloc.size(); i++) {
TransferRequestAlloc d = _qalloc[i];
// handle the request
switch(d.cmd) {
case TRANSFER_ALLOCATE:
{
hipError_t ok = hipMalloc(d.result, d.num_bytes);
if (ok != hipSuccess)
printf("[ERROR] TransferRequestQ::processalloc - hipMalloc failed: %s\n", hipGetErrorString(ok));
else
*d.valid = true;
}
break;
case TRANSFER_FREE:
{
hipFree(d.ptr);
}
break;
default:
printf("[ERROR] ransferRequestQ::processalloc - invalid cmd %d\n", (unsigned int)d.cmd);
}
}
_qalloc.clear();
#ifdef OCU_OMP
omp_unset_lock(&_lock);
#endif
}
} // end namespace
| 54643f4590ecbf4c4378338a4b8cb9d7992407b4.cu | /*
* Copyright 2008-2009 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ocustorage/coarray_internal.h"
namespace ocu {
void
TransferRequestQ::process1d()
{
#ifdef OCU_OMP
omp_set_lock(&_lock);
#endif
for (int i=0; i < _q1d.size(); i++) {
TransferRequest1D d = _q1d[i];
// handle the request
switch(d.cmd) {
case TRANSFER_HOST_TO_DEVICE:
xfer_hregion1d_to_dregion1d(d.dst, d.src, d.num_bytes);
break;
case TRANSFER_DEVICE_TO_HOST:
xfer_dregion1d_to_hregion1d(d.dst, d.src, d.num_bytes);
break;
default:
printf("[ERROR] ransferRequestQ::process1d - invalid cmd %d\n", (unsigned int)d.cmd);
}
}
_q1d.clear();
#ifdef OCU_OMP
omp_unset_lock(&_lock);
#endif
}
void
TransferRequestQ::process3d()
{
#ifdef OCU_OMP
omp_set_lock(&_lock);
#endif
for (int i=0; i < _q3d.size(); i++) {
TransferRequest3D d = _q3d[i];
// handle the request
switch(d.cmd) {
case TRANSFER_HOSTBUFFER_TO_DEVICE:
xfer_hbuffer_to_dregion3d(d.dst, d.host_buffer, d.device_buffer, d.method);
break;
case TRANSFER_DEVICE_TO_HOSTBUFFER:
xfer_dregion3d_to_hbuffer(d.host_buffer, d.device_buffer, d.src, d.method);
break;
default:
printf("[ERROR] ransferRequestQ::process3d - invalid cmd %d\n", (unsigned int)d.cmd);
}
}
_q3d.clear();
#ifdef OCU_OMP
omp_unset_lock(&_lock);
#endif
}
void
TransferRequestQ::processalloc()
{
#ifdef OCU_OMP
omp_set_lock(&_lock);
#endif
for (int i=0; i < _qalloc.size(); i++) {
TransferRequestAlloc d = _qalloc[i];
// handle the request
switch(d.cmd) {
case TRANSFER_ALLOCATE:
{
cudaError_t ok = cudaMalloc(d.result, d.num_bytes);
if (ok != cudaSuccess)
printf("[ERROR] TransferRequestQ::processalloc - cudaMalloc failed: %s\n", cudaGetErrorString(ok));
else
*d.valid = true;
}
break;
case TRANSFER_FREE:
{
cudaFree(d.ptr);
}
break;
default:
printf("[ERROR] ransferRequestQ::processalloc - invalid cmd %d\n", (unsigned int)d.cmd);
}
}
_qalloc.clear();
#ifdef OCU_OMP
omp_unset_lock(&_lock);
#endif
}
} // end namespace
|
d13aceb4adead02975f3cd0446b616d1b2ad165a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/bboxUtils.h"
#include "hipcub/hipcub.hpp"
#include "hip/hip_runtime_api.h"
#include "efficientNMSInference.cuh"
#include "efficientNMSInference.h"
#define NMS_TILES 5
using namespace nvinfer1;
template <typename T>
__device__ float IOU(EfficientNMSParameters param, BoxCorner<T> box1, BoxCorner<T> box2)
{
// Regardless of the selected box coding, IOU is always performed in BoxCorner coding.
// The boxes are copied so that they can be reordered without affecting the originals.
BoxCorner<T> b1 = box1;
BoxCorner<T> b2 = box2;
b1.reorder();
b2.reorder();
float intersectArea = BoxCorner<T>::intersect(b1, b2).area();
if (intersectArea <= 0.f)
{
return 0.f;
}
float unionArea = b1.area() + b2.area() - intersectArea;
if (unionArea <= 0.f)
{
return 0.f;
}
return intersectArea / unionArea;
}
template <typename T, typename Tb>
__device__ BoxCorner<T> DecodeBoxes(EfficientNMSParameters param, int boxIdx, int anchorIdx,
const Tb* __restrict__ boxesInput, const Tb* __restrict__ anchorsInput)
{
// The inputs will be in the selected coding format, as well as the decoding function. But the decoded box
// will always be returned as BoxCorner.
Tb box = boxesInput[boxIdx];
if (!param.boxDecoder)
{
return BoxCorner<T>(box);
}
Tb anchor = anchorsInput[anchorIdx];
box.reorder();
anchor.reorder();
return BoxCorner<T>(box.decode(anchor));
}
template <typename T, typename Tb>
__device__ void MapNMSData(EfficientNMSParameters param, int idx, int imageIdx, const Tb* __restrict__ boxesInput,
const Tb* __restrict__ anchorsInput, const int* __restrict__ topClassData, const int* __restrict__ topAnchorsData,
const int* __restrict__ topNumData, const T* __restrict__ sortedScoresData, const int* __restrict__ sortedIndexData,
T& scoreMap, int& classMap, BoxCorner<T>& boxMap, int& boxIdxMap)
{
// idx: Holds the NMS box index, within the current batch.
// idxSort: Holds the batched NMS box index, which indexes the (filtered, but sorted) score buffer.
// scoreMap: Holds the score that corresponds to the indexed box being processed by NMS.
if (idx >= topNumData[imageIdx])
{
return;
}
int idxSort = imageIdx * param.numScoreElements + idx;
scoreMap = sortedScoresData[idxSort];
// idxMap: Holds the re-mapped index, which indexes the (filtered, but unsorted) buffers.
// classMap: Holds the class that corresponds to the idx'th sorted score being processed by NMS.
// anchorMap: Holds the anchor that corresponds to the idx'th sorted score being processed by NMS.
int idxMap = imageIdx * param.numScoreElements + sortedIndexData[idxSort];
classMap = topClassData[idxMap];
int anchorMap = topAnchorsData[idxMap];
// boxIdxMap: Holds the re-re-mapped index, which indexes the (unfiltered, and unsorted) boxes input buffer.
boxIdxMap = -1;
if (param.shareLocation) // Shape of boxesInput: [batchSize, numAnchors, 1, 4]
{
boxIdxMap = imageIdx * param.numAnchors + anchorMap;
}
else // Shape of boxesInput: [batchSize, numAnchors, numClasses, 4]
{
int batchOffset = imageIdx * param.numAnchors * param.numClasses;
int anchorOffset = anchorMap * param.numClasses;
boxIdxMap = batchOffset + anchorOffset + classMap;
}
// anchorIdxMap: Holds the re-re-mapped index, which indexes the (unfiltered, and unsorted) anchors input buffer.
int anchorIdxMap = -1;
if (param.shareAnchors) // Shape of anchorsInput: [1, numAnchors, 4]
{
anchorIdxMap = anchorMap;
}
else // Shape of anchorsInput: [batchSize, numAnchors, 4]
{
anchorIdxMap = imageIdx * param.numAnchors + anchorMap;
}
// boxMap: Holds the box that corresponds to the idx'th sorted score being processed by NMS.
boxMap = DecodeBoxes<T, Tb>(param, boxIdxMap, anchorIdxMap, boxesInput, anchorsInput);
}
template <typename T>
__device__ void WriteNMSResult(EfficientNMSParameters param, int* __restrict__ numDetectionsOutput,
T* __restrict__ nmsScoresOutput, int* __restrict__ nmsClassesOutput, BoxCorner<T>* __restrict__ nmsBoxesOutput,
T threadScore, int threadClass, BoxCorner<T> threadBox, int imageIdx, unsigned int resultsCounter)
{
int outputIdx = imageIdx * param.numOutputBoxes + resultsCounter - 1;
if (param.scoreSigmoid)
{
nmsScoresOutput[outputIdx] = sigmoid_mp(threadScore);
}
else if (param.scoreBits > 0)
{
nmsScoresOutput[outputIdx] = add_mp(threadScore, (T) -1);
}
else
{
nmsScoresOutput[outputIdx] = threadScore;
}
nmsClassesOutput[outputIdx] = threadClass;
if (param.clipBoxes)
{
nmsBoxesOutput[outputIdx] = threadBox.clip((T) 0, (T) 1);
}
else
{
nmsBoxesOutput[outputIdx] = threadBox;
}
numDetectionsOutput[imageIdx] = resultsCounter;
}
__device__ void WriteONNXResult(EfficientNMSParameters param, int* outputIndexData, int* __restrict__ nmsIndicesOutput,
int imageIdx, int threadClass, int boxIdxMap)
{
int index = boxIdxMap % param.numAnchors;
int idx = atomicAdd((unsigned int*) &outputIndexData[0], 1);
nmsIndicesOutput[idx * 3 + 0] = imageIdx;
nmsIndicesOutput[idx * 3 + 1] = threadClass;
nmsIndicesOutput[idx * 3 + 2] = index;
}
__global__ void PadONNXResult(EfficientNMSParameters param, int* outputIndexData, int* __restrict__ nmsIndicesOutput)
{
if (threadIdx.x > 0)
{
return;
}
int pidx = outputIndexData[0] - 1;
if (pidx < 0)
{
return;
}
for (int idx = pidx + 1; idx < param.batchSize * param.numOutputBoxes; idx++)
{
nmsIndicesOutput[idx * 3 + 0] = nmsIndicesOutput[pidx * 3 + 0];
nmsIndicesOutput[idx * 3 + 1] = nmsIndicesOutput[pidx * 3 + 1];
nmsIndicesOutput[idx * 3 + 2] = nmsIndicesOutput[pidx * 3 + 2];
}
}
template <typename T, typename Tb>
__global__ void EfficientNMS(EfficientNMSParameters param, const int* topNumData, int* outputIndexData,
int* outputClassData, const int* sortedIndexData, const T* __restrict__ sortedScoresData,
const int* __restrict__ topClassData, const int* __restrict__ topAnchorsData, const Tb* __restrict__ boxesInput,
const Tb* __restrict__ anchorsInput, int* __restrict__ numDetectionsOutput, T* __restrict__ nmsScoresOutput,
int* __restrict__ nmsClassesOutput, int* __restrict__ nmsIndicesOutput, BoxCorner<T>* __restrict__ nmsBoxesOutput)
{
unsigned int thread = threadIdx.x;
unsigned int imageIdx = blockIdx.y;
unsigned int tileSize = blockDim.x;
if (imageIdx >= param.batchSize)
{
return;
}
int numSelectedBoxes = min(topNumData[imageIdx], param.numSelectedBoxes);
int numTiles = (numSelectedBoxes + tileSize - 1) / tileSize;
if (thread >= numSelectedBoxes)
{
return;
}
__shared__ int blockState;
__shared__ unsigned int resultsCounter;
if (thread == 0)
{
blockState = 0;
resultsCounter = 0;
}
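// Each thread handles up to NMS_TILES candidate boxes (tile t covers index
// thread + t * blockDim.x), so per-box state is kept in small per-thread arrays.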
int threadState[NMS_TILES];
unsigned int boxIdx[NMS_TILES];
T threadScore[NMS_TILES];
int threadClass[NMS_TILES];
BoxCorner<T> threadBox[NMS_TILES];
int boxIdxMap[NMS_TILES];
for (int tile = 0; tile < numTiles; tile++)
{
threadState[tile] = 0;
boxIdx[tile] = thread + tile * blockDim.x;
MapNMSData<T, Tb>(param, boxIdx[tile], imageIdx, boxesInput, anchorsInput, topClassData, topAnchorsData,
topNumData, sortedScoresData, sortedIndexData, threadScore[tile], threadClass[tile], threadBox[tile],
boxIdxMap[tile]);
}
// Iterate through all boxes to NMS against.
for (int i = 0; i < numSelectedBoxes; i++)
{
int tile = i / tileSize;
if (boxIdx[tile] == i)
{
// Iteration lead thread, figure out what the other threads should do,
// this will be signaled via the blockState shared variable.
if (threadState[tile] == -1)
{
// Thread already dead, this box was already dropped in a previous iteration,
// because it had a large IOU overlap with another lead thread previously, so
// it would never be kept anyway, therefore it can safely skip all IOU operations
// in this iteration.
blockState = -1; // -1 => Signal all threads to skip iteration
}
else if (threadState[tile] == 0)
{
// As this box will be kept, this is a good place to find what index in the results buffer it
// should have, as this allows to perform an early loop exit if there are enough results.
if (resultsCounter >= param.numOutputBoxes)
{
blockState = -2; // -2 => Signal all threads to do an early loop exit.
}
else
{
// Thread is still alive, because it has not had a large enough IOU overlap with
// any other kept box previously. Therefore, this box will be kept for sure. However,
// we need to check against all other subsequent boxes from this position onward,
// to see how those other boxes will behave in future iterations.
blockState = 1; // +1 => Signal all (higher index) threads to calculate IOU against this box
threadState[tile] = 1; // +1 => Mark this box's thread to be kept and written out to results
// If the numOutputBoxesPerClass check is enabled, write the result only if the limit for this
// class on this image has not been reached yet. Other than (possibly) skipping the write, this
// won't affect anything else in the NMS threading.
bool write = true;
if (param.numOutputBoxesPerClass >= 0)
{
int classCounterIdx = imageIdx * param.numClasses + threadClass[tile];
write = (outputClassData[classCounterIdx] < param.numOutputBoxesPerClass);
outputClassData[classCounterIdx]++;
}
if (write)
{
// This branch is visited by one thread per iteration, so it's safe to do non-atomic increments.
resultsCounter++;
if (param.outputONNXIndices)
{
WriteONNXResult(
param, outputIndexData, nmsIndicesOutput, imageIdx, threadClass[tile], boxIdxMap[tile]);
}
else
{
WriteNMSResult<T>(param, numDetectionsOutput, nmsScoresOutput, nmsClassesOutput,
nmsBoxesOutput, threadScore[tile], threadClass[tile], threadBox[tile], imageIdx,
resultsCounter);
}
}
}
}
else
{
// This state should never be reached, but just in case...
blockState = 0; // 0 => Signal all threads to not do any updates, nothing happens.
}
}
__syncthreads();
if (blockState == -2)
{
// This is the signal to exit from the loop.
return;
}
if (blockState == -1)
{
// This is the signal for all threads to just skip this iteration, as no IOU's need to be checked.
continue;
}
// Grab a box and class to test the current box against. The test box corresponds to iteration i,
// therefore it will have a lower index than the current thread box, and will therefore have a higher score
// than the current box because it's located "before" in the sorted score list.
T testScore;
int testClass;
BoxCorner<T> testBox;
int testBoxIdxMap;
MapNMSData<T, Tb>(param, i, imageIdx, boxesInput, anchorsInput, topClassData, topAnchorsData, topNumData,
sortedScoresData, sortedIndexData, testScore, testClass, testBox, testBoxIdxMap);
for (int tile = 0; tile < numTiles; tile++)
{
// IOU
if (boxIdx[tile] > i && // Make sure two different boxes are being tested, and that it's a higher index;
boxIdx[tile] < numSelectedBoxes && // Make sure the box is within numSelectedBoxes;
blockState == 1 && // Signal that allows IOU checks to be performed;
threadState[tile] == 0 && // Make sure this box hasn't been either dropped or kept already;
threadClass[tile] == testClass && // Compare only boxes of matching classes;
lte_mp(threadScore[tile], testScore) && // Make sure the sorting order of scores is as expected;
IOU<T>(param, threadBox[tile], testBox) >= param.iouThreshold) // And... IOU overlap.
{
// Current box overlaps with the box tested in this iteration, this box will be skipped.
threadState[tile] = -1; // -1 => Mark this box's thread to be dropped.
}
}
}
}
template <typename T>
hipError_t EfficientNMSLauncher(EfficientNMSParameters& param, int* topNumData, int* outputIndexData,
int* outputClassData, int* sortedIndexData, T* sortedScoresData, int* topClassData, int* topAnchorsData,
const void* boxesInput, const void* anchorsInput, int* numDetectionsOutput, T* nmsScoresOutput,
int* nmsClassesOutput, int* nmsIndicesOutput, void* nmsBoxesOutput, hipStream_t stream)
{
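// Choose the NMS thread-block width: large workloads split the selected boxes
// into NMS_TILES tiles per thread, while small ones (<= 512 boxes) fit in a
// single 256- or 512-thread tile.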
unsigned int tileSize = param.numSelectedBoxes / NMS_TILES;
if (param.numSelectedBoxes <= 512)
{
tileSize = 512;
}
if (param.numSelectedBoxes <= 256)
{
tileSize = 256;
}
const dim3 blockSize = {tileSize, 1, 1};
const dim3 gridSize = {1, (unsigned int) param.batchSize, 1};
if (param.boxCoding == 0)
{
hipLaunchKernelGGL(( EfficientNMS<T, BoxCorner<T>>), dim3(gridSize), dim3(blockSize), 0, stream, param, topNumData, outputIndexData,
outputClassData, sortedIndexData, sortedScoresData, topClassData, topAnchorsData,
(BoxCorner<T>*) boxesInput, (BoxCorner<T>*) anchorsInput, numDetectionsOutput, nmsScoresOutput,
nmsClassesOutput, nmsIndicesOutput, (BoxCorner<T>*) nmsBoxesOutput);
}
else if (param.boxCoding == 1)
{
// Note that nmsBoxesOutput is always coded as BoxCorner<T>, regardless of the input coding type.
hipLaunchKernelGGL(( EfficientNMS<T, BoxCenterSize<T>>), dim3(gridSize), dim3(blockSize), 0, stream, param, topNumData, outputIndexData,
outputClassData, sortedIndexData, sortedScoresData, topClassData, topAnchorsData,
(BoxCenterSize<T>*) boxesInput, (BoxCenterSize<T>*) anchorsInput, numDetectionsOutput, nmsScoresOutput,
nmsClassesOutput, nmsIndicesOutput, (BoxCorner<T>*) nmsBoxesOutput);
}
if (param.outputONNXIndices)
{
hipLaunchKernelGGL(( PadONNXResult), dim3(1), dim3(1), 0, stream, param, outputIndexData, nmsIndicesOutput);
}
return hipGetLastError();
}
__global__ void EfficientNMSFilterSegments(EfficientNMSParameters param, const int* __restrict__ topNumData,
int* __restrict__ topOffsetsStartData, int* __restrict__ topOffsetsEndData)
{
int imageIdx = threadIdx.x;
if (imageIdx > param.batchSize)
{
return;
}
topOffsetsStartData[imageIdx] = imageIdx * param.numScoreElements;
topOffsetsEndData[imageIdx] = imageIdx * param.numScoreElements + topNumData[imageIdx];
}
template <typename T>
__global__ void EfficientNMSFilter(EfficientNMSParameters param, const T* __restrict__ scoresInput,
int* __restrict__ topNumData, int* __restrict__ topIndexData, int* __restrict__ topAnchorsData,
T* __restrict__ topScoresData, int* __restrict__ topClassData)
{
int elementIdx = blockDim.x * blockIdx.x + threadIdx.x;
int imageIdx = blockDim.y * blockIdx.y + threadIdx.y;
// Boundary Conditions
if (elementIdx >= param.numScoreElements || imageIdx >= param.batchSize)
{
return;
}
// Shape of scoresInput: [batchSize, numAnchors, numClasses]
int scoresInputIdx = imageIdx * param.numScoreElements + elementIdx;
// For each class, check its corresponding score if it crosses the threshold, and if so select this anchor,
// and keep track of the maximum score and the corresponding (argmax) class id
T score = scoresInput[scoresInputIdx];
if (gte_mp(score, (T) param.scoreThreshold))
{
// Unpack the class and anchor index from the element index
int classIdx = elementIdx % param.numClasses;
int anchorIdx = elementIdx / param.numClasses;
// If this is a background class, ignore it.
if (classIdx == param.backgroundClass)
{
return;
}
// Use an atomic to find an open slot where to write the selected anchor data.
if (topNumData[imageIdx] >= param.numScoreElements)
{
return;
}
int selectedIdx = atomicAdd((unsigned int*) &topNumData[imageIdx], 1);
if (selectedIdx >= param.numScoreElements)
{
topNumData[imageIdx] = param.numScoreElements;
return;
}
// Shape of topScoresData / topClassData: [batchSize, numScoreElements]
int topIdx = imageIdx * param.numScoreElements + selectedIdx;
if (param.scoreBits > 0)
{
score = add_mp(score, (T) 1);
if (gt_mp(score, (T) (2.f - 1.f / 1024.f)))
{
// Ensure the incremented score fits in the mantissa without changing the exponent
score = (2.f - 1.f / 1024.f);
}
}
topIndexData[topIdx] = selectedIdx;
topAnchorsData[topIdx] = anchorIdx;
topScoresData[topIdx] = score;
topClassData[topIdx] = classIdx;
}
}
template <typename T>
__global__ void EfficientNMSDenseIndex(EfficientNMSParameters param, int* __restrict__ topNumData,
int* __restrict__ topIndexData, int* __restrict__ topAnchorsData, int* __restrict__ topOffsetsStartData,
int* __restrict__ topOffsetsEndData, T* __restrict__ topScoresData, int* __restrict__ topClassData)
{
int elementIdx = blockDim.x * blockIdx.x + threadIdx.x;
int imageIdx = blockDim.y * blockIdx.y + threadIdx.y;
if (elementIdx >= param.numScoreElements || imageIdx >= param.batchSize)
{
return;
}
int dataIdx = imageIdx * param.numScoreElements + elementIdx;
int anchorIdx = elementIdx / param.numClasses;
int classIdx = elementIdx % param.numClasses;
if (param.scoreBits > 0)
{
T score = topScoresData[dataIdx];
if (lt_mp(score, (T) param.scoreThreshold))
{
score = (T) 1;
}
else if (classIdx == param.backgroundClass)
{
score = (T) 1;
}
else
{
score = add_mp(score, (T) 1);
if (gt_mp(score, (T) (2.f - 1.f / 1024.f)))
{
// Ensure the incremented score fits in the mantissa without changing the exponent
score = (2.f - 1.f / 1024.f);
}
}
topScoresData[dataIdx] = score;
}
else
{
T score = topScoresData[dataIdx];
if (lt_mp(score, (T) param.scoreThreshold))
{
topScoresData[dataIdx] = -(1 << 15);
}
else if (classIdx == param.backgroundClass)
{
topScoresData[dataIdx] = -(1 << 15);
}
}
topIndexData[dataIdx] = elementIdx;
topAnchorsData[dataIdx] = anchorIdx;
topClassData[dataIdx] = classIdx;
if (elementIdx == 0)
{
// Saturate counters
topNumData[imageIdx] = param.numScoreElements;
topOffsetsStartData[imageIdx] = imageIdx * param.numScoreElements;
topOffsetsEndData[imageIdx] = (imageIdx + 1) * param.numScoreElements;
}
}
template <typename T>
hipError_t EfficientNMSFilterLauncher(EfficientNMSParameters& param, const T* scoresInput, int* topNumData,
int* topIndexData, int* topAnchorsData, int* topOffsetsStartData, int* topOffsetsEndData, T* topScoresData,
int* topClassData, hipStream_t stream)
{
const unsigned int elementsPerBlock = 512;
const unsigned int imagesPerBlock = 1;
const unsigned int elementBlocks = (param.numScoreElements + elementsPerBlock - 1) / elementsPerBlock;
const unsigned int imageBlocks = (param.batchSize + imagesPerBlock - 1) / imagesPerBlock;
const dim3 blockSize = {elementsPerBlock, imagesPerBlock, 1};
const dim3 gridSize = {elementBlocks, imageBlocks, 1};
float kernelSelectThreshold = 0.007f;
if (param.scoreSigmoid)
{
// Inverse Sigmoid
if (param.scoreThreshold <= 0.f)
{
param.scoreThreshold = -(1 << 15);
}
else
{
param.scoreThreshold = logf(param.scoreThreshold / (1.f - param.scoreThreshold));
}
kernelSelectThreshold = logf(kernelSelectThreshold / (1.f - kernelSelectThreshold));
// Disable Score Bits Optimization
param.scoreBits = -1;
}
if (param.scoreThreshold < kernelSelectThreshold)
{
// A full copy of the buffer is necessary because sorting will scramble the input data otherwise.
PLUGIN_CHECK_CUDA(hipMemcpyAsync(topScoresData, scoresInput,
param.batchSize * param.numScoreElements * sizeof(T), hipMemcpyDeviceToDevice, stream));
hipLaunchKernelGGL(( EfficientNMSDenseIndex<T>), dim3(gridSize), dim3(blockSize), 0, stream, param, topNumData, topIndexData, topAnchorsData,
topOffsetsStartData, topOffsetsEndData, topScoresData, topClassData);
}
else
{
hipLaunchKernelGGL(( EfficientNMSFilter<T>), dim3(gridSize), dim3(blockSize), 0, stream,
param, scoresInput, topNumData, topIndexData, topAnchorsData, topScoresData, topClassData);
hipLaunchKernelGGL(( EfficientNMSFilterSegments), dim3(1), dim3(param.batchSize), 0, stream,
param, topNumData, topOffsetsStartData, topOffsetsEndData);
}
return hipGetLastError();
}
template <typename T>
size_t EfficientNMSSortWorkspaceSize(int batchSize, int numScoreElements)
{
size_t sortedWorkspaceSize = 0;
cub::DoubleBuffer<T> keysDB(nullptr, nullptr);
cub::DoubleBuffer<int> valuesDB(nullptr, nullptr);
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, sortedWorkspaceSize, keysDB, valuesDB,
numScoreElements, batchSize, (const int*) nullptr, (const int*) nullptr);
return sortedWorkspaceSize;
}
size_t EfficientNMSWorkspaceSize(int batchSize, int numScoreElements, int numClasses, DataType datatype)
{
size_t total = 0;
const size_t align = 256;
// Counters
// 3 for Filtering
// 1 for Output Indexing
// C for Max per Class Limiting
size_t size = (3 + 1 + numClasses) * batchSize * sizeof(int);
total += size + (size % align ? align - (size % align) : 0);
// Int Buffers
for (int i = 0; i < 4; i++)
{
size = batchSize * numScoreElements * sizeof(int);
total += size + (size % align ? align - (size % align) : 0);
}
// Float Buffers
for (int i = 0; i < 2; i++)
{
size = batchSize * numScoreElements * dataTypeSize(datatype);
total += size + (size % align ? align - (size % align) : 0);
}
// Sort Workspace
if (datatype == DataType::kHALF)
{
size = EfficientNMSSortWorkspaceSize<__half>(batchSize, numScoreElements);
total += size + (size % align ? align - (size % align) : 0);
}
else if (datatype == DataType::kFLOAT)
{
size = EfficientNMSSortWorkspaceSize<float>(batchSize, numScoreElements);
total += size + (size % align ? align - (size % align) : 0);
}
return total;
}
template <typename T>
T* EfficientNMSWorkspace(void* workspace, size_t& offset, size_t elements)
{
T* buffer = (T*) ((size_t) workspace + offset);
size_t align = 256;
size_t size = elements * sizeof(T);
size_t sizeAligned = size + (size % align ? align - (size % align) : 0);
offset += sizeAligned;
return buffer;
}
template <typename T>
pluginStatus_t EfficientNMSDispatch(EfficientNMSParameters param, const void* boxesInput, const void* scoresInput,
const void* anchorsInput, void* numDetectionsOutput, void* nmsBoxesOutput, void* nmsScoresOutput,
void* nmsClassesOutput, void* nmsIndicesOutput, void* workspace, hipStream_t stream)
{
// Clear Outputs (not all elements will get overwritten by the kernels, so safer to clear everything out)
if (param.outputONNXIndices)
{
CSC(hipMemsetAsync(nmsIndicesOutput, 0xFF, param.batchSize * param.numOutputBoxes * 3 * sizeof(int), stream), STATUS_FAILURE);
}
else
{
CSC(hipMemsetAsync(numDetectionsOutput, 0x00, param.batchSize * sizeof(int), stream), STATUS_FAILURE);
CSC(hipMemsetAsync(nmsScoresOutput, 0x00, param.batchSize * param.numOutputBoxes * sizeof(T), stream), STATUS_FAILURE);
CSC(hipMemsetAsync(nmsBoxesOutput, 0x00, param.batchSize * param.numOutputBoxes * 4 * sizeof(T), stream), STATUS_FAILURE);
CSC(hipMemsetAsync(nmsClassesOutput, 0x00, param.batchSize * param.numOutputBoxes * sizeof(int), stream), STATUS_FAILURE);
}
// Empty Inputs
if (param.numScoreElements < 1)
{
return STATUS_SUCCESS;
}
// Counters Workspace
size_t workspaceOffset = 0;
int countersTotalSize = (3 + 1 + param.numClasses) * param.batchSize;
int* topNumData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, countersTotalSize);
int* topOffsetsStartData = topNumData + param.batchSize;
int* topOffsetsEndData = topNumData + 2 * param.batchSize;
int* outputIndexData = topNumData + 3 * param.batchSize;
int* outputClassData = topNumData + 4 * param.batchSize;
CSC(hipMemsetAsync(topNumData, 0x00, countersTotalSize * sizeof(int), stream), STATUS_FAILURE);
hipError_t status = hipGetLastError();
CSC(status, STATUS_FAILURE);
// Other Buffers Workspace
int* topIndexData
= EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
int* topClassData
= EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
int* topAnchorsData
= EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
int* sortedIndexData
= EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
T* topScoresData = EfficientNMSWorkspace<T>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
T* sortedScoresData
= EfficientNMSWorkspace<T>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
size_t sortedWorkspaceSize = EfficientNMSSortWorkspaceSize<T>(param.batchSize, param.numScoreElements);
char* sortedWorkspaceData = EfficientNMSWorkspace<char>(workspace, workspaceOffset, sortedWorkspaceSize);
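// CUB double buffers let the radix sort ping-pong between the unsorted and
// sorted arrays inside the workspace; Current() later returns whichever
// buffer holds the sorted results.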
cub::DoubleBuffer<T> scoresDB(topScoresData, sortedScoresData);
cub::DoubleBuffer<int> indexDB(topIndexData, sortedIndexData);
// Device Specific Properties
int device;
CSC(hipGetDevice(&device), STATUS_FAILURE);
struct hipDeviceProp_t properties;
CSC(hipGetDeviceProperties(&properties, device), STATUS_FAILURE);
if (properties.regsPerBlock >= 65536)
{
// Most Devices
param.numSelectedBoxes = 5000;
}
else
{
// Jetson TX1/TX2
param.numSelectedBoxes = 2000;
}
// Kernels
status = EfficientNMSFilterLauncher<T>(param, (T*) scoresInput, topNumData, topIndexData, topAnchorsData,
topOffsetsStartData, topOffsetsEndData, topScoresData, topClassData, stream);
CSC(status, STATUS_FAILURE);
status = hipcub::DeviceSegmentedRadixSort::SortPairsDescending(sortedWorkspaceData, sortedWorkspaceSize, scoresDB,
indexDB, param.batchSize * param.numScoreElements, param.batchSize, topOffsetsStartData, topOffsetsEndData,
param.scoreBits > 0 ? (10 - param.scoreBits) : 0, param.scoreBits > 0 ? 10 : sizeof(T) * 8, stream, false);
CSC(status, STATUS_FAILURE);
status = EfficientNMSLauncher<T>(param, topNumData, outputIndexData, outputClassData, indexDB.Current(),
scoresDB.Current(), topClassData, topAnchorsData, boxesInput, anchorsInput, (int*) numDetectionsOutput,
(T*) nmsScoresOutput, (int*) nmsClassesOutput, (int*) nmsIndicesOutput, nmsBoxesOutput, stream);
CSC(status, STATUS_FAILURE);
return STATUS_SUCCESS;
}
pluginStatus_t EfficientNMSInference(EfficientNMSParameters param, const void* boxesInput, const void* scoresInput,
const void* anchorsInput, void* numDetectionsOutput, void* nmsBoxesOutput, void* nmsScoresOutput,
void* nmsClassesOutput, void* nmsIndicesOutput, void* workspace, hipStream_t stream)
{
if (param.datatype == DataType::kFLOAT)
{
param.scoreBits = -1;
return EfficientNMSDispatch<float>(param, boxesInput, scoresInput, anchorsInput, numDetectionsOutput,
nmsBoxesOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, workspace, stream);
}
else if (param.datatype == DataType::kHALF)
{
if (param.scoreBits <= 0 || param.scoreBits > 10)
{
param.scoreBits = -1;
}
return EfficientNMSDispatch<__half>(param, boxesInput, scoresInput, anchorsInput, numDetectionsOutput,
nmsBoxesOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, workspace, stream);
}
else
{
return STATUS_NOT_SUPPORTED;
}
}
| d13aceb4adead02975f3cd0446b616d1b2ad165a.cu | /*
* SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/bboxUtils.h"
#include "cub/cub.cuh"
#include "cuda_runtime_api.h"
#include "efficientNMSInference.cuh"
#include "efficientNMSInference.h"
#define NMS_TILES 5
using namespace nvinfer1;
template <typename T>
__device__ float IOU(EfficientNMSParameters param, BoxCorner<T> box1, BoxCorner<T> box2)
{
// Regardless of the selected box coding, IOU is always performed in BoxCorner coding.
// The boxes are copied so that they can be reordered without affecting the originals.
BoxCorner<T> b1 = box1;
BoxCorner<T> b2 = box2;
b1.reorder();
b2.reorder();
float intersectArea = BoxCorner<T>::intersect(b1, b2).area();
if (intersectArea <= 0.f)
{
return 0.f;
}
float unionArea = b1.area() + b2.area() - intersectArea;
if (unionArea <= 0.f)
{
return 0.f;
}
return intersectArea / unionArea;
}
template <typename T, typename Tb>
__device__ BoxCorner<T> DecodeBoxes(EfficientNMSParameters param, int boxIdx, int anchorIdx,
const Tb* __restrict__ boxesInput, const Tb* __restrict__ anchorsInput)
{
// The inputs will be in the selected coding format, as well as the decoding function. But the decoded box
// will always be returned as BoxCorner.
Tb box = boxesInput[boxIdx];
if (!param.boxDecoder)
{
return BoxCorner<T>(box);
}
Tb anchor = anchorsInput[anchorIdx];
box.reorder();
anchor.reorder();
return BoxCorner<T>(box.decode(anchor));
}
template <typename T, typename Tb>
__device__ void MapNMSData(EfficientNMSParameters param, int idx, int imageIdx, const Tb* __restrict__ boxesInput,
const Tb* __restrict__ anchorsInput, const int* __restrict__ topClassData, const int* __restrict__ topAnchorsData,
const int* __restrict__ topNumData, const T* __restrict__ sortedScoresData, const int* __restrict__ sortedIndexData,
T& scoreMap, int& classMap, BoxCorner<T>& boxMap, int& boxIdxMap)
{
// idx: Holds the NMS box index, within the current batch.
// idxSort: Holds the batched NMS box index, which indexes the (filtered, but sorted) score buffer.
// scoreMap: Holds the score that corresponds to the indexed box being processed by NMS.
if (idx >= topNumData[imageIdx])
{
return;
}
int idxSort = imageIdx * param.numScoreElements + idx;
scoreMap = sortedScoresData[idxSort];
// idxMap: Holds the re-mapped index, which indexes the (filtered, but unsorted) buffers.
// classMap: Holds the class that corresponds to the idx'th sorted score being processed by NMS.
// anchorMap: Holds the anchor that corresponds to the idx'th sorted score being processed by NMS.
int idxMap = imageIdx * param.numScoreElements + sortedIndexData[idxSort];
classMap = topClassData[idxMap];
int anchorMap = topAnchorsData[idxMap];
// boxIdxMap: Holds the re-re-mapped index, which indexes the (unfiltered, and unsorted) boxes input buffer.
boxIdxMap = -1;
if (param.shareLocation) // Shape of boxesInput: [batchSize, numAnchors, 1, 4]
{
boxIdxMap = imageIdx * param.numAnchors + anchorMap;
}
else // Shape of boxesInput: [batchSize, numAnchors, numClasses, 4]
{
int batchOffset = imageIdx * param.numAnchors * param.numClasses;
int anchorOffset = anchorMap * param.numClasses;
boxIdxMap = batchOffset + anchorOffset + classMap;
}
// anchorIdxMap: Holds the re-re-mapped index, which indexes the (unfiltered, and unsorted) anchors input buffer.
int anchorIdxMap = -1;
if (param.shareAnchors) // Shape of anchorsInput: [1, numAnchors, 4]
{
anchorIdxMap = anchorMap;
}
else // Shape of anchorsInput: [batchSize, numAnchors, 4]
{
anchorIdxMap = imageIdx * param.numAnchors + anchorMap;
}
// boxMap: Holds the box that corresponds to the idx'th sorted score being processed by NMS.
boxMap = DecodeBoxes<T, Tb>(param, boxIdxMap, anchorIdxMap, boxesInput, anchorsInput);
}
template <typename T>
__device__ void WriteNMSResult(EfficientNMSParameters param, int* __restrict__ numDetectionsOutput,
T* __restrict__ nmsScoresOutput, int* __restrict__ nmsClassesOutput, BoxCorner<T>* __restrict__ nmsBoxesOutput,
T threadScore, int threadClass, BoxCorner<T> threadBox, int imageIdx, unsigned int resultsCounter)
{
int outputIdx = imageIdx * param.numOutputBoxes + resultsCounter - 1;
if (param.scoreSigmoid)
{
nmsScoresOutput[outputIdx] = sigmoid_mp(threadScore);
}
else if (param.scoreBits > 0)
{
nmsScoresOutput[outputIdx] = add_mp(threadScore, (T) -1);
}
else
{
nmsScoresOutput[outputIdx] = threadScore;
}
nmsClassesOutput[outputIdx] = threadClass;
if (param.clipBoxes)
{
nmsBoxesOutput[outputIdx] = threadBox.clip((T) 0, (T) 1);
}
else
{
nmsBoxesOutput[outputIdx] = threadBox;
}
numDetectionsOutput[imageIdx] = resultsCounter;
}
__device__ void WriteONNXResult(EfficientNMSParameters param, int* outputIndexData, int* __restrict__ nmsIndicesOutput,
int imageIdx, int threadClass, int boxIdxMap)
{
int index = boxIdxMap % param.numAnchors;
int idx = atomicAdd((unsigned int*) &outputIndexData[0], 1);
nmsIndicesOutput[idx * 3 + 0] = imageIdx;
nmsIndicesOutput[idx * 3 + 1] = threadClass;
nmsIndicesOutput[idx * 3 + 2] = index;
}
__global__ void PadONNXResult(EfficientNMSParameters param, int* outputIndexData, int* __restrict__ nmsIndicesOutput)
{
if (threadIdx.x > 0)
{
return;
}
int pidx = outputIndexData[0] - 1;
if (pidx < 0)
{
return;
}
for (int idx = pidx + 1; idx < param.batchSize * param.numOutputBoxes; idx++)
{
nmsIndicesOutput[idx * 3 + 0] = nmsIndicesOutput[pidx * 3 + 0];
nmsIndicesOutput[idx * 3 + 1] = nmsIndicesOutput[pidx * 3 + 1];
nmsIndicesOutput[idx * 3 + 2] = nmsIndicesOutput[pidx * 3 + 2];
}
}
template <typename T, typename Tb>
__global__ void EfficientNMS(EfficientNMSParameters param, const int* topNumData, int* outputIndexData,
int* outputClassData, const int* sortedIndexData, const T* __restrict__ sortedScoresData,
const int* __restrict__ topClassData, const int* __restrict__ topAnchorsData, const Tb* __restrict__ boxesInput,
const Tb* __restrict__ anchorsInput, int* __restrict__ numDetectionsOutput, T* __restrict__ nmsScoresOutput,
int* __restrict__ nmsClassesOutput, int* __restrict__ nmsIndicesOutput, BoxCorner<T>* __restrict__ nmsBoxesOutput)
{
unsigned int thread = threadIdx.x;
unsigned int imageIdx = blockIdx.y;
unsigned int tileSize = blockDim.x;
if (imageIdx >= param.batchSize)
{
return;
}
int numSelectedBoxes = min(topNumData[imageIdx], param.numSelectedBoxes);
int numTiles = (numSelectedBoxes + tileSize - 1) / tileSize;
if (thread >= numSelectedBoxes)
{
return;
}
__shared__ int blockState;
__shared__ unsigned int resultsCounter;
if (thread == 0)
{
blockState = 0;
resultsCounter = 0;
}
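// Each thread handles up to NMS_TILES candidate boxes (tile t covers index
// thread + t * blockDim.x), so per-box state is kept in small per-thread arrays.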
int threadState[NMS_TILES];
unsigned int boxIdx[NMS_TILES];
T threadScore[NMS_TILES];
int threadClass[NMS_TILES];
BoxCorner<T> threadBox[NMS_TILES];
int boxIdxMap[NMS_TILES];
for (int tile = 0; tile < numTiles; tile++)
{
threadState[tile] = 0;
boxIdx[tile] = thread + tile * blockDim.x;
MapNMSData<T, Tb>(param, boxIdx[tile], imageIdx, boxesInput, anchorsInput, topClassData, topAnchorsData,
topNumData, sortedScoresData, sortedIndexData, threadScore[tile], threadClass[tile], threadBox[tile],
boxIdxMap[tile]);
}
// Iterate through all boxes to NMS against.
for (int i = 0; i < numSelectedBoxes; i++)
{
int tile = i / tileSize;
if (boxIdx[tile] == i)
{
// Iteration lead thread, figure out what the other threads should do,
// this will be signaled via the blockState shared variable.
if (threadState[tile] == -1)
{
// Thread already dead, this box was already dropped in a previous iteration,
// because it had a large IOU overlap with another lead thread previously, so
// it would never be kept anyway, therefore it can safely skip all IOU operations
// in this iteration.
blockState = -1; // -1 => Signal all threads to skip iteration
}
else if (threadState[tile] == 0)
{
// As this box will be kept, this is a good place to find what index in the results buffer it
// should have, as this allows to perform an early loop exit if there are enough results.
if (resultsCounter >= param.numOutputBoxes)
{
blockState = -2; // -2 => Signal all threads to do an early loop exit.
}
else
{
// Thread is still alive, because it has not had a large enough IOU overlap with
// any other kept box previously. Therefore, this box will be kept for sure. However,
// we need to check against all other subsequent boxes from this position onward,
// to see how those other boxes will behave in future iterations.
blockState = 1; // +1 => Signal all (higher index) threads to calculate IOU against this box
threadState[tile] = 1; // +1 => Mark this box's thread to be kept and written out to results
// If the numOutputBoxesPerClass check is enabled, write the result only if the limit for this
// class on this image has not been reached yet. Other than (possibly) skipping the write, this
// won't affect anything else in the NMS threading.
bool write = true;
if (param.numOutputBoxesPerClass >= 0)
{
int classCounterIdx = imageIdx * param.numClasses + threadClass[tile];
write = (outputClassData[classCounterIdx] < param.numOutputBoxesPerClass);
outputClassData[classCounterIdx]++;
}
if (write)
{
// This branch is visited by one thread per iteration, so it's safe to do non-atomic increments.
resultsCounter++;
if (param.outputONNXIndices)
{
WriteONNXResult(
param, outputIndexData, nmsIndicesOutput, imageIdx, threadClass[tile], boxIdxMap[tile]);
}
else
{
WriteNMSResult<T>(param, numDetectionsOutput, nmsScoresOutput, nmsClassesOutput,
nmsBoxesOutput, threadScore[tile], threadClass[tile], threadBox[tile], imageIdx,
resultsCounter);
}
}
}
}
else
{
// This state should never be reached, but just in case...
blockState = 0; // 0 => Signal all threads to not do any updates, nothing happens.
}
}
__syncthreads();
if (blockState == -2)
{
// This is the signal to exit from the loop.
return;
}
if (blockState == -1)
{
// This is the signal for all threads to just skip this iteration, as no IOU's need to be checked.
continue;
}
// Grab a box and class to test the current box against. The test box corresponds to iteration i,
// therefore it will have a lower index than the current thread box, and will therefore have a higher score
// than the current box because it's located "before" in the sorted score list.
T testScore;
int testClass;
BoxCorner<T> testBox;
int testBoxIdxMap;
MapNMSData<T, Tb>(param, i, imageIdx, boxesInput, anchorsInput, topClassData, topAnchorsData, topNumData,
sortedScoresData, sortedIndexData, testScore, testClass, testBox, testBoxIdxMap);
for (int tile = 0; tile < numTiles; tile++)
{
// IOU
if (boxIdx[tile] > i && // Make sure two different boxes are being tested, and that it's a higher index;
boxIdx[tile] < numSelectedBoxes && // Make sure the box is within numSelectedBoxes;
blockState == 1 && // Signal that allows IOU checks to be performed;
threadState[tile] == 0 && // Make sure this box hasn't been either dropped or kept already;
threadClass[tile] == testClass && // Compare only boxes of matching classes;
lte_mp(threadScore[tile], testScore) && // Make sure the sorting order of scores is as expected;
IOU<T>(param, threadBox[tile], testBox) >= param.iouThreshold) // And... IOU overlap.
{
// Current box overlaps with the box tested in this iteration, this box will be skipped.
threadState[tile] = -1; // -1 => Mark this box's thread to be dropped.
}
}
}
}
template <typename T>
cudaError_t EfficientNMSLauncher(EfficientNMSParameters& param, int* topNumData, int* outputIndexData,
int* outputClassData, int* sortedIndexData, T* sortedScoresData, int* topClassData, int* topAnchorsData,
const void* boxesInput, const void* anchorsInput, int* numDetectionsOutput, T* nmsScoresOutput,
int* nmsClassesOutput, int* nmsIndicesOutput, void* nmsBoxesOutput, cudaStream_t stream)
{
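// Choose the NMS thread-block width: large workloads split the selected boxes
// into NMS_TILES tiles per thread, while small ones (<= 512 boxes) fit in a
// single 256- or 512-thread tile.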
unsigned int tileSize = param.numSelectedBoxes / NMS_TILES;
if (param.numSelectedBoxes <= 512)
{
tileSize = 512;
}
if (param.numSelectedBoxes <= 256)
{
tileSize = 256;
}
const dim3 blockSize = {tileSize, 1, 1};
const dim3 gridSize = {1, (unsigned int) param.batchSize, 1};
if (param.boxCoding == 0)
{
EfficientNMS<T, BoxCorner<T>><<<gridSize, blockSize, 0, stream>>>(param, topNumData, outputIndexData,
outputClassData, sortedIndexData, sortedScoresData, topClassData, topAnchorsData,
(BoxCorner<T>*) boxesInput, (BoxCorner<T>*) anchorsInput, numDetectionsOutput, nmsScoresOutput,
nmsClassesOutput, nmsIndicesOutput, (BoxCorner<T>*) nmsBoxesOutput);
}
else if (param.boxCoding == 1)
{
// Note that nmsBoxesOutput is always coded as BoxCorner<T>, regardless of the input coding type.
EfficientNMS<T, BoxCenterSize<T>><<<gridSize, blockSize, 0, stream>>>(param, topNumData, outputIndexData,
outputClassData, sortedIndexData, sortedScoresData, topClassData, topAnchorsData,
(BoxCenterSize<T>*) boxesInput, (BoxCenterSize<T>*) anchorsInput, numDetectionsOutput, nmsScoresOutput,
nmsClassesOutput, nmsIndicesOutput, (BoxCorner<T>*) nmsBoxesOutput);
}
if (param.outputONNXIndices)
{
PadONNXResult<<<1, 1, 0, stream>>>(param, outputIndexData, nmsIndicesOutput);
}
return cudaGetLastError();
}
__global__ void EfficientNMSFilterSegments(EfficientNMSParameters param, const int* __restrict__ topNumData,
int* __restrict__ topOffsetsStartData, int* __restrict__ topOffsetsEndData)
{
int imageIdx = threadIdx.x;
if (imageIdx > param.batchSize)
{
return;
}
topOffsetsStartData[imageIdx] = imageIdx * param.numScoreElements;
topOffsetsEndData[imageIdx] = imageIdx * param.numScoreElements + topNumData[imageIdx];
}
template <typename T>
__global__ void EfficientNMSFilter(EfficientNMSParameters param, const T* __restrict__ scoresInput,
int* __restrict__ topNumData, int* __restrict__ topIndexData, int* __restrict__ topAnchorsData,
T* __restrict__ topScoresData, int* __restrict__ topClassData)
{
int elementIdx = blockDim.x * blockIdx.x + threadIdx.x;
int imageIdx = blockDim.y * blockIdx.y + threadIdx.y;
// Boundary Conditions
if (elementIdx >= param.numScoreElements || imageIdx >= param.batchSize)
{
return;
}
// Shape of scoresInput: [batchSize, numAnchors, numClasses]
int scoresInputIdx = imageIdx * param.numScoreElements + elementIdx;
// For each class, check its corresponding score if it crosses the threshold, and if so select this anchor,
// and keep track of the maximum score and the corresponding (argmax) class id
T score = scoresInput[scoresInputIdx];
if (gte_mp(score, (T) param.scoreThreshold))
{
// Unpack the class and anchor index from the element index
int classIdx = elementIdx % param.numClasses;
int anchorIdx = elementIdx / param.numClasses;
// If this is a background class, ignore it.
if (classIdx == param.backgroundClass)
{
return;
}
// Use an atomic to find an open slot where to write the selected anchor data.
if (topNumData[imageIdx] >= param.numScoreElements)
{
return;
}
int selectedIdx = atomicAdd((unsigned int*) &topNumData[imageIdx], 1);
if (selectedIdx >= param.numScoreElements)
{
topNumData[imageIdx] = param.numScoreElements;
return;
}
// Shape of topScoresData / topClassData: [batchSize, numScoreElements]
int topIdx = imageIdx * param.numScoreElements + selectedIdx;
if (param.scoreBits > 0)
{
score = add_mp(score, (T) 1);
if (gt_mp(score, (T) (2.f - 1.f / 1024.f)))
{
// Ensure the incremented score fits in the mantissa without changing the exponent
score = (2.f - 1.f / 1024.f);
}
}
topIndexData[topIdx] = selectedIdx;
topAnchorsData[topIdx] = anchorIdx;
topScoresData[topIdx] = score;
topClassData[topIdx] = classIdx;
}
}
template <typename T>
__global__ void EfficientNMSDenseIndex(EfficientNMSParameters param, int* __restrict__ topNumData,
int* __restrict__ topIndexData, int* __restrict__ topAnchorsData, int* __restrict__ topOffsetsStartData,
int* __restrict__ topOffsetsEndData, T* __restrict__ topScoresData, int* __restrict__ topClassData)
{
int elementIdx = blockDim.x * blockIdx.x + threadIdx.x;
int imageIdx = blockDim.y * blockIdx.y + threadIdx.y;
if (elementIdx >= param.numScoreElements || imageIdx >= param.batchSize)
{
return;
}
int dataIdx = imageIdx * param.numScoreElements + elementIdx;
int anchorIdx = elementIdx / param.numClasses;
int classIdx = elementIdx % param.numClasses;
if (param.scoreBits > 0)
{
T score = topScoresData[dataIdx];
if (lt_mp(score, (T) param.scoreThreshold))
{
score = (T) 1;
}
else if (classIdx == param.backgroundClass)
{
score = (T) 1;
}
else
{
score = add_mp(score, (T) 1);
if (gt_mp(score, (T) (2.f - 1.f / 1024.f)))
{
// Ensure the incremented score fits in the mantissa without changing the exponent
score = (2.f - 1.f / 1024.f);
}
}
topScoresData[dataIdx] = score;
}
else
{
T score = topScoresData[dataIdx];
if (lt_mp(score, (T) param.scoreThreshold))
{
topScoresData[dataIdx] = -(1 << 15);
}
else if (classIdx == param.backgroundClass)
{
topScoresData[dataIdx] = -(1 << 15);
}
}
topIndexData[dataIdx] = elementIdx;
topAnchorsData[dataIdx] = anchorIdx;
topClassData[dataIdx] = classIdx;
if (elementIdx == 0)
{
// Saturate counters
topNumData[imageIdx] = param.numScoreElements;
topOffsetsStartData[imageIdx] = imageIdx * param.numScoreElements;
topOffsetsEndData[imageIdx] = (imageIdx + 1) * param.numScoreElements;
}
}
template <typename T>
cudaError_t EfficientNMSFilterLauncher(EfficientNMSParameters& param, const T* scoresInput, int* topNumData,
int* topIndexData, int* topAnchorsData, int* topOffsetsStartData, int* topOffsetsEndData, T* topScoresData,
int* topClassData, cudaStream_t stream)
{
const unsigned int elementsPerBlock = 512;
const unsigned int imagesPerBlock = 1;
const unsigned int elementBlocks = (param.numScoreElements + elementsPerBlock - 1) / elementsPerBlock;
const unsigned int imageBlocks = (param.batchSize + imagesPerBlock - 1) / imagesPerBlock;
const dim3 blockSize = {elementsPerBlock, imagesPerBlock, 1};
const dim3 gridSize = {elementBlocks, imageBlocks, 1};
float kernelSelectThreshold = 0.007f;
if (param.scoreSigmoid)
{
// Inverse Sigmoid
if (param.scoreThreshold <= 0.f)
{
param.scoreThreshold = -(1 << 15);
}
else
{
param.scoreThreshold = logf(param.scoreThreshold / (1.f - param.scoreThreshold));
}
kernelSelectThreshold = logf(kernelSelectThreshold / (1.f - kernelSelectThreshold));
// Disable Score Bits Optimization
param.scoreBits = -1;
}
if (param.scoreThreshold < kernelSelectThreshold)
{
// A full copy of the buffer is necessary because sorting will scramble the input data otherwise.
PLUGIN_CHECK_CUDA(cudaMemcpyAsync(topScoresData, scoresInput,
param.batchSize * param.numScoreElements * sizeof(T), cudaMemcpyDeviceToDevice, stream));
EfficientNMSDenseIndex<T><<<gridSize, blockSize, 0, stream>>>(param, topNumData, topIndexData, topAnchorsData,
topOffsetsStartData, topOffsetsEndData, topScoresData, topClassData);
}
else
{
EfficientNMSFilter<T><<<gridSize, blockSize, 0, stream>>>(
param, scoresInput, topNumData, topIndexData, topAnchorsData, topScoresData, topClassData);
EfficientNMSFilterSegments<<<1, param.batchSize, 0, stream>>>(
param, topNumData, topOffsetsStartData, topOffsetsEndData);
}
return cudaGetLastError();
}
template <typename T>
size_t EfficientNMSSortWorkspaceSize(int batchSize, int numScoreElements)
{
size_t sortedWorkspaceSize = 0;
cub::DoubleBuffer<T> keysDB(nullptr, nullptr);
cub::DoubleBuffer<int> valuesDB(nullptr, nullptr);
cub::DeviceSegmentedRadixSort::SortPairsDescending(nullptr, sortedWorkspaceSize, keysDB, valuesDB,
numScoreElements, batchSize, (const int*) nullptr, (const int*) nullptr);
return sortedWorkspaceSize;
}
size_t EfficientNMSWorkspaceSize(int batchSize, int numScoreElements, int numClasses, DataType datatype)
{
size_t total = 0;
const size_t align = 256;
// Counters
// 3 for Filtering
// 1 for Output Indexing
// C for Max per Class Limiting
size_t size = (3 + 1 + numClasses) * batchSize * sizeof(int);
total += size + (size % align ? align - (size % align) : 0);
// Int Buffers
for (int i = 0; i < 4; i++)
{
size = batchSize * numScoreElements * sizeof(int);
total += size + (size % align ? align - (size % align) : 0);
}
// Float Buffers
for (int i = 0; i < 2; i++)
{
size = batchSize * numScoreElements * dataTypeSize(datatype);
total += size + (size % align ? align - (size % align) : 0);
}
// Sort Workspace
if (datatype == DataType::kHALF)
{
size = EfficientNMSSortWorkspaceSize<__half>(batchSize, numScoreElements);
total += size + (size % align ? align - (size % align) : 0);
}
else if (datatype == DataType::kFLOAT)
{
size = EfficientNMSSortWorkspaceSize<float>(batchSize, numScoreElements);
total += size + (size % align ? align - (size % align) : 0);
}
return total;
}
template <typename T>
T* EfficientNMSWorkspace(void* workspace, size_t& offset, size_t elements)
{
T* buffer = (T*) ((size_t) workspace + offset);
size_t align = 256;
size_t size = elements * sizeof(T);
size_t sizeAligned = size + (size % align ? align - (size % align) : 0);
offset += sizeAligned;
return buffer;
}
template <typename T>
pluginStatus_t EfficientNMSDispatch(EfficientNMSParameters param, const void* boxesInput, const void* scoresInput,
const void* anchorsInput, void* numDetectionsOutput, void* nmsBoxesOutput, void* nmsScoresOutput,
void* nmsClassesOutput, void* nmsIndicesOutput, void* workspace, cudaStream_t stream)
{
// Clear Outputs (not all elements will get overwritten by the kernels, so safer to clear everything out)
if (param.outputONNXIndices)
{
CSC(cudaMemsetAsync(nmsIndicesOutput, 0xFF, param.batchSize * param.numOutputBoxes * 3 * sizeof(int), stream), STATUS_FAILURE);
}
else
{
CSC(cudaMemsetAsync(numDetectionsOutput, 0x00, param.batchSize * sizeof(int), stream), STATUS_FAILURE);
CSC(cudaMemsetAsync(nmsScoresOutput, 0x00, param.batchSize * param.numOutputBoxes * sizeof(T), stream), STATUS_FAILURE);
CSC(cudaMemsetAsync(nmsBoxesOutput, 0x00, param.batchSize * param.numOutputBoxes * 4 * sizeof(T), stream), STATUS_FAILURE);
CSC(cudaMemsetAsync(nmsClassesOutput, 0x00, param.batchSize * param.numOutputBoxes * sizeof(int), stream), STATUS_FAILURE);
}
// Empty Inputs
if (param.numScoreElements < 1)
{
return STATUS_SUCCESS;
}
// Counters Workspace
size_t workspaceOffset = 0;
int countersTotalSize = (3 + 1 + param.numClasses) * param.batchSize;
int* topNumData = EfficientNMSWorkspace<int>(workspace, workspaceOffset, countersTotalSize);
int* topOffsetsStartData = topNumData + param.batchSize;
int* topOffsetsEndData = topNumData + 2 * param.batchSize;
int* outputIndexData = topNumData + 3 * param.batchSize;
int* outputClassData = topNumData + 4 * param.batchSize;
CSC(cudaMemsetAsync(topNumData, 0x00, countersTotalSize * sizeof(int), stream), STATUS_FAILURE);
cudaError_t status = cudaGetLastError();
CSC(status, STATUS_FAILURE);
// Other Buffers Workspace
int* topIndexData
= EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
int* topClassData
= EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
int* topAnchorsData
= EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
int* sortedIndexData
= EfficientNMSWorkspace<int>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
T* topScoresData = EfficientNMSWorkspace<T>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
T* sortedScoresData
= EfficientNMSWorkspace<T>(workspace, workspaceOffset, param.batchSize * param.numScoreElements);
size_t sortedWorkspaceSize = EfficientNMSSortWorkspaceSize<T>(param.batchSize, param.numScoreElements);
char* sortedWorkspaceData = EfficientNMSWorkspace<char>(workspace, workspaceOffset, sortedWorkspaceSize);
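// CUB double buffers let the radix sort ping-pong between the unsorted and
// sorted arrays inside the workspace; Current() later returns whichever
// buffer holds the sorted results.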
cub::DoubleBuffer<T> scoresDB(topScoresData, sortedScoresData);
cub::DoubleBuffer<int> indexDB(topIndexData, sortedIndexData);
// Device Specific Properties
int device;
CSC(cudaGetDevice(&device), STATUS_FAILURE);
struct cudaDeviceProp properties;
CSC(cudaGetDeviceProperties(&properties, device), STATUS_FAILURE);
if (properties.regsPerBlock >= 65536)
{
// Most Devices
param.numSelectedBoxes = 5000;
}
else
{
// Jetson TX1/TX2
param.numSelectedBoxes = 2000;
}
// Kernels
status = EfficientNMSFilterLauncher<T>(param, (T*) scoresInput, topNumData, topIndexData, topAnchorsData,
topOffsetsStartData, topOffsetsEndData, topScoresData, topClassData, stream);
CSC(status, STATUS_FAILURE);
status = cub::DeviceSegmentedRadixSort::SortPairsDescending(sortedWorkspaceData, sortedWorkspaceSize, scoresDB,
indexDB, param.batchSize * param.numScoreElements, param.batchSize, topOffsetsStartData, topOffsetsEndData,
param.scoreBits > 0 ? (10 - param.scoreBits) : 0, param.scoreBits > 0 ? 10 : sizeof(T) * 8, stream, false);
CSC(status, STATUS_FAILURE);
status = EfficientNMSLauncher<T>(param, topNumData, outputIndexData, outputClassData, indexDB.Current(),
scoresDB.Current(), topClassData, topAnchorsData, boxesInput, anchorsInput, (int*) numDetectionsOutput,
(T*) nmsScoresOutput, (int*) nmsClassesOutput, (int*) nmsIndicesOutput, nmsBoxesOutput, stream);
CSC(status, STATUS_FAILURE);
return STATUS_SUCCESS;
}
pluginStatus_t EfficientNMSInference(EfficientNMSParameters param, const void* boxesInput, const void* scoresInput,
const void* anchorsInput, void* numDetectionsOutput, void* nmsBoxesOutput, void* nmsScoresOutput,
void* nmsClassesOutput, void* nmsIndicesOutput, void* workspace, cudaStream_t stream)
{
if (param.datatype == DataType::kFLOAT)
{
param.scoreBits = -1;
return EfficientNMSDispatch<float>(param, boxesInput, scoresInput, anchorsInput, numDetectionsOutput,
nmsBoxesOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, workspace, stream);
}
else if (param.datatype == DataType::kHALF)
{
if (param.scoreBits <= 0 || param.scoreBits > 10)
{
param.scoreBits = -1;
}
return EfficientNMSDispatch<__half>(param, boxesInput, scoresInput, anchorsInput, numDetectionsOutput,
nmsBoxesOutput, nmsScoresOutput, nmsClassesOutput, nmsIndicesOutput, workspace, stream);
}
else
{
return STATUS_NOT_SUPPORTED;
}
}
|
267252af93df09a9da6adc1bdde4b62f74d67950.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
} | 267252af93df09a9da6adc1bdde4b62f74d67950.cu | #include "includes.h"
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
__shared__ float tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
} |
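// A minimal host-side launch sketch for the kernel above (illustrative only; it
// assumes the usual sample values TILE_DIM = 32 and BLOCK_ROWS = 8 from includes.h,
// and that nx and ny are multiples of TILE_DIM; pointer names are placeholders):
static void launchTransposeNoBankConflicts(float* d_odata, const float* d_idata, int nx, int ny)
{
    dim3 block(TILE_DIM, BLOCK_ROWS);        // e.g. 32 x 8 threads per block
    dim3 grid(nx / TILE_DIM, ny / TILE_DIM); // one block per TILE_DIM x TILE_DIM tile
    // The +1 padding on the shared tile staggers columns across shared-memory banks,
    // so the strided column reads in the write-out loop avoid bank conflicts.
    transposeNoBankConflicts<<<grid, block>>>(d_odata, d_idata);
}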
4fcf53db13f61382bb3c48cfc0c42bc8bb7ca4b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_copy_velocity( const int num_atoms, const double* g_vx_i, const double* g_vy_i, const double* g_vz_i, double* g_vx_o, double* g_vy_o, double* g_vz_o)
{
const int n = threadIdx.x + blockIdx.x * blockDim.x;
if (n < num_atoms) {
g_vx_o[n] = g_vx_i[n];
g_vy_o[n] = g_vy_i[n];
g_vz_o[n] = g_vz_i[n];
}
} | 4fcf53db13f61382bb3c48cfc0c42bc8bb7ca4b1.cu | #include "includes.h"
__global__ void gpu_copy_velocity( const int num_atoms, const double* g_vx_i, const double* g_vy_i, const double* g_vz_i, double* g_vx_o, double* g_vy_o, double* g_vz_o)
{
const int n = threadIdx.x + blockIdx.x * blockDim.x;
if (n < num_atoms) {
g_vx_o[n] = g_vx_i[n];
g_vy_o[n] = g_vy_i[n];
g_vz_o[n] = g_vz_i[n];
}
} |
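// A minimal host-side launch sketch for the copy kernel above (illustrative only;
// the block size of 128 and the pointer names are arbitrary, not from the source):
static void launch_gpu_copy_velocity(int num_atoms,
    const double* g_vx_i, const double* g_vy_i, const double* g_vz_i,
    double* g_vx_o, double* g_vy_o, double* g_vz_o)
{
    const int block_size = 128;
    const int grid_size = (num_atoms + block_size - 1) / block_size; // round up so every atom is covered
    gpu_copy_velocity<<<grid_size, block_size>>>(num_atoms, g_vx_i, g_vy_i, g_vz_i, g_vx_o, g_vy_o, g_vz_o);
}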
cec5c02cf72027d06df6aaa3de130559a20f4608.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Version 0.51
// Went over some attempts to ameliorate local accesses -- not very successful, basically.
// Correction in "Get Lap phi" routine.
// Version 0.52:
// Change Lap A, Grad A routines to load CHAR4 p_tri_per_neigh instead of loading data
// to interrogate neighbour periodic status.
// Change major area calc in the INNERMOST/OUTERMOST case.
// Note that central area calc does not look right.
// Version 0.53:
// Made changes to Reladvect_nT because it was taking wrong connection for OUTERMOST.
// Changed flag tests & treatment of Inner verts in preceding routines.
// Version 0.54:
// Adjusted area calculations as written in spec.
// We set ins crossing tri minor area = 0, centroid on ins;
// frill area = 0, centroid on boundary.
// Version 0.6:
// Debugging and making corrections.
// ==
// Version 0.7:
// Debugging ... there is a kernel launch failure for Antiadvect Adot
// PLAN:
// Allow that on GPU we can move outside domain and it's fine, we do not change PB data.
// PB data will be only changed on CPU.
// Nonetheless we kept PBCTri lists which can be updated, unlike has_periodic alone, in case
// of moving something to its image within the domain.
// NOTES:
// Ensure that outside the domain, n_major is recorded as 0
// Ensure that outside the domain, resistive_heat is recorded as 0
extern real FRILL_CENTROID_OUTER_RADIUS, FRILL_CENTROID_INNER_RADIUS;
__global__ void Kernel_CalculateTriMinorAreas_AndCentroids
(structural * __restrict__ p_info_sharing, // for vertex positions
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
// Output:
f64 * __restrict__ p_area_minor,
f64_vec2 * __restrict__ p_tri_centroid)
{
__shared__ f64_vec2 shared_vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE];
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long tid = threadIdx.x + blockIdx.x * blockDim.x;
// Note that we only do a fetch with the first half of threads:
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
structural info = p_info_sharing[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
// Well here is a major problem.
// mti.StartMajor is a separate value for every thread.
// How can we make it do a contiguous access?
// Suppose a 1:1 correspondence between minor blocks and major blocks...
// that is ONE way.
shared_vertex_pos[threadIdx.x] = info.pos;
// shared_shorts[threadIdx.x].flag = info.flag;
// shared_shorts[threadIdx.x].neigh_len = info.neigh_len;
// these were never used.
};
// If we make an extended array then we can always go through that code.
__syncthreads();
// Triangle area * 2/3 is area of minor cell.
// if (tid < Ntris) { // redundant test if we do it right
LONG3 corner_index = p_tri_corner_index[tid];
CHAR4 perinfo = p_tri_perinfo[tid];
// Do we ever require those and not the neighbours?
// Yes - this time for instance.
f64_vec2 pos1, pos2, pos3;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
pos1 = shared_vertex_pos[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i1];
pos1 = info.pos;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
pos2 = shared_vertex_pos[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i2];
pos2 = info.pos;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
pos3 = shared_vertex_pos[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i3];
pos3 = info.pos;
}
if (perinfo.per0+perinfo.per1+perinfo.per2 == 0) {
} else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI)
pos1 = Anticlock_rotate2(pos1);
if (perinfo.per0 == NEEDS_CLOCK)
pos1 = Clockwise_rotate2(pos1);
if (perinfo.per1 == NEEDS_ANTI)
pos2 = Anticlock_rotate2(pos2);
if (perinfo.per1 == NEEDS_CLOCK)
pos2 = Clockwise_rotate2(pos2);
if (perinfo.per2 == NEEDS_ANTI)
pos3 = Anticlock_rotate2(pos3);
if (perinfo.per2 == NEEDS_CLOCK)
pos3 = Clockwise_rotate2(pos3);
};
// Now we've got to decide what to do about minor cells near the edges.
// Edge of memory: triangles should not continue to the edge.
// Ultimately the edge of memory will be mostly within cathode rods and suchlike things.
// So we don't need to connect tri mesh beyond outermost row of vertices, even if we could.
// Realise that this edge cell crosses into the insulator and so should be assigned nv_r = 0
// We do not know what order the corners are given in.
// So use fabs:
f64 area = fabs(0.5*( (pos2.x+pos1.x)*(pos2.y-pos1.y)
+ (pos3.x+pos2.x)*(pos3.y-pos2.y)
+ (pos1.x+pos3.x)*(pos1.y-pos3.y)
) );
f64_vec2 centroid = THIRD*(pos1+pos2+pos3);
if (area > 1.0e-3) {
printf("tri %d area %1.3E pos_x %1.6E %1.6E %1.6E \n"
" pos_y %1.6E %1.6E %1.6E \n",tid,area,
pos1.x,pos2.x,pos3.x,
pos1.y,pos2.y,pos3.y);
}
if (perinfo.flag == OUTER_FRILL)
{
f64_vec2 temp = 0.5*(pos1+pos2);
temp.project_to_radius(centroid, FRILL_CENTROID_OUTER_RADIUS_d);
area = 1.0e-14; // == 0 but tiny is less likely to cause 1/0
}
if (perinfo.flag == INNER_FRILL)
{
f64_vec2 temp = 0.5*(pos1+pos2);
temp.project_to_radius(centroid, FRILL_CENTROID_INNER_RADIUS_d);
area = 1.0e-14; // == 0 but tiny is less likely to cause 1/0
}
if (perinfo.flag == CROSSING_INS) {
f64_vec2 centroid2;
centroid.project_to_ins(centroid2);
centroid = centroid2;
// The major cells will abut the insulator.
// Only count the % of the area that is in the domain.
//bool b1, b2, b3;
//b1 = (pos1.x*pos1.x+pos1.y*pos1.y > INSULATOR_OUTER_RADIUS*INSULATOR_OUTER_RADIUS);
//b2 = (pos2.x*pos2.x+pos2.y*pos2.y > INSULATOR_OUTER_RADIUS*INSULATOR_OUTER_RADIUS);
//b3 = (pos3.x*pos3.x+pos3.y*pos3.y > INSULATOR_OUTER_RADIUS*INSULATOR_OUTER_RADIUS);
// Save ourselves some bother for now by setting area to be near 0.
// area = 1.0e-14;
// FOR NOW, legislate v = 0 in insulator-crossing tris.
// And so avoid having to do an awkward area calculation.
// Stick with correct area for tri as area variable.
// Possibly we never use 'area' except for domain-related matters; if that can be
// verified, then it's best to change to 'domain_intersection_area', however tricky.
}
p_tri_centroid[tid] = centroid;
p_area_minor[tid] = 0.666666666666667*area;
if (p_area_minor[tid] < 0.0) {
printf("kernel -- tid %d flag %d area %1.8E \n",tid,perinfo.flag,area);
};
// Perhaps we need instead to read data from neighbours to create tri minor area.
// Note that we subsequently CHANGED the nodes of minor mesh to be at averages
// so that we could average neatly A to edges. However, this means TWOTHIRDS*tri area
// is not an exact estimate.
}
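// The triangle area above is the shoelace formula, wrapped in fabs() because the
// corner ordering is unknown, and the stored minor area is then 2/3 of it. A small
// host-side restatement of the same arithmetic (illustrative only):
static f64 ShoelaceTriangleArea(f64_vec2 pos1, f64_vec2 pos2, f64_vec2 pos3)
{
    // e.g. corners (0,0), (1,0), (0,1) give 0.5, so the stored minor area would be 1/3.
    return fabs(0.5*( (pos2.x+pos1.x)*(pos2.y-pos1.y)
                    + (pos3.x+pos2.x)*(pos3.y-pos2.y)
                    + (pos1.x+pos3.x)*(pos1.y-pos3.y) ));
}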
// FOR OUTERMOST,
//
// | 4 \/ 3 |
// pt0| ------- |pt3
// 0 2
// pt1| 1 |pt2
// If we have an outer point,
// then the number of neighs is not the number of tris;
// SO EXPLOIT THIS
// Make sure that the omitted edge is the one that would go between the frill tris.
// This has to go into the reconstructing code that will
// generate the mesh with frill tris.
// ---------------------------------------------------------------------------------
// We'll want to calculate areas for triangles AND for central cells.
// But they require different codes so might as well be different kernels.
// Central area = sum of 1/6 of each neighbouring tri minor area.
// So now let's write central area calc routine:
// We should be passed the pointer to the start of the central minor area array.
__global__ void Kernel_CalculateCentralMinorAreas (
structural * __restrict__ p_info_sharing,
long * __restrict__ p_IndexTri,
f64 * __restrict__ p_triminor_area,
// Output:
f64 * __restrict__ p_area_minor
// pass output array starting from the central array start
)
{
__shared__ f64 shared_area[SIZE_OF_TRI_TILE_FOR_MAJOR];
__shared__ long Indextri[MAXNEIGH_d*threadsPerTileMajor];
// 2*8+12*4 = 64 bytes => room for 768 threads in 48K
long index = threadIdx.x + blockIdx.x * blockDim.x;
// Load in minor data: how to manage this? fill in with 2 strides; rely on contiguity.
long StartMinor = blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR; // Have to do this way.
// will this be recognised as contiguous access?
shared_area[threadIdx.x] =
p_triminor_area[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
shared_area[blockDim.x + threadIdx.x] =
p_triminor_area[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + blockDim.x + threadIdx.x];
// Loaded in 2 times as many areas as central cells
__syncthreads();
//if (index < Nverts)
{
structural info = p_info_sharing[index];
memcpy(Indextri + MAXNEIGH_d*threadIdx.x,
p_IndexTri + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long)); // MAXNEIGH_d should be chosen to be 12, for 1 full bus.
f64 sum = 0.0;
#pragma unroll 12
for (short iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indextri < StartMinor) || (indextri >= StartMinor+SIZE_OF_TRI_TILE_FOR_MAJOR))
{
sum += p_triminor_area[indextri];
} else {
sum += shared_area[indextri-StartMinor];
}
}
// place separation of central from edge cell at 1/3 along line.
// Then have 1/9 area in central shard, 2/3 in edge minor,
// so (1/9)/(2/3) = 1/6
p_area_minor[index] = sum*SIXTH;
if (sum < 0.0) {
printf("kerncentral -- tid %d area %1.2E \n",index,p_area_minor[index]);
};
};
// This may give funny results at the edges of memory, where we have added
// areas only of shards that formed part of triangles. But that is the expected
// behaviour here.
// If we had frills with repeated corners, then we get area 0 from them.
// If we used a special vertex then we had to do something special in setting area - perhaps we want it to = 0.
// BUG:
// This 1/6 only holds as long as we position the minor joins on the lines
// between vertices. If we use (1/3)(vertex + centroid 1 + centroid 2)
// then we should not be doing this area sum. Rather, given each pair of
// triangles, we can infer the area of the triangle that is part of central cell.
}
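// The SIXTH factor above: placing the central/edge split 1/3 of the way along each
// line from the vertex leaves (1/9) of each neighbouring triangle in the central
// shard, while p_triminor_area holds (2/3) of the triangle, and (1/9)/(2/3) = 1/6.
// A small host-side numeric check (illustrative only):
static bool CheckCentralSixthFactor(f64 triangleArea)
{
    f64 centralShard = triangleArea/9.0;                // central cell's share of one triangle
    f64 storedMinor  = 0.666666666666667*triangleArea;  // what p_triminor_area holds
    return fabs(centralShard - storedMinor/6.0) < 1.0e-14*triangleArea;
}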
__global__ void Kernel_CalculateMajorAreas (
structural * __restrict__ p_info,
f64_vec2 * __restrict__ p_tri_centroid,
long * __restrict__ pIndexTri,
char * __restrict__ pPBCtri,
// Output:
f64 * __restrict__ p_area
)
{
__shared__ f64_vec2 shared_centroids[SIZE_OF_TRI_TILE_FOR_MAJOR];
__shared__ long Indextri[MAXNEIGH_d*threadsPerTileMajor];
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajor];
long StartMinor = blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR; // Have to do this way.
shared_centroids[threadIdx.x] =
p_tri_centroid[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
shared_centroids[blockDim.x + threadIdx.x] =
p_tri_centroid[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + blockDim.x + threadIdx.x];
//
//if (shared_centroids[threadIdx.x].x*shared_centroids[threadIdx.x].x
// + shared_centroids[threadIdx.x].y*shared_centroids[threadIdx.x].y > DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS)
// shared_centroids[threadIdx.x].project_to_radius(shared_centroids[threadIdx.x],DOMAIN_OUTER_RADIUS);
//
//if (shared_centroids[threadIdx.x].x*shared_centroids[threadIdx.x].x
// + shared_centroids[threadIdx.x].y*shared_centroids[threadIdx.x].y < INNER_A_BOUNDARY*INNER_A_BOUNDARY)
// shared_centroids[threadIdx.x].project_to_radius(shared_centroids[threadIdx.x],INNER_A_BOUNDARY);
//
//if (shared_centroids[blockDim.x + threadIdx.x].x*shared_centroids[blockDim.x + threadIdx.x].x
// + shared_centroids[blockDim.x + threadIdx.x].y*shared_centroids[blockDim.x + threadIdx.x].y > DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS)
// shared_centroids[blockDim.x + threadIdx.x].project_to_radius(shared_centroids[blockDim.x + threadIdx.x],DOMAIN_OUTER_RADIUS);
//
//if (shared_centroids[blockDim.x + threadIdx.x].x*shared_centroids[blockDim.x + threadIdx.x].x
// + shared_centroids[blockDim.x + threadIdx.x].y*shared_centroids[blockDim.x + threadIdx.x].y < INNER_A_BOUNDARY*INNER_A_BOUNDARY)
// shared_centroids[blockDim.x + threadIdx.x].project_to_radius(shared_centroids[blockDim.x + threadIdx.x],INNER_A_BOUNDARY);
//
__syncthreads();
long index = threadIdx.x + blockIdx.x * blockDim.x;
f64_vec2 uprev, unext;
//if (index < Nverts) { // redundant test; should be guaranteed by the launch configuration
structural info = p_info[index];
memcpy(Indextri + MAXNEIGH_d*threadIdx.x,
pIndexTri + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long)); // MAXNEIGH_d should be chosen to be 12, for 1 full bus.
memcpy(PBCtri + MAXNEIGH_d*threadIdx.x,
pPBCtri + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(char)); // MAXNEIGH_d should be chosen to be 12, for 1 full bus.
f64 grad_x_integrated_x = 0.0;
// Going to do shoelace on tri centroids which must be sorted anticlockwise.
// If we have a frilled e.g.OUTERMOST vertex, we shall find that
// info.neigh_len = 4 whereas tri_len = 5. Bear in mind...
if ((info.flag != OUTERMOST) && (info.flag != INNERMOST))
{
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + info.neigh_len-1];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
uprev = shared_centroids[indextri-StartMinor];
} else {
uprev = p_tri_centroid[indextri];
}
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + info.neigh_len-1];
if (PBC == NEEDS_CLOCK) {
uprev = Clockwise_rotate2(uprev);
}
if (PBC == NEEDS_ANTI) {
uprev = Anticlock_rotate2(uprev);
}
short iNeigh;
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++) // iNeigh is the anticlockwise one
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
unext = shared_centroids[indextri-StartMinor];
} else {
unext = p_tri_centroid[indextri];
}
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + iNeigh];
if (PBC == NEEDS_CLOCK) {
unext = Clockwise_rotate2(unext);
}
if (PBC == NEEDS_ANTI) {
unext = Anticlock_rotate2(unext);
}
// Get edge_normal.x and average x on edge
grad_x_integrated_x += //0.5*(unext.x+uprev.x)*edge_normal.x
0.5*(unext.x+uprev.x)*(unext.y-uprev.y);
uprev = unext;
};
} else {
// FOR THE OUTERMOST / INNERMOST CELLS :
// In this case we basically substituted tri_len for neigh_len:
// Also we project frill centroid on to the inner/outer radius.
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + info.neigh_len];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
uprev = shared_centroids[indextri-StartMinor];
} else {
uprev = p_tri_centroid[indextri];
}
if (uprev.x*uprev.x + uprev.y*uprev.y > DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS)
uprev.project_to_radius(uprev,DOMAIN_OUTER_RADIUS);
if (uprev.x*uprev.x + uprev.y*uprev.y < INNER_A_BOUNDARY*INNER_A_BOUNDARY)
uprev.project_to_radius(uprev,INNER_A_BOUNDARY);
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + info.neigh_len];
if (PBC == NEEDS_CLOCK) uprev = Clockwise_rotate2(uprev);
if (PBC == NEEDS_ANTI) uprev = Anticlock_rotate2(uprev);
short iNeigh;
for (iNeigh = 0; iNeigh < info.neigh_len+1; iNeigh++) // iNeigh is the anticlockwise one
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
unext = shared_centroids[indextri-StartMinor];
} else {
unext = p_tri_centroid[indextri];
}
if (unext.x*unext.x + unext.y*unext.y > DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS)
unext.project_to_radius(unext,DOMAIN_OUTER_RADIUS);
if (unext.x*unext.x + unext.y*unext.y < INNER_A_BOUNDARY*INNER_A_BOUNDARY)
unext.project_to_radius(unext,INNER_A_BOUNDARY);
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + iNeigh];
if (PBC == NEEDS_CLOCK) unext = Clockwise_rotate2(unext);
if (PBC == NEEDS_ANTI) unext = Anticlock_rotate2(unext);
grad_x_integrated_x += 0.5*(unext.x+uprev.x)*(unext.y-uprev.y);
// We do have to even count the edge looking into frills, or polygon
// area would not be right.
uprev = unext;
};
};
p_area[index] = grad_x_integrated_x;
/*if ((index == 36685)) {
printf("index %d flag %d area %1.3E \n",
index, info.flag, grad_x_integrated_x);
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + info.neigh_len-1];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
uprev = shared_centroids[indextri-StartMinor];
} else {
uprev = p_tri_centroid[indextri];
}
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + info.neigh_len-1];
if (PBC == NEEDS_CLOCK) {
uprev = Clockwise_rotate2(uprev);
}
if (PBC == NEEDS_ANTI) {
uprev = Anticlock_rotate2(uprev);
}
//printf("uprev %1.5E %1.5E ... %1.5E\n",uprev.x,uprev.y,uprev.modulus());
short iNeigh;
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++) // iNeigh is the anticlockwise one
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
unext = shared_centroids[indextri-StartMinor];
} else {
unext = p_tri_centroid[indextri];
}
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + iNeigh];
if (PBC == NEEDS_CLOCK) {
unext = Clockwise_rotate2(unext);
}
if (PBC == NEEDS_ANTI) {
unext = Anticlock_rotate2(unext);
}
// printf("unext %1.5E %1.5E ... %1.5E \n",unext.x,unext.y,unext.modulus());
// Get edge_normal.x and average x on edge
grad_x_integrated_x += //0.5*(unext.x+uprev.x)*edge_normal.x
0.5*(unext.x+uprev.x)*(unext.y-uprev.y);
uprev = unext;
};
};*/
}
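// The accumulated sum above is the line integral of x dy around the polygon of
// surrounding tri centroids (Green's theorem), which equals the polygon area as
// long as the centroids are visited anticlockwise -- hence no fabs() here, unlike
// the per-triangle routine. A host-side version of the same sum (illustrative only;
// pts must be ordered anticlockwise):
static f64 PolygonAreaAnticlockwise(const f64_vec2* pts, int n)
{
    f64 area = 0.0;
    f64_vec2 uprev = pts[n-1];
    for (int i = 0; i < n; i++) {
        f64_vec2 unext = pts[i];
        area += 0.5*(unext.x + uprev.x)*(unext.y - uprev.y); // 0.5*(x1+x2)*dy per edge
        uprev = unext;
    }
    return area; // comes out negative if the points were in fact clockwise
}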
__global__ void Kernel_Average_nT_to_tri_minors (
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
// Output:
nT * __restrict__ p_minor_nT_neut,
nT * __restrict__ p_minor_nT_ion,
nT * __restrict__ p_minor_nT_elec
)
{
// Average by area so that we get the same total mass on minor mesh as on major.
// We have to know intersection. It's not always 1/3 of triangle is it.
// ??
// Even corner positions do not tell us intersection. We'd have to know the neighbouring
// centroid also.
__shared__ nT shared_nT[SIZE_OF_MAJOR_PER_TRI_TILE];
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_nT[threadIdx.x] = p_nT_neut[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
__syncthreads();
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long tid = threadIdx.x + blockIdx.x * blockDim.x;
nT nT1, nT2, nT3, nT_out;
LONG3 corner_index;
CHAR4 per_info = p_tri_perinfo[tid];
corner_index = p_tri_corner_index[tid];
// Do we ever require those and not the neighbours? Yes - this time for instance.
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT1 = shared_nT[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT1 = p_nT_neut[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT2 = shared_nT[corner_index.i2-StartMajor];
} else {
nT2 = p_nT_neut[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT3 = shared_nT[corner_index.i3-StartMajor];
} else {
nT3 = p_nT_neut[corner_index.i3];
}
if (per_info.flag == CROSSING_INS) {
// An idea: Ensure that outside the domain, n is recorded as 0
int divide = 0;
nT_out.n = 0.0;
nT_out.T = 0.0;
if (nT1.n > 0.0) {
nT_out.n += nT1.n;
nT_out.T += nT1.T;
divide++;
}
if (nT2.n > 0.0) {
nT_out.n += nT2.n;
nT_out.T += nT2.T;
divide++;
}
if (nT3.n > 0.0) {
nT_out.n += nT3.n;
nT_out.T += nT3.T;
divide++;
}
nT_out.n /= (real)divide;
nT_out.T /= (real)divide;
} else {
nT_out.n = THIRD*(nT1.n+nT2.n+nT3.n);
nT_out.T = THIRD*(nT1.T+nT2.T+nT3.T);
};
// SO THIS IS JUST ROUGH FOR NOW? What we wanted to do:
// Sum (Area_intersection * nT) / Sum(Area_intersection)
// You cannot get the intersection area just from knowing the corner positions.
// But do note that since centroid = (1/3)(sum of positions), (1/3) represents linear interpolation on a plane.
p_minor_nT_neut[tid] = nT_out;
__syncthreads();
// Now repeat same thing for each species
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_nT[threadIdx.x] = p_nT_ion[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
__syncthreads();
//if (tid < Ntris) {
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT1 = shared_nT[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT1 = p_nT_ion[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT2 = shared_nT[corner_index.i2-StartMajor];
} else {
nT2 = p_nT_ion[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT3 = shared_nT[corner_index.i3-StartMajor];
} else {
nT3 = p_nT_ion[corner_index.i3];
}
if (per_info.flag == CROSSING_INS) {
// An idea: Ensure that outside the domain, n is recorded as 0
int divide = 0;
nT_out.n = 0.0;
nT_out.T = 0.0;
if (nT1.n > 0.0) {
nT_out.n += nT1.n;
nT_out.T += nT1.T;
divide++;
}
if (nT2.n > 0.0) {
nT_out.n += nT2.n;
nT_out.T += nT2.T;
divide++;
}
if (nT3.n > 0.0) {
nT_out.n += nT3.n;
nT_out.T += nT3.T;
divide++;
}
nT_out.n /= (real)divide;
nT_out.T /= (real)divide;
} else {
nT_out.n = THIRD*(nT1.n+nT2.n+nT3.n);
nT_out.T = THIRD*(nT1.T+nT2.T+nT3.T);
};
p_minor_nT_ion[tid] = nT_out;
//};
__syncthreads();
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_nT[threadIdx.x] = p_nT_elec[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
__syncthreads();
//if (tid < Ntris) {
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT1 = shared_nT[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT1 = p_nT_elec[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT2 = shared_nT[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
nT2 = p_nT_elec[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT3 = shared_nT[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
nT3 = p_nT_elec[corner_index.i3];
}
if (per_info.flag == CROSSING_INS) {
// An idea: Ensure that outside the domain, n is recorded as 0
int divide = 0;
nT_out.n = 0.0;
nT_out.T = 0.0;
if (nT1.n > 0.0) {
nT_out.n += nT1.n;
nT_out.T += nT1.T;
divide++;
}
if (nT2.n > 0.0) {
nT_out.n += nT2.n;
nT_out.T += nT2.T;
divide++;
}
if (nT3.n > 0.0) {
nT_out.n += nT3.n;
nT_out.T += nT3.T;
divide++;
}
nT_out.n /= (real)divide;
nT_out.T /= (real)divide;
} else {
nT_out.n = THIRD*(nT1.n+nT2.n+nT3.n);
nT_out.T = THIRD*(nT1.T+nT2.T+nT3.T);
};
p_minor_nT_elec[tid] = nT_out;
// if frills have corners repeated, we end up with 1/3+2/3 --- should never matter.
// If special vertex, probably we set nT at special vertex to 0 so 1/3+1/3.
// nT should not be important at frills, as outermost points and innermost points
// do not need to know pressure.
}
__global__ void Kernel_GetZCurrent(
CHAR4 * __restrict__ p_minor_info,
nT * __restrict__ p_minor_nT_ion,
nT * __restrict__ p_minor_nT_elec,
f64_vec3 * __restrict__ p_minor_v_ion,
f64_vec3 * __restrict__ p_minor_v_elec, // Not clear if this should be nv or {n,v} ? {n,v}
f64 * __restrict__ p_area_minor,
f64 * __restrict__ p_summands )
{
__shared__ f64 intrablock[threadsPerTileMinor];
long tid = threadIdx.x + blockIdx.x * blockDim.x;
CHAR4 minor_info = p_minor_info[tid];
// This is called for all minor cells.
if ((minor_info.flag == DOMAIN_MINOR) || (minor_info.flag == OUTERMOST)) {
// Let DOMAIN_MINOR == DOMAIN_TRIANGLE ...
// And if you are DOMAIN_MINOR then n,v should be meaningful.
// Other possible values:
// OUTERMOST_CENTRAL == OUTERMOST, OUTER_FRILL, INNERMOST_CENTRAL, INNER_FRILL, INNER_TRIANGLE,
// CROSSING_INS, INNER_CENTRAL --
f64 n_ion = p_minor_nT_ion[tid].n;
f64 n_e = p_minor_nT_elec[tid].n;
f64_vec3 v_ion = p_minor_v_ion[tid];
f64_vec3 v_e = p_minor_v_elec[tid];
f64 Iz = q*(n_ion*v_ion.z - n_e*v_e.z)*p_area_minor[tid];
// Lots of bus loads, hopefully all contig.
intrablock[threadIdx.x] = Iz;
// HERE ASSUMED that area is calculated as DOMAIN INTERSECTION AREA
// if we start including nv in insulator-crossing tris.
} else {
intrablock[threadIdx.x] = 0.0;
};
__syncthreads();
// Now it's the aggregation:
int s = blockDim.x;
int k = s/2;
while (s != 1) {
if (threadIdx.x < k)
{
intrablock[threadIdx.x] += intrablock[threadIdx.x + k];
};
__syncthreads();
// Attempt to modify:
if ((s % 2 == 1) && (threadIdx.x == k-1)){
intrablock[threadIdx.x] += intrablock[s-1];
};
// In case s == 81 (k == 40), add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s/2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_summands[blockIdx.x] = intrablock[0];
};
} // Doesn't matter much if function is slow, I think it is only called for debug purposes anyway.
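// The kernel above leaves one partial sum per block in p_summands; the total Iz
// still has to be accumulated across blocks. A minimal host-side sketch of that
// final step (illustrative only; numBlocks is however many blocks were launched):
static f64 SumZCurrentOnHost(const f64* p_summands_device, int numBlocks)
{
    f64* partial = new f64[numBlocks];
    hipMemcpy(partial, p_summands_device, numBlocks*sizeof(f64), hipMemcpyDeviceToHost);
    f64 Iz = 0.0;
    for (int i = 0; i < numBlocks; i++) Iz += partial[i];
    delete[] partial;
    return Iz;
}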
__global__ void Kernel_Create_v_overall_and_newpos(
structural * __restrict__ p_info,
f64 const h,
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
f64_vec3 * __restrict__ p_v_neut,
f64_vec3 * __restrict__ p_v_ion,
f64_vec3 * __restrict__ p_v_elec,
// Output:
structural * __restrict__ p_info_out,
f64_vec2 * __restrict__ p_v_overall
)
{
long tid = threadIdx.x + blockIdx.x * blockDim.x;
//if (tid < Nverts)
structural info = p_info[tid];
f64_vec2 v_save;
if (info.flag == DOMAIN_VERTEX)
{
nT nT_neut, nT_ion, nT_elec;
f64_vec3 v_n, v_i, v_e;
nT_neut = p_nT_neut[tid];
nT_ion = p_nT_ion[tid];
nT_elec = p_nT_elec[tid];
v_n = p_v_neut[tid];
v_i = p_v_ion[tid];
v_e = p_v_elec[tid]; // expensive loads; can we avoid function by putting it in with smth else?
f64_vec3 v_overall = (m_n*nT_neut.n*v_n + m_ion*nT_ion.n*v_i + m_e*nT_elec.n*v_e)/
(m_n*nT_neut.n + m_ion*nT_ion.n + m_e*nT_elec.n);
v_save.x = v_overall.x;
v_save.y = v_overall.y;
info.pos += h*v_save;
} else {
v_save.x = 0.0; v_save.y = 0.0;
}
p_v_overall[tid] = v_save;
p_info_out[tid] = info; // safer to do unnecessary write of whole object to get contiguity.
// can we do anything else with the data?
// We could transfer it to shared and do something with it. But there isn't anything.
}
__global__ void Kernel_Average_v_overall_to_tris (
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
f64_vec2 * __restrict__ p_v_overall,
f64_vec2 * __restrict__ p_tri_centroid,
// Output:
f64_vec2 * __restrict__ p_minor_v_overall
)
{
__shared__ f64_vec2 shared_v[SIZE_OF_MAJOR_PER_TRI_TILE];
// Averaging as 1/3 to tris.
// Even corner positions do not tell us intersection. We'd have to know the neighbouring
// centroid also.
// Load to shared:
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_v[threadIdx.x] = p_v_overall[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
// Let's hope it works with that sort of index. If it doesn't we're in a tough situation.
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long tid = threadIdx.x + blockIdx.x * blockDim.x;
__syncthreads();
f64_vec2 v0, v1, v2, v_out;
LONG3 corner_index;
CHAR4 perinfo;
//if (tid < Ntris) { // redundant check
corner_index = p_tri_corner_index[tid];
perinfo = p_tri_perinfo[tid];
if ((perinfo.flag == DOMAIN_TRIANGLE) ||
(perinfo.flag == CROSSING_INS))
{
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
v0 = shared_v[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
v0 = p_v_overall[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
v1 = shared_v[corner_index.i2-StartMajor];
} else {
v1 = p_v_overall[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
v2 = shared_v[corner_index.i3-StartMajor];
} else {
v2 = p_v_overall[corner_index.i3];
}
if (perinfo.per0+perinfo.per1+perinfo.per2 == 0) {
} else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI)
v0 = Anticlock_rotate2(v0);
if (perinfo.per0 == NEEDS_CLOCK)
v0 = Clockwise_rotate2(v0);
if (perinfo.per1 == NEEDS_ANTI)
v1 = Anticlock_rotate2(v1);
if (perinfo.per1 == NEEDS_CLOCK)
v1 = Clockwise_rotate2(v1);
if (perinfo.per2 == NEEDS_ANTI)
v2 = Anticlock_rotate2(v2);
if (perinfo.per2 == NEEDS_CLOCK)
v2 = Clockwise_rotate2(v2);
};
v_out = THIRD*(v0+v1+v2);
// For insulator triangle,
// we should take v_overall_r = 0
// because this tri centroid will remain on the insulator.
// It is OK to average with places that should have v_overall = 0.
if (perinfo.flag == CROSSING_INS)
{
f64_vec2 r = p_tri_centroid[tid]; // random accesses??
//f64_vec2 rhat = r/r.modulus();
// v_out = v_out - rhat*v_out.dot(rhat);
v_out = v_out - r*v_out.dot(r)/(r.x*r.x+r.y*r.y);
// Well this is kinda wrong.
}
} else {
v_out.x = 0.0; v_out.y = 0.0;
}
p_minor_v_overall[tid] = v_out;
}
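// The CROSSING_INS branch above removes the radial component of the averaged
// velocity: v_tang = v - r (v.r)/|r|^2, so v_tang.dot(r) == 0 and the insulator
// centroid is not advected radially. A small host-side check of that identity
// (illustrative only; r is assumed nonzero):
static bool CheckTangentialProjection(f64_vec2 v, f64_vec2 r)
{
    f64 rr = r.x*r.x + r.y*r.y;            // |r|^2
    f64_vec2 v_tang = v - r*(v.dot(r))/rr; // same formula as in the kernel
    return fabs(v_tang.dot(r)) < 1.0e-12*sqrt(rr*(v.x*v.x + v.y*v.y));
}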
__global__ void Kernel_Average_nnionrec_to_tris
(
CHAR4 * __restrict__ p_tri_perinfo,
LONG3 * __restrict__ p_tri_corner_index,
nn * __restrict__ p_nn_ionrec,
nn * __restrict__ p_nn_ionrec_minor
)
{
__shared__ nn shared_nn[SIZE_OF_MAJOR_PER_TRI_TILE];
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_nn[threadIdx.x] = p_nn_ionrec[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
// Let's hope it works with that sort of index. If it doesn't we're in a tough situation.
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long tid = threadIdx.x + blockIdx.x * blockDim.x;
__syncthreads();
nn nn0, nn1, nn2;
LONG3 corner_index;
nn nn_out;
//if (tid < Ntris) { // redundant check - ?
corner_index = p_tri_corner_index[tid];
CHAR4 perinfo = p_tri_perinfo[tid];
if ((perinfo.flag == DOMAIN_TRIANGLE) ||
(perinfo.flag == CROSSING_INS))
{
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nn0 = shared_nn[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nn0 = p_nn_ionrec[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nn1 = shared_nn[corner_index.i2-StartMajor];
} else {
nn1 = p_nn_ionrec[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nn2 = shared_nn[corner_index.i3-StartMajor];
} else {
nn2 = p_nn_ionrec[corner_index.i3];
}
nn_out.n_ionise = THIRD*(nn0.n_ionise+nn1.n_ionise+nn2.n_ionise);
nn_out.n_recombine = THIRD*(nn0.n_recombine+nn1.n_recombine+nn2.n_recombine);
if (perinfo.flag == CROSSING_INS)
{
// Ensure that we are not using silly data...
// Assume n_ionise = 0 outside domain.
nn_out.n_ionise = 0.5*(nn0.n_ionise+nn1.n_ionise+nn2.n_ionise);
nn_out.n_recombine = 0.5*(nn0.n_recombine+nn1.n_recombine+nn2.n_recombine);
}
} else {
nn_out.n_ionise = 0.0;
nn_out.n_recombine = 0.0;
}
p_nn_ionrec_minor[tid] = nn_out;
}
__global__ void Kernel_RelAdvect_nT(
real h,
structural * __restrict__ p_info, // Advection for domain vertices only
long * __restrict__ pIndexTri,
// char * __restrict__ pPBCTri,
// do we want this - or should we just use has_periodic flag ?
// Debatable: has_periodic flag is non-maintainable if things cross the PB.
// However that is probably all right, we should only like doing PB manip on CPU.
f64_vec2 * __restrict__ p_minor_centroid, // work out tri centroids b4hand
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
nT * __restrict__ p_minor_nT_neut,
nT * __restrict__ p_minor_nT_ion,
nT * __restrict__ p_minor_nT_elec,
f64_vec3 * __restrict__ p_minor_v_neut,
f64_vec3 * __restrict__ p_minor_v_ion,
f64_vec3 * __restrict__ p_minor_v_elec,
f64_vec2 * __restrict__ p_minor_v_overall,
f64 * __restrict__ p_area_old,
f64 * __restrict__ p_area_new,
// dest:
nT * __restrict__ p_nT_neut_out,
nT * __restrict__ p_nT_ion_out,
nT * __restrict__ p_nT_elec_out
)
{
// Idea is, we don't need to look at other nT
// when we do this one -- though we can afterwards
// push through from registry into shared, take grad nT,
// if we want.
// It is reasonable to overwrite and do one species after another.
__shared__ f64_vec2 p_tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 2*2
__shared__ f64_vec2 p_nv_shared[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 2*2
// Note: trimmed to vxy since we do not advect v here.
__shared__ f64 p_T_shared[SIZE_OF_TRI_TILE_FOR_MAJOR]; // +1*2
__shared__ long Indextri[MAXNEIGH_d*threadsPerTileMajor]; // +6 doublesworth
// We could increase the occupancy by changing Indextri to load 1 index at a time
// but if we use 63 registers then I think we get only about 512 per SM anyway.
// FIRM PLAN:
// Put triangles outside the outermost row of vertices. Padding that will make us have
// 2x the number of triangles as vertices.
// These triangles will have v = 0 in our setup.
// In general though they serve no purpose?
// We still need to load a periodic flag for each triangle so not loading a general flag didn't achieve much...
// Alternatively, load a "has periodic" flag for this vertex as part of structural:
// instead of two shorts, have short char char = neigh_len,has_periodic,general_flag
// That seems logical - we don't need a general flag to be a short rather than a char.
// Occupancy calculator says try having 192 instead of 128 in a major tile.
// Just have to see empirically when programming is all done.
// 2 ways round: with tri centroids loading in:
// shared 2 * (2 + 2 + 1) + 6 from indextri = 16 doubles equiv!!
// with vertex pos loading in:
// shared 2 + 2*(2 + 1) + 6 + 6 = 20 doubles equiv!!
// This is simply a huge amount of data to have to deal with.
// Was advection a full up routine before?
// Yes - it was so bad we could not fit in IndexNeigh into shared.
// One way is to CONTIGUOUSLY load Indextri on the go:
// put arrays of member0, member1, etc.
// Would it even recognise as a contiguous load? We could force it to.
// That is an interesting alternative but not sure about it.
// On the plus side, bus activity is reduced by doing it the way we ARE doing it.
// This way, we reuse Indextri for 3 species instead of loading x3.
// Don't have a sense of really how long bus trips take compared to everything else.
// That will be something to learn this time - with nvprof, nSight.
// How much shared now? About 10 doubles per thread.
// 80*256 vs 48*1024 = 192*256. 2 blocks of 256 at a time.
// We are ending up with too few threads running.
// Solution: Store both nv, and T, thus to compose nvT when needed.
// *****
// It would be more sensible to run thread for each triangle but with same stored data as in this block --- surely?
// *****
long StartMinor = blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR;
long EndMinor = (blockIdx.x+1)*SIZE_OF_TRI_TILE_FOR_MAJOR;
long index = blockIdx.x*blockDim.x + threadIdx.x;
f64_vec3 v_3;
f64_vec2 v1, v2, v_overall, v_overall2; // can drop 1 of these...
f64 area_old, area_new;
structural info = p_info[index];
//if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
// TRY WITH AND WITHOUT THIS.
// LOADING IN UNNECESSARY DATA FOR OUT-OF-DOMAIN .VS.
// IF IT DOES NOT TRIGGER CONTIG ACCESS PROPERLY WITHIN BRANCH.
// ... Probably it _IS_ necessary to load unnecessary data.
// ##################################################################
// The easy and correct thing, if we are only treating those
// that are DOMAIN/OUTERMOST, should be to only call for those blocks.
// ##################################################################
// Behaviour we want:
// Valid edges for traffic: domain-domain
// domain-outermost
// outermost-outermost [optional]
// Not valid: traffic into insulator --- but should get v_tri_r == 0
// traffic into frills --- careful to avoid!
// anything not involving domain/outermost
{
{
nT nT_temp = p_minor_nT_neut[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_neut[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_overall = p_minor_v_overall[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall.x;
v2.y = v_3.y-v_overall.y;
p_nv_shared[threadIdx.x] = nT_temp.n*v2;
// **********************************************************************
// CONSIDER: We promised that we would use the J that appears in the
// A-dot advance formula, for flowing charge. Is that what we are doing?
// We averaged n to the minor tile and multiplied by minor velocity rel to mesh.
// I guess that is okay...
// **********************************************************************
p_T_shared[threadIdx.x] = nT_temp.T;
nT_temp = p_minor_nT_neut[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_neut[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_overall2 = p_minor_v_overall[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall2.x;
v2.y = v_3.y-v_overall2.y;
p_nv_shared[blockDim.x + threadIdx.x] = nT_temp.n*v2;
p_T_shared[blockDim.x + threadIdx.x] = nT_temp.T;
}
p_tri_centroid[threadIdx.x] = p_minor_centroid[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
p_tri_centroid[blockDim.x + threadIdx.x] = p_minor_centroid[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
}
__syncthreads(); // Avoid putting within branch.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
// h*n*v.dot(edgenormal) is amount of traffic between major cells
// Next job is to compute edge_normal
// And decide whether this is a legit edge for traffic or not.
nT nTsrc = p_nT_neut[index]; // Not needed up here...
area_old = p_area_old[index];
area_new = p_area_new[index];
// hope that by putting here we get contiguous access.
memcpy(Indextri + MAXNEIGH_d*threadIdx.x,
pIndexTri + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long)); // MAXNEIGH should be chosen to be 12, for 1 full bus.
// memcpy(PBCtri + MAXNEIGH_d*threadIdx.x,
// pPBCTri + MAXNEIGH_d*index,
// MAXNEIGH_d*sizeof(char)); // MAXNEIGH should be chosen to be 12, for 1 full bus.
// By running threads per tri, we'd dispense with Indextri in shared and store solution (towards colour
// array) for vertex tile instead.
// Then we just incur a reload to aggregate the colour array.
// Easiest way:
// Edge involves 2 centres and 2 values of nv etc
f64_vec2 nv1, nvT1, nv2, nvT2, pos1, pos2; // lots of registers ... here 12
short iNeigh1 = info.neigh_len-1; // NOTE it is possible vertex has
// different number of neighs and triangles. What happens?
if (info.flag == OUTERMOST) iNeigh1++; // get to end of array.
short iNeigh2 = 0;
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv1 = p_nv_shared[indextri-StartMinor];
nvT1 = p_T_shared[indextri-StartMinor]*nv1;
pos1 = p_tri_centroid[indextri-StartMinor];
} else {
nT nT1 = p_minor_nT_neut[indextri];
v_3 = p_minor_v_neut[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri]; // local: keep this thread's own v_overall intact for reuse in the later species passes
v1.x = v_3.x-v_overall_.x;
v1.y = v_3.y-v_overall_.y;
nv1 = nT1.n*v1;
nvT1 = nT1.T*nv1;
pos1 = p_minor_centroid[indextri];
// Bad news: 3 separate bus journeys.
// We probably spend AT LEAST half our time here. Tile of 12 x 12 -> 144 within, 48 edge.
// We could be sending a more full bus by putting nTv.
// That would reduce costs here by 33%.
// The increased cost would be, that when we create n,T by averaging, we have to write access only
// part of an nvT struct.
// However, it is possible that a lot of these bus journeys take place at the same time that
// other threads are NOT needing a bus journey. Consider that.
// Stick with separate nT,v for now. We may never know, how much faster nvT would have been.
};
//char PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if (info.has_periodic) {
if ((pos1.x > pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Anticlock_rotate2(nv1); // ANTI is to mean that the tri is on the right, vertex on left.
nvT1 = Anticlock_rotate2(nvT1); // ANTI means apply anticlockwise.
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Clockwise_rotate2(nv1);
nvT1 = Clockwise_rotate2(nvT1);
pos1 = Clockwise_rotate2(pos1);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64 mass, heat;
mass = 0.0; heat = 0.0;
#pragma unroll 12
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh2];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv2 = p_nv_shared[indextri-StartMinor];
nvT2 = p_T_shared[indextri-StartMinor]*nv2;
pos2 = p_tri_centroid[indextri-StartMinor];
} else {
nT nT2 = p_minor_nT_neut[indextri];
v_3 = p_minor_v_neut[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri];
v2.x = v_3.x-v_overall_.x;
v2.y = v_3.y-v_overall_.y;
nv2 = nT2.n*v2;
nvT2 = nT2.T*nv2;
pos2 = p_minor_centroid[indextri];
};
// Two ways to store periodic: either 3 longs in registers, or,
// as an array of chars in shared memory.
// Alternative: each tri knows if it is periodic and we somehow
// load this alongside tri centroid, as a CHAR4.
if (info.has_periodic) {
if ((pos2.x > pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Anticlock_rotate2(nv2); // ANTI is to mean that the tri is on the right, vertex on left.
nvT2 = Anticlock_rotate2(nvT2); // ANTI means apply anticlockwise.
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Clockwise_rotate2(nv2);
nvT2 = Clockwise_rotate2(nvT2);
pos2 = Clockwise_rotate2(pos2);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64_vec2 edgenormal;
edgenormal.x = pos2.y-pos1.y; // 2 is the more anticlockwise one
edgenormal.y = pos1.x-pos2.x;
// At edge of memory, whether we have extra outer tris involved or not,
// counting all edges means we create an edge looking out of the domain.
// It's our choice whether current can flow out of the domain or not.
// Probably best if not.
// So either we need to find a way to import a flag here, OR, set vr to zero (v=0?)
// either in extra-outer tris or in tris just inside.
// The extra-outer tris are sounding more appealing all the time. Let's go for them.
if (1) { // if legitimate edge
f64 flow = 0.5*h*((nv1+nv2).dot(edgenormal));
mass -= flow; // correct? -- compare
flow = 0.5*h*((nvT1+nvT2).dot(edgenormal));
heat -= flow;
};
nvT1 = nvT2;
nv1 = nv2;
pos1 = pos2;
}; // next neigh
// If we did the above with triangle threads that update a solution in shared memory,
// we could switch to half the block doing the following:
mass += nTsrc.n*area_old;
heat += nTsrc.n*nTsrc.T*area_old;
nT nT_out;
nT_out.n = mass/area_new;
nT_out.T = heat/mass;
// Compressive heating:
// We need here new area and old area:
nT_out.T *= (1.0-0.666666666666667*(nT_out.n-nTsrc.n)/nTsrc.n
-0.111111111111111*(nT_out.n-nTsrc.n)*(nT_out.n-nTsrc.n)/(nTsrc.n*nTsrc.n));
// Note: 2 divisions vs 1 call to pow
p_nT_neut_out[index] = nT_out;
} // whether DOMAIN VERTEX --- try with and without.
__syncthreads(); // avoid syncthreads within branch.
// + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
// Ready for next species:
//if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{ // The point here is to reuse both the tri indices and the centroids.
// We should realise that putting a DOMAIN condition on this will make it go wrong:
// we are not loading all ins triangles this way, but we will assume we can
// use them, as far as I can see.
// If we only drop whole blocks that are INNER_VERTEX -- which we should --
// then we should be all right here -- if it's not part of this block then it's loaded
// separately.
nT nT_temp = p_minor_nT_ion[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_ion[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall.x;
v2.y = v_3.y-v_overall.y;
p_nv_shared[threadIdx.x] = nT_temp.n*v2;
p_T_shared[threadIdx.x] = nT_temp.T;
nT_temp = p_minor_nT_ion[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_ion[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall2.x;
v2.y = v_3.y-v_overall2.y;
p_nv_shared[blockDim.x + threadIdx.x] = nT_temp.n*v2;
p_T_shared[blockDim.x + threadIdx.x] = nT_temp.T;
}
__syncthreads(); // Avoid putting within branch.
f64_vec2 nv1, nvT1, pos1, nv2, nvT2, pos2;
nT nT1, nT2;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
// h*n*v.dot(edgenormal) is amount of traffic between major cells
nT nTsrc = p_nT_ion[index];
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
if (info.flag == OUTERMOST) iNeigh1++; // get to end of array.
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv1 = p_nv_shared[indextri-StartMinor];
nvT1 = p_T_shared[indextri-StartMinor]*nv1; // extra access to shared - nvm
pos1 = p_tri_centroid[indextri-StartMinor];
} else {
nT1 = p_minor_nT_ion[indextri];
v_3 = p_minor_v_ion[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri]; // local, as above: do not clobber this thread's own v_overall
v1.x = v_3.x-v_overall_.x;
v1.y = v_3.y-v_overall_.y;
nv1 = nT1.n*v1;
nvT1 = nT1.T*nv1;
pos1 = p_minor_centroid[indextri];
};
//char PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if (info.has_periodic) {
if ((pos1.x > pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Anticlock_rotate2(nv1); // ANTI is to mean that the tri is on the right, vertex on left.
nvT1 = Anticlock_rotate2(nvT1); // ANTI means apply anticlockwise.
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Clockwise_rotate2(nv1);
nvT1 = Clockwise_rotate2(nvT1);
pos1 = Clockwise_rotate2(pos1);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64 mass, heat;
mass = 0.0; heat = 0.0;
#pragma unroll 12
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
// aar - if we have an outer point
// then the number of neighs is not the number of tris
// SO EXPLOIT THIS
// Make sure that the omitted edge is the one that would go between the frill tris.
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh2];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv2 = p_nv_shared[indextri-StartMinor];
nvT2 = p_T_shared[indextri-StartMinor]*nv2;
pos2 = p_tri_centroid[indextri-StartMinor];
} else {
nT2 = p_minor_nT_ion[indextri];
v_3 = p_minor_v_ion[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri];
v2.x = v_3.x-v_overall_.x;
v2.y = v_3.y-v_overall_.y;
nv2 = nT2.n*v2;
nvT2 = nT2.T*nv2;
pos2 = p_minor_centroid[indextri];
};
if (info.has_periodic) {
if ((pos2.x > pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Anticlock_rotate2(nv2); // ANTI is to mean that the tri is on the right, vertex on left.
nvT2 = Anticlock_rotate2(nvT2); // ANTI means apply anticlockwise.
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Clockwise_rotate2(nv2);
nvT2 = Clockwise_rotate2(nvT2);
pos2 = Clockwise_rotate2(pos2);
};
};
f64_vec2 edgenormal;
edgenormal.x = pos2.y-pos1.y; // 2 is the more anticlockwise one
edgenormal.y = pos1.x-pos2.x;
if (1) { // if legitimate edge -- remember we should treat edge the same way from both sides.
f64 flow = 0.5*h*((nv1+nv2).dot(edgenormal));
mass -= flow; // correct? -- compare
flow = 0.5*h*((nvT1+nvT2).dot(edgenormal));
heat -= flow;
};
nvT1 = nvT2;
nv1 = nv2;
pos1 = pos2;
}; // next neigh
mass += nTsrc.n*area_old;
heat += nTsrc.n*nTsrc.T*area_old;
nT nT_out;
nT_out.n = mass/area_new;
nT_out.T = heat/mass;
// Compressive heating:
// We need here new area and old area:
nT_out.T *= (1.0-0.666666666666667*(nT_out.n-nTsrc.n)/nTsrc.n
-0.111111111111111*(nT_out.n-nTsrc.n)*(nT_out.n-nTsrc.n)/(nTsrc.n*nTsrc.n));
p_nT_ion_out[index] = nT_out;
};
// The point here is to reuse both the tri indices and the centroids.
// Ready for next species:
//if (info.flag == DOMAIN_VERTEX) {
// TRY WITH AND WITHOUT THIS.
{
nT nT_temp = p_minor_nT_elec[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_elec[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall.x;
v2.y = v_3.y-v_overall.y;
p_nv_shared[threadIdx.x] = nT_temp.n*v2;
p_T_shared[threadIdx.x] = nT_temp.T;
nT_temp = p_minor_nT_elec[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_elec[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall2.x;
v2.y = v_3.y-v_overall2.y;
p_nv_shared[blockDim.x + threadIdx.x] = nT_temp.n*v2;
p_T_shared[blockDim.x + threadIdx.x] = nT_temp.T;
}
//}
__syncthreads(); // Avoid putting within branch.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
nT nTsrc = p_nT_elec[index];
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
if (info.flag == OUTERMOST) iNeigh1++; // get to end of array.
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv1 = p_nv_shared[indextri-StartMinor];
nvT1 = p_T_shared[indextri-StartMinor]*nv1; // extra access to shared - nvm
pos1 = p_tri_centroid[indextri-StartMinor];
} else {
nT1 = p_minor_nT_elec[indextri];
v_3 = p_minor_v_elec[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri]; // local, as in the other species passes
v1.x = v_3.x-v_overall_.x;
v1.y = v_3.y-v_overall_.y;
nv1 = nT1.n*v1;
nvT1 = nT1.T*nv1;
pos1 = p_minor_centroid[indextri];
};
//char PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if (info.has_periodic) {
if ((pos1.x > pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Anticlock_rotate2(nv1); // ANTI is to mean that the tri is on the right, vertex on left.
nvT1 = Anticlock_rotate2(nvT1); // ANTI means apply anticlockwise.
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Clockwise_rotate2(nv1);
nvT1 = Clockwise_rotate2(nvT1);
pos1 = Clockwise_rotate2(pos1);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64 mass, heat;
mass = 0.0; heat = 0.0;
#pragma unroll 12
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh2];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv2 = p_nv_shared[indextri-StartMinor];
nvT2 = p_T_shared[indextri-StartMinor]*nv2;
pos2 = p_tri_centroid[indextri-StartMinor];
} else {
nT2 = p_minor_nT_elec[indextri];
v_3 = p_minor_v_elec[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri];
v2.x = v_3.x-v_overall_.x;
v2.y = v_3.y-v_overall_.y;
nv2 = nT2.n*v2;
nvT2 = nT2.T*nv2;
pos2 = p_minor_centroid[indextri];
};
if (info.has_periodic) {
if ((pos2.x > pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Anticlock_rotate2(nv2); // ANTI is to mean that the tri is on the right, vertex on left.
nvT2 = Anticlock_rotate2(nvT2); // ANTI means apply anticlockwise.
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Clockwise_rotate2(nv2);
nvT2 = Clockwise_rotate2(nvT2);
pos2 = Clockwise_rotate2(pos2);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64_vec2 edgenormal;
edgenormal.x = pos2.y-pos1.y; // 2 is the more anticlockwise one
edgenormal.y = pos1.x-pos2.x;
if (1) { // if legitimate edge --- how to know if we are looking into outermost??
// We are not loading info about neighbours. Yet it is only the neigh that knows
// it is OUTERMOST.
f64 flow = 0.5*h*((nv1+nv2).dot(edgenormal));
mass -= flow; // correct? -- compare
flow = 0.5*h*((nvT1+nvT2).dot(edgenormal));
heat -= flow;
// Meanwhile what if we are looking through insulator.
// We should find there that we have insisted on v_r=0 and so v.dot(edgenormal) roughly = 0.
// But we need to consider about outermost what to do.
// We don't really want to be arbitrarily losing or gaining charge.
// The answer is to include OUTERMOST flag, but, disinclude the outermost edge of an OUTERMOST vertex.
// This can happen automatically by a CAREFUL NUMBERING of outermost tris and neighs.
// Does it disagree with the numbering we previously considered canonical? Probably yes --> edit through :-/
};
nvT1 = nvT2;
nv1 = nv2;
pos1 = pos2;
}; // next neigh
mass += nTsrc.n*area_old;
heat += nTsrc.n*nTsrc.T*area_old;
nT nT_out;
nT_out.n = mass/area_new;
nT_out.T = heat/mass;
// Compressive heating:
// We need here new area and old area:
nT_out.T *= (1.0-0.666666666666667*(nT_out.n-nTsrc.n)/nTsrc.n
-0.111111111111111*(nT_out.n-nTsrc.n)*(nT_out.n-nTsrc.n)/(nTsrc.n*nTsrc.n));
p_nT_elec_out[index] = nT_out;
};
}
__global__ void Kernel_Populate_A_frill(
CHAR4 * __restrict__ p_tri_info,
f64_vec3 * __restrict__ p_A, // update own, read others
f64_vec2 * __restrict__ p_tri_centroid,
//LONG3 * __restrict__ p_corner_index
LONG3 * __restrict__ p_tri_neigh_index)
{
//long index = (blockIdx.x + BLOCK_START_OF_FRILL_SEARCH_d)*blockDim.x + threadIdx.x;
long index = blockIdx.x*blockDim.x + threadIdx.x;
// load the two corner indices
CHAR4 perinfo = p_tri_info[index];
if (perinfo.flag == OUTER_FRILL) {
//LONG3 cornerindex = p_corner_index[index];
//A0 = p_A[BEGINNING_OF_CENTRAL + cornerindex.i1];
//A1 = p_A[BEGINNING_OF_CENTRAL + cornerindex.i2];
//if (perinfo.per0 == NEEDS_CLOCK) A0 = Clockwise_rotate2(A0);
//if (perinfo.per1 == NEEDS_CLOCK) A1 = Clockwise_rotate2(A1);
//if (perinfo.per0 == NEEDS_ANTI) A0 = Anticlock_rotate2(A0);
//if (perinfo.per1 == NEEDS_ANTI) A1 = Anticlock_rotate2(A1);
//p_A[index] = 0.5*(A0 + A1);
// Just do this instead:
LONG3 neighindex = p_tri_neigh_index[index];
f64_vec2 cent = p_tri_centroid[index];
f64_vec2 centneigh = p_tri_centroid[neighindex.i1];
f64_vec3 A = p_A[neighindex.i1];
// Axy decrease radially:
f64 factor = sqrt((centneigh.x*centneigh.x+centneigh.y*centneigh.y)/
(cent.x*cent.x+cent.y*cent.y));
A.x *= factor;
A.y *= factor;
p_A[index] = A;
};
if (perinfo.flag == INNER_FRILL) {
//LONG3 cornerindex = p_corner_index[index];
//A0 = p_A[BEGINNING_OF_CENTRAL + cornerindex.i1];
//A1 = p_A[BEGINNING_OF_CENTRAL + cornerindex.i2];
//if (perinfo.per0 == NEEDS_CLOCK) A0 = Clockwise_rotate2(A0);
//if (perinfo.per1 == NEEDS_CLOCK) A1 = Clockwise_rotate2(A1);
//if (perinfo.per0 == NEEDS_ANTI) A0 = Anticlock_rotate2(A0);
//if (perinfo.per1 == NEEDS_ANTI) A1 = Anticlock_rotate2(A1);
//p_A[index] = 0.5*(A0 + A1);
// Just do this instead:
LONG3 neighindex = p_tri_neigh_index[index];
f64_vec2 cent = p_tri_centroid[index];
f64_vec2 centneigh = p_tri_centroid[neighindex.i1];
f64_vec3 A = p_A[neighindex.i1];
// Axy decrease radially:
f64 factor = sqrt((cent.x*cent.x+cent.y*cent.y)/
(centneigh.x*centneigh.x+centneigh.y*centneigh.y));
A.x *= factor;
A.y *= factor;
p_A[index] = A;
};
}
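// ---------------------------------------------------------------------------
// Illustration only (nothing calls this): the radial scaling applied to the frills
// above, factored out as a helper. For an OUTER_FRILL the factor is
// r_neigh/r_frill < 1, so |Axy| falls off like 1/r beyond the mesh; the INNER_FRILL
// branch above uses the reciprocal ratio. A sketch assuming only the f64 / f64_vec2 /
// f64_vec3 types already used in this file; Az is left unchanged.
__device__ __forceinline__ f64_vec3 Sketch_scale_Axy_radially(
	f64_vec3 A_neigh,       // A at the neighbouring (non-frill) centroid
	f64_vec2 cent_frill,    // centroid of the frill triangle
	f64_vec2 cent_neigh)    // centroid of that neighbour
{
	f64 factor = sqrt((cent_neigh.x*cent_neigh.x + cent_neigh.y*cent_neigh.y)
					/ (cent_frill.x*cent_frill.x + cent_frill.y*cent_frill.y));
	A_neigh.x *= factor;
	A_neigh.y *= factor;
	return A_neigh;
}
// ---------------------------------------------------------------------------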
// The same sort of routine as the following will be needed to anti-advect A,Adot,phi,phidot.
// Bad news; no way to avoid it though... could we interpolate to new values? Is that really much different?
// Crude estimate of grad is okay.
__global__ void Kernel_Compute_Grad_A_minor_antiadvect(
f64_vec3 * __restrict__ p_A_tri, // for creating grad
f64_vec3 * __restrict__ p_A_vert, //
f64 h,
f64_vec2 * __restrict__ p_v_overall, // hv = amt to anti-advect
structural * __restrict__ p_info, //
f64_vec2 * __restrict__ p_tri_centroid, //
CHAR4 * __restrict__ p_tri_perinfo, //
CHAR4 * __restrict__ p_tri_per_neigh,
LONG3 * __restrict__ p_corner_index, //
LONG3 * __restrict__ p_neigh_tri_index, //
long * __restrict__ p_IndexTri, // we said carry on using this for now.
bool bAdd,
f64_vec3 * __restrict__ p_Addition_Rate,
// output:
f64_vec3 * __restrict__ p_A_out // fill in for both tri and vert...
)
{
__shared__ f64_vec3 A_tri[threadsPerTileMinor];
__shared__ f64_vec2 tri_centroid[threadsPerTileMinor]; // 5
__shared__ f64_vec3 A_vert[SIZE_OF_MAJOR_PER_TRI_TILE]; // +1.5
__shared__ f64_vec2 vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE];
// If we want 512 threads/SM, 12 doubles in shared per thread is limit.
// We can accommodate 12 .. so 6 per major in addition to this but not when we have shared_per.
// Well we could limit it to 10 tris but it's asking for trouble.
// 6 longs = 3 doubles' worth per thread
__shared__ long IndexTri[SIZE_OF_MAJOR_PER_TRI_TILE*MAXNEIGH_d]; // +3
// Do first with 3 dimensions Axyz at once - may be slower but we'll see.
long index = blockIdx.x*blockDim.x + threadIdx.x;
long StartTri = blockIdx.x*blockDim.x; // can replace this one.
long StartMajor = SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x; // can replace this one.
// could replace with #define here.
A_tri[threadIdx.x] = p_A_tri[index];
tri_centroid[threadIdx.x] = p_tri_centroid[index];
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
A_vert[threadIdx.x] = p_A_vert[SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x + threadIdx.x];
structural info = p_info[SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x + threadIdx.x];
vertex_pos[threadIdx.x] = info.pos;
}
// shared_per[threadIdx.x] = perinfo.per0+perinfo.per1+perinfo.per2; // if periodic tri then neigh will need to be able to know.
// note that we have to make sure CHAR4 takes up 32 bits not 4 x 32.
// Is that the root of our problems with footprint?
// If so, what should we do? Bitwise operations on a char?
__syncthreads();
f64_vec2 gradAx(0.0,0.0);
f64_vec2 gradAy(0.0,0.0);
f64_vec2 gradAz(0.0,0.0);
f64_vec2 v_overall = p_v_overall[index];
CHAR4 perinfo = p_tri_perinfo[index];
{
// Allow it to run through and produce nonsense for frills....
CHAR4 tri_rotate = p_tri_per_neigh[index];
LONG3 corner_index = p_corner_index[index];
LONG3 neightri = p_neigh_tri_index[index];
// Note that A, as well as position, has to be ROTATED to make a contiguous image.
// This tri minor has 3 edges with triangles and 3 edges with centrals.
f64 area = 0.0;
f64_vec2 pos0(9.0,19.0), pos1 (1.0,2.0), pos2(4.0,2.0);
// f64_vec3 Avert0,Avert1,Avert2;
// We need 4 values at a time in order to do a side.
// We don't need to have all 7 values (3+ 3 + itself)
// So we'd be better just to load one quadrilateral's doings at a time, given the paucity of register and L1 space.
// Either we store all 7x3 A-values at once + 7 positions. Or we use 4 and 4 positions at once.
// Bear in mind a partial saving might yield big benefits.
// HAZARD: we don't know the ordering of the points.
// Halfway house: for simplicity store all the positions already loaded.
// A does not load from the same place anyway.
// Then go round the quadrilaterals.
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos0 = vertex_pos[corner_index.i1-StartMajor];
} else {
structural info = p_info[corner_index.i1];
pos0 = info.pos;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos1 = vertex_pos[corner_index.i2-StartMajor];
} else {
structural info = p_info[corner_index.i2];
pos1 = info.pos;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos2 = vertex_pos[corner_index.i3-StartMajor];
} else {
structural info = p_info[corner_index.i3];
pos2 = info.pos;
}
if (perinfo.per0+perinfo.per1+perinfo.per2 == 0) {
} else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI) {
pos0 = Anticlock_rotate2(pos0);
}
if (perinfo.per0 == NEEDS_CLOCK) { // this means the corner is off the clockwise side. Therefore anticlockwise rotated.
pos0 = Clockwise_rotate2(pos0);
}
if (perinfo.per1 == NEEDS_ANTI) {
pos1 = Anticlock_rotate2(pos1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
pos1 = Clockwise_rotate2(pos1);
}
if (perinfo.per2 == NEEDS_ANTI) {
pos2 = Anticlock_rotate2(pos2);
}
if (perinfo.per2 == NEEDS_CLOCK) {
pos2 = Clockwise_rotate2(pos2);
}
};
// It worked with none of the calcs in. Now we bring back the above. Still works
f64_vec2 u0(1.0,2.0),
u1(0.0,2.0),
u2(3.0,1.0);
// to be the positions of neighbouring centroids
// CHAR4 tri_rotate; // 4 chars but really using 3
// tri_rotate.per0 = 0; tri_rotate.per1 = 0; tri_rotate.per2 = 0;
char periodic = perinfo.per0+perinfo.per1+perinfo.per2;
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri+blockDim.x))
{
u0 = tri_centroid[neightri.i1-StartTri];
} else {
u0 = p_tri_centroid[neightri.i1];
};
if (tri_rotate.per0 == NEEDS_CLOCK)
u0 = Clockwise_rotate2(u0);
if (tri_rotate.per0 == NEEDS_ANTI)
u0 = Anticlock_rotate2(u0);
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri+blockDim.x))
{
u1 = tri_centroid[neightri.i2-StartTri];
} else {
u1 = p_tri_centroid[neightri.i2];
}
if (tri_rotate.per1 == NEEDS_CLOCK)
u1 = Clockwise_rotate2(u1);
if (tri_rotate.per1 == NEEDS_ANTI)
u1 = Anticlock_rotate2(u1);
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri+blockDim.x))
{
u2 = tri_centroid[neightri.i3-StartTri];
} else {
u2 = p_tri_centroid[neightri.i3];
}
if (tri_rotate.per2 == NEEDS_CLOCK)
u2 = Clockwise_rotate2(u2);
if (tri_rotate.per2 == NEEDS_ANTI)
u2 = Anticlock_rotate2(u2);
// still works
// ............................................................................................
// . I think working round with 4 has a disadvantage: if we get back around to one that is off-tile,
// we have to load it all over again. Still that is only 1 out of 7 that gets duplicated.
// Here is the best thing I can come up with: store 7 positions. That is already
// 28 longs' worth... each A-value uses 6 of the 7 positions to have an effect.
// Load each A-value at a time and recalc shoelace for 3 quadrilaterals. ??
// Too complicated.
// If we store all positions, can we finish with each A as we handle it? Yes but let's not.
//f64_vec2 ourpos = tri_centroid[threadIdx.x]; // can try with and without this assignment to variable
//f64_vec3 A0 = A_tri[threadIdx.x]; // can try with and without this assignment to variable
f64_vec3 A_1(0.0,0.0,0.0),
A_out(0.0,0.0,0.0),
A_2(0.0,0.0,0.0);
// Our A: A_tri[threadIdx.x]
// Now fill in the A values:
// ____________________________
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_1 = A_vert[corner_index.i2-StartMajor];
} else {
A_1 = p_A_vert[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i3-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i3];
}
if (periodic == 0) {
} else {
if (perinfo.per1 == NEEDS_ANTI) {
A_1 = Anticlock_rotate3(A_1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
A_1 = Clockwise_rotate3(A_1);
}
if (perinfo.per2 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
};
if (perinfo.per2 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
};
}
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + blockDim.x))
{
A_out = A_tri[neightri.i1-StartTri];
} else {
A_out = p_A_tri[neightri.i1];
}
if (tri_rotate.per0 != 0) {
if (tri_rotate.per0 == NEEDS_CLOCK) {
A_out = Clockwise_rotate3(A_out);
} else {
A_out = Anticlock_rotate3(A_out);
};
};
// ======================================================
// shoelace = (ourpos.x-u0.x)*(pos1.y-pos2.y)
// + (pos1.x-pos2.x)*(u0.y-ourpos.y); // if u0 is opposite point 0
// clock.x-anti.x
// We are now going to put the corners of the minor cell at
// e.g. 1/3(pos1 + u0 + ourpos)
// rather than at
// e.g. 2/3 pos1 + 1/3 pos2
//corner1 = 0.3333333*(pos1+u0+ourpos)
//corner2 = 0.3333333*(pos2+u0+ourpos)
//edgenormal.x = corner1.y-corner2.y = 0.333333(pos1.y-pos2.y) -- so no change here
f64_vec2 edgenormal;
edgenormal.x = (pos1.y-pos2.y)*0.333333333333333;
edgenormal.y = (pos2.x-pos1.x)*0.333333333333333; // cut off 1/3 of the edge
if (edgenormal.dot(pos0-pos1) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Think about averaging at typical edge.
// Using 5/12:
// corners are say equidistant from 3 points, so on that it would be 1/6
// but allocate the middle half of the bar to 50/50 A_tri[threadIdx.x]+Aout.
// tried without A_tri[threadIdx.x].z+ ...
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
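// (One way to read the weights: the edge average is
//  A_edge ~ (5/12)(A_ours + A_opposite) + (1/12)(A_corner1 + A_corner2).
//  The middle half of the edge is split 50/50 between the two cells it separates and
//  each end quarter is shared equally among the three cells meeting at that corner,
//  which gives exactly 5/12, 5/12, 1/12, 1/12.)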
// Now that we put minor corners at (1/3)(2 centroids+vertex), this makes even more sense.
area += 0.333333333333333*
(0.5*(pos1.x+pos2.x)+tri_centroid[threadIdx.x].x+u0.x)*edgenormal.x;
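// (area is accumulated as the contour integral of x dy -- Green's theorem -- with x taken
//  at the midpoint of this minor-cell edge and edgenormal.x playing the role of n_x dl;
//  summed over all the edges it yields the minor cell's area.)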
// NOT CONSISTENT BEHAVIOUR:
// TO HERE WAS ENOUGH TO FAIL.
// ASSUMING ALL VALUES VALID (consider edge of memory a different case):
// From here on is where it gets thorny as we no longer map A_1 to vertex 1.
// %%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%%
A_1 = A_out; // now A_1 points at tri neigh 0
A_out = A_2; // now looking at vertex 2
// A_2 is now to point at tri neigh 1
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + blockDim.x))
{
A_2 = A_tri[neightri.i2-StartTri];
} else {
A_2 = p_A_tri[neightri.i2];
}
if (tri_rotate.per1 != 0) {
if (tri_rotate.per1 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
// shoelace = (ourpos.x-pos2.x)*(u0.y-u1.y)
// + (u0.x-u1.x)*(pos2.y-ourpos.y);
//x1 = (2/3)pos2+(1/3)pos0;
//x2 = (2/3)pos2+(1/3)pos1;
//edgenormal.x = (x1.y-x2.y);
//edgenormal.y = (x2.x-x1.x); // cut off 1/3 of the edge
edgenormal.x = 0.333333333333333*(pos0.y-pos1.y);
edgenormal.y = 0.333333333333333*(pos1.x-pos0.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos2-pos1) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(u0.x+u1.x)
+tri_centroid[threadIdx.x].x+pos2.x)*edgenormal.x;
A_1 = A_out; // now A_1 points at corner 2
A_out = A_2; // now points at tri 1
// A_2 to point at corner 0
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i1-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i1];
}
if (perinfo.per0 != 0) {
if (perinfo.per0 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
}
if (perinfo.per0 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
}
}
//shoelace = (ourpos.x-u1.x)*(pos2.y-pos0.y) // clock.y-anti.y
// + (pos2.x-pos0.x)*(u1.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos0.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos0.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos1-pos0) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(pos2.x+pos0.x)+tri_centroid[threadIdx.x].x+u1.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A_2 is now to point at tri neigh 2
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + blockDim.x))
{
A_2 = A_tri[neightri.i3-StartTri];
} else {
A_2 = p_A_tri[neightri.i3];
}
if (tri_rotate.per2 != 0) {
if (tri_rotate.per2 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
// f64 shoelace = (ourpos.x-pos0.x)*(u1.y-u2.y) // clock.y-anti.y
// + (u1.x-u2.x)*(pos0.y-ourpos.y);
// Where is it used?
edgenormal.x = 0.333333333333333*(pos1.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos1.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos0-pos1) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(u2.x+u1.x)+tri_centroid[threadIdx.x].x+pos0.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A2 to be for corner 1
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i2-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i2];
}
if (perinfo.per1 != 0) {
if (perinfo.per1 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
}
if (perinfo.per1 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
}
}
//shoelace = (ourpos.x-u2.x)*(pos0.y-pos1.y) // clock.y-anti.y
//+ (pos0.x-pos1.x)*(u2.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos1.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos1.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos2-pos1) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(pos0.x+pos1.x)+tri_centroid[threadIdx.x].x+u2.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A2 to be for tri 0
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + blockDim.x))
{
A_2 = A_tri[neightri.i1-StartTri];
} else {
A_2 = p_A_tri[neightri.i1];
}
if (tri_rotate.per0 != 0) {
if (tri_rotate.per0 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
//shoelace = (ourpos.x-pos1.x)*(u2.y-u0.y) // clock.y-anti.y
// + (u2.x-u0.x)*(pos1.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos2.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos2.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos1-pos2) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(u0.x+u2.x)+tri_centroid[threadIdx.x].x+pos1.x)*edgenormal.x;
// CHECKED ALL THAT
gradAx /= area;
gradAy /= area;
gradAz /= area;
}
// Now we have to do something about anti-advecting:
if ((perinfo.flag == DOMAIN_TRIANGLE) || (perinfo.flag == CROSSING_INS)) // otherwise the centroid can be assumed not moving??
{
f64_vec3 anti_Advect;
anti_Advect.x = h*v_overall.dot(gradAx);
anti_Advect.y = h*v_overall.dot(gradAy);
anti_Advect.z = h*v_overall.dot(gradAz);
p_A_out[index] += anti_Advect;
}
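// (Anti-advection in a nutshell: the node is carried with the mesh at v_overall, so each
//  component of A is shifted by h*(v_overall . grad A) -- a first-order Taylor move of A
//  to the node's new position.)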
// Similar routine will be needed to create grad A ... or Adot ... what a waste of calcs.
// Is there a more sensible way: only do a mesh move every 10 steps -- ??
// Then what do we do on the intermediate steps -- that's a problem -- flowing Eulerian fluid
// will give the right change in pressure, but then mesh has to catch up. Still that might be a thought.
// Next consideration: Lap A on central.
// Idea for doing at same time: (don't do it -- too much atomicAdd, I do not trust)
// ___ only certain major cells "belong" to this tri tile.
// ___ write to a given output from our total effect coming from this tile's tris.
// ____ when we hit a central cell outside this tile, send it atomicAdd to an array
// that collects up all the extra contribs to it.
// __ then we just reload, sum 2 things and divide
// However, atomicAdd fp64 only exists on Compute 6.0 :-(
// Workaround taken from http://stackoverflow.com/questions/16077464/atomicadd-for-double-on-gpu
// Eventually decided not to use but to carry on with half the threads to target centrals in this routine.
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
index = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x;
structural info = p_info[index];
if (info.flag == DOMAIN_VERTEX) {
// Does branching disrupt contiguity???
f64_vec2 v_overall = p_v_overall[BEGINNING_OF_CENTRAL + index];
memcpy(IndexTri+threadIdx.x*MAXNEIGH_d, p_IndexTri+index*MAXNEIGH_d,
sizeof(long)*MAXNEIGH_d);
f64_vec3 A0 = A_vert[threadIdx.x]; // can ditch
f64_vec2 u0 = vertex_pos[threadIdx.x];
f64_vec3 A1(0.0,0.0,0.0),A2(0.0,0.0,0.0),A3(0.0,0.0,0.0);
f64_vec2 u1(0.0,0.0),u2(1.1,1.1),u3(1.0,2.0);
f64 //shoelace,
area = 0.0;
f64_vec2 edgenormal;
// As before we need 4 A values and positions at a time. Now 3 all come from tris.
gradAx.x = 0.0; gradAx.y = 0.0;
gradAy.x = 0.0; gradAy.y = 0.0;
gradAz.x = 0.0; gradAz.y = 0.0;
// Note: we found that unroll can be slower if registers are used up (!) CAUTION:
// Initial situation: inext = 1, i = 0, iprev = -1
long iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+info.neigh_len-1];
// BEWARE OF FRILLED VERTCELLS: neigh_len < tri_len ??
if ((iindextri >= StartTri) && (iindextri < StartTri +
blockDim.x)) // matching code above to see what happens
// threadsPerTileMinor))
{
// DOES NOT WORK WITH 2 LINES HERE.
A3 = A_tri[iindextri-StartTri]; // this breaks it
u3 = tri_centroid[iindextri-StartTri]; // this breaks it
} else {
A3 = p_A_tri[iindextri];
u3 = p_tri_centroid[iindextri];
};
// The peculiar thing is that a very similar read happens earlier on.
// INCONSISTENT BEHAVIOUR: now does not work with all above reads commented.
// FAILS IF START COMMENT HERE
if (info.has_periodic != 0) {
if ((u3.x > u3.y*GRADIENT_X_PER_Y*0.5) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A3 = Anticlock_rotate3(A3);
u3 = Anticlock_rotate2(u3);
};
if ((u3.x < -u3.y*GRADIENT_X_PER_Y*0.5) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A3 = Clockwise_rotate3(A3);
u3 = Clockwise_rotate2(u3);
};
}
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d]; // + 0
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A2 = A_tri[iindextri-StartTri];
u2 = tri_centroid[iindextri-StartTri];
} else {
A2 = p_A_tri[iindextri];
u2 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u2.x > u2.y*GRADIENT_X_PER_Y*0.5) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A2 = Anticlock_rotate3(A2);
u2 = Anticlock_rotate2(u2);
};
if ((u2.x < -u2.y*GRADIENT_X_PER_Y*0.5) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A2 = Clockwise_rotate3(A2);
u2 = Clockwise_rotate2(u2);
};
}
int inext = 0; // will be ++ straight away.
#pragma unroll MAXNEIGH_d
for (int i = 0; i < info.neigh_len; i++) // WHY ARE WE GOING TO MAXNEIGH_d ?
{
inext++;
if (inext == info.neigh_len) inext = 0;
// Bear in mind, this would not work for OUTERMOST.
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+inext];
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A1 = A_tri[iindextri-StartTri];
u1 = tri_centroid[iindextri-StartTri];
} else {
A1 = p_A_tri[iindextri];
u1 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u1.x > 0.5*GRADIENT_X_PER_Y*u1.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A1 = Anticlock_rotate3(A1);
u1 = Anticlock_rotate2(u1);
};
if ((u1.x < -0.5*GRADIENT_X_PER_Y*u1.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A1 = Clockwise_rotate3(A1);
u1 = Clockwise_rotate2(u1);
};
}
// So how are we going to get the corners of central cell?
// Do we change the plan and make them the average of 2 tri centroids and the vertex?
// That is one way, not sure I'm keen on it, not having thought about it.
// YES, that is what we have to do.
// ==============
// edge_cnr1 = (u1+u2+u0)*0.333333333333333;
// edge_cnr2 = (u3+u2+u0)*0.333333333333333;
edgenormal.x = 0.333333333333333*(u1.y-u3.y);
edgenormal.y = 0.333333333333333*(u3.x-u1.x);
// edgenormal to point at u2:
if ((u2-u1).dot(edgenormal) < 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
}
//shoelace = (u0.x-u2.x)*(u1.y-u3.y) +
// (u1.x-u3.x)*(u2.y-u0.y);
gradAx += (TWELTH*(A1.x+A3.x)+FIVETWELTHS*(A0.x+A2.x))*edgenormal;
gradAy += (TWELTH*(A1.y+A3.y)+FIVETWELTHS*(A0.y+A2.y))*edgenormal;
gradAz += (TWELTH*(A1.z+A3.z)+FIVETWELTHS*(A0.z+A2.z))*edgenormal;
area += (0.3333333333333333*(0.5*(u1.x+u3.x)+u2.x+u0.x))*edgenormal.x;
// ( grad x )_x
// move round A values and positions:
// ----------------------------------
A3 = A2;
u3 = u2;
A2 = A1;
u2 = u1;
}
// COMMENTED ENDING HERE FOR IT TO WORK
gradAx /= area;
gradAy /= area;
gradAz /= area;
f64_vec3 anti_Advect;
anti_Advect.x = h*v_overall.dot(gradAx);
anti_Advect.y = h*v_overall.dot(gradAy);
anti_Advect.z = h*v_overall.dot(gradAz);
// Save off:
if (bAdd) {
anti_Advect += h*p_Addition_Rate[BEGINNING_OF_CENTRAL + index];
}
p_A_out[BEGINNING_OF_CENTRAL + index] += anti_Advect; // best way may be: if we know start of central stuff, can send
}; // ONLY FOR DOMAIN VERTEX
}; // IS THREAD IN THE FIRST HALF OF THE BLOCK
// =============================================================================
// Understand the following important fact:
// If you will use 63 registers (and this routine surely will -
// we have positions 7 x 2 x 2 = 28 registers, A 7 x 3 x 2 = 42 registers
// -- though we could try taking into account, 1 dimension at a time)
// Then the max thread throughput per SM is 512 which means that we will get
// no penalty from using up to 12 doubles in shared memory per thread.
// =============================================================================
// That does mean L1 has room for only 4 doubles. It is not big compared to the register file itself.
}
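// ---------------------------------------------------------------------------
// Illustration only (nothing calls this): the contour-integral gradient that the kernel
// above builds edge by edge, written for a plain convex polygon --
// grad F ~ (1/area) * sum over edges of F_edge * (outward normal * edge length),
// with the area obtained from the same loop as the integral of x dy. A sketch assuming
// only the f64 / f64_vec2 types already used in this file; vertices listed anticlockwise,
// and a simple two-point edge average in place of the 1/12-5/12 weighting used above.
__device__ void Sketch_Green_gradient(
	f64_vec2 const * poly_pos,   // polygon vertices, anticlockwise
	f64 const * F,               // scalar value attributed to each vertex
	int n,                       // number of vertices
	f64_vec2 * p_gradF)          // output: estimated gradient over the polygon
{
	f64_vec2 grad(0.0, 0.0);
	f64 area = 0.0;
	for (int i = 0; i < n; i++)
	{
		int inext = (i + 1 == n) ? 0 : i + 1;
		f64_vec2 pos1 = poly_pos[i];
		f64_vec2 pos2 = poly_pos[inext];
		f64_vec2 edgenormal;               // outward normal scaled by edge length
		edgenormal.x = pos2.y - pos1.y;
		edgenormal.y = pos1.x - pos2.x;
		f64 F_edge = 0.5*(F[i] + F[inext]); // midpoint value on the edge
		grad.x += F_edge*edgenormal.x;
		grad.y += F_edge*edgenormal.y;
		area += 0.5*(pos1.x + pos2.x)*edgenormal.x; // contour integral of x dy
	}
	grad.x /= area;
	grad.y /= area;
	*p_gradF = grad;
}
// ---------------------------------------------------------------------------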
__global__ void Kernel_Compute_Lap_A_and_Grad_A_to_get_B_on_all_minor(
f64_vec3 * __restrict__ p_A_tri,
f64_vec3 * __restrict__ p_A_vert,
structural * __restrict__ p_info,
f64_vec2 * __restrict__ p_tri_centroid,
CHAR4 * __restrict__ p_tri_perinfo,
CHAR4 * __restrict__ p_tri_per_neigh,
LONG3 * __restrict__ p_corner_index,
LONG3 * __restrict__ p_neigh_tri_index,
long * __restrict__ p_IndexTri,
// output:
f64_vec3 * __restrict__ p_Lap_A,
f64_vec3 * __restrict__ p_Lap_A_central,
f64_vec3 * __restrict__ p_B,
f64_vec3 * __restrict__ p_B_central // could just infer
)
{
// The logic here. Lap A requires A on quadrilateral over each honey-edge.
// Therefore we need both tri and vertex values of A at once.
// The same applies for Lap_A_central as for Lap_A_tri.
// Therefore we carry on to do Lap_A_central using the same data ; in fact we can
// avoid loading Indextri because we work on the result in shared memory as we are doing tris.
__shared__ f64_vec3 A_tri[threadsPerTileMinor];
__shared__ f64_vec2 tri_centroid[threadsPerTileMinor]; // 5
__shared__ f64_vec3 A_vert[SIZE_OF_MAJOR_PER_TRI_TILE]; // +1.5
__shared__ f64_vec2 vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE];// altogether 9 doubles per thread so far here.
// __shared__ short shared_per[threadsPerTileMinor]; // short easier to access than char maybe.
// If we want 512 threads/SM, 12 doubles in shared per thread is limit.
// We can accommodate 12 .. so 6 per major in addition to this but not when we have shared_per.
// Well we could limit it to 10 tris but it's asking for trouble.
// 6 longs = 3 doubles' worth per thread
__shared__ long IndexTri[SIZE_OF_MAJOR_PER_TRI_TILE*MAXNEIGH_d];
// __shared__ char PBCtri[SIZE_OF_MAJOR_PER_TRI_TILE*MAXNEIGH_d];
// Total 3+2+1.5+1+3 = 11.5 -- it ought to work -- plus shared_per. Even if that counts
// as a long, we still just about get it.
// It OUGHT still to run 2 blocks per SM.
// Only half the threads will continue to the 2nd part. But on the other hand,
// if each major thread has 6 (ind) + 2*5 + 5 = 21+ doubles, only 256 of those can run.
// Anything else needed? Yes - the list of chars -- which is 6 bytes per thread here
// and thus makes this all too chancy.
// Go with the unintelligent way -- two separate routines ??
// Note that if a triangle is not periodic itself then it's impossible that its data
// needs to be rotated for central, since central is a corner of the triangle.
// Therefore we can consult shared_per list instead. Okay but what if a tri is not in the list?
//
// __shared__ f64_vec3 Lap_A_central[SIZE_OF_MAJOR_PER_TRI_TILE]; // +1.5
// Let's hope atomicAdd to shared isn't as bad as we expect.
// https://devtalk.nvidia.com/default/topic/514085/cuda-programming-and-performance/atomicadd-in-shared-memory-is-measured-slower-than-in-global-memory-timing-shared-memory-atomic-o/
// says that it's AWFUL.
// Factors against doing:
// _ Must take account of periodic in applying effect from this side
// _ Must do atomic add to an extra array to avoid conflicting with other blocks' contribs
// _ Must do atomic add within shared memory to avoid conflicting with other threads' contribs
// We got so far 7.5 to 8 doubles per go.
// Do we want to add 3 more, put the routine following this. Yes.
// do we also want tri centroid? probably yes really
// Do we need shared flags?
// Note that we do not need to do 3D at all to do LAP - this is something where we can use 1 dimension at a time.
// Problem with that, we'd have to load A all over again.
// We could try it both ways.
// Stripping back the solution to 1D at a time, is probably just tinkering at the edges.
// The only thing worth comparing is if we do both that AND reload values 3x to do Ax,Ay,Az separately.
// Now bear in mind: if 10 doubles is a lot for shared that is 48K, 5 doubles is already a lot for L1.
// Do first with 3 dimensions Axyz at once - may be slower but we'll see.
long index = blockIdx.x*blockDim.x + threadIdx.x;
long StartTri = blockIdx.x*blockDim.x; // can replace this one.
long StartMajor = SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x; // can replace this one.
// could replace with #define here.
A_tri[threadIdx.x] = p_A_tri[index];
tri_centroid[threadIdx.x] = p_tri_centroid[index];
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
A_vert[threadIdx.x] = p_A_vert[SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x + threadIdx.x];
structural info = p_info[SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x + threadIdx.x];
vertex_pos[threadIdx.x] = info.pos;
// f64_vec3 zero(0.0,0.0,0.0);
// Lap_A_central[threadIdx.x] = zero;
// To save Lap A central solution we'd need to send it to the array per this tile's colour
// and then aggregate the results, divide by shoelace?
}
// shared_per[threadIdx.x] = perinfo.per0+perinfo.per1+perinfo.per2; // if periodic tri then neigh will need to be able to know.
__syncthreads();
// perinfo is still in scope later on but we'd rather get rid of it.
// The construction here is so we can get it before syncthreads, which is awkward.
f64_vec3 LapA(0.0,0.0,0.0);
f64_vec3 B(0.0,0.0,0.0);
CHAR4 perinfo = p_tri_perinfo[index];
if ((perinfo.flag != OUTER_FRILL) && (perinfo.flag != INNER_FRILL))
{
// We may need to find a way to AVOID doing branch around memory accesses.
// For frills, we would get a division by zero I'd expect.
// We probably should be splitting out tri vs central.
f64_vec2 edgenormal; // moving this inside unfortunately did not make any gains at all.
LONG3 corner_index = p_corner_index[index];
LONG3 neightri = p_neigh_tri_index[index];
CHAR4 tri_rotate = p_tri_per_neigh[index];
// Note that A, as well as position, has to be ROTATED to make a contiguous image.
// This tri minor has 3 edges with triangles and 3 edges with centrals.
// To accumulate Lap_A_central at the same time:
// * We should colour the blocks so that no two colours are shared by 1 major. That is possible.
// * The block outputs to its own colour array of centrals affected.
// * Then we aggregate the colour arrays.
// @@@@@@@@@@@@@@@@
// Now consider another one: what if we launched 3 threads per triangle. Same shared data for block as here.
// Does that really help anything? Think no.
// We need to divide by area when we've done something.
f64 area = 0.0;
f64_vec2 pos0(0.0,0.0), pos1(1.0,0.0), pos2(0.0,1.0);
// DEBUG: COMMENTING FROM HERE IT WORKED.
// f64_vec3 Avert0,Avert1,Avert2;
// We need 4 values at a time in order to do a side.
// We don't need to have all 7 values (3+ 3 + itself)
// So we'd be better just to load one quadrilateral's doings at a time, given the paucity of register and L1 space.
// Either we store all 7x3 A-values at once + 7 positions. Or we use 4 and 4 positions at once.
// Bear in mind a partial saving might yield big benefits.
// HAZARD: we don't know the ordering of the points.
// Halfway house: for simplicity store all the positions already loaded.
// A does not load from the same place anyway.
// Then go round the quadrilaterals.
// THIS BIT IS ENOUGH TO CRASH IT:
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
//pos0 = vertex_pos[corner_index.i1-StartMajor]; // this line okay
} else {
// This bit breaks it:
//structural info = p_info[corner_index.i1];
if ((corner_index.i1 >= 0) && (corner_index.i1 < 36864))
{
// Debug: Rule out that it's a bad index
structural info = p_info[corner_index.i1];
pos0 = info.pos;
} else {
printf("%d %d %d %d \n##################################################\n",
index,corner_index.i1,corner_index.i2, corner_index.i3);
// comes out with big negative
// same thing applies when we call with pX1->tri_corner_index
// When we output tri_corner_index it is all valid.
};
}
/* if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos1 = vertex_pos[corner_index.i2-StartMajor];
} else {
structural info = p_info[corner_index.i2];
pos1 = info.pos;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos2 = vertex_pos[corner_index.i3-StartMajor];
} else {
structural info = p_info[corner_index.i3];
pos2 = info.pos;
}*/
char periodic = perinfo.per0 + perinfo.per1 + perinfo.per2;
if (periodic == 0) {
} else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI) {
pos0 = Anticlock_rotate2(pos0);
// Avert0 = Anticlock_rotate2(Avert0);
}
if (perinfo.per0 == NEEDS_CLOCK) {
pos0 = Clockwise_rotate2(pos0);
// Avert0 = Clockwise_rotate2(Avert0);
}
if (perinfo.per1 == NEEDS_ANTI) {
pos1 = Anticlock_rotate2(pos1);
// Avert1 = Anticlock_rotate2(Avert1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
pos1 = Clockwise_rotate2(pos1);
// Avert1 = Clockwise_rotate2(Avert1);
}
if (perinfo.per2 == NEEDS_ANTI) {
pos2 = Anticlock_rotate2(pos2);
// Avert2 = Anticlock_rotate2(Avert2);
}
if (perinfo.per2 == NEEDS_CLOCK) {
pos2 = Clockwise_rotate2(pos2);
// Avert2 = Clockwise_rotate2(Avert2);
}
};
f64_vec2 u0(0.0,0.0),u1(1.0,1.0),u2(1.0,3.0); // to be the positions of neighbouring centroids
/*
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
u0 = tri_centroid[neightri.i1-StartTri];
} else {
u0 = p_tri_centroid[neightri.i1];
}
if (tri_rotate.per0 == NEEDS_CLOCK) {
u0 = Clockwise_rotate2(u0);
}
if (tri_rotate.per0 == NEEDS_ANTI) {
u0 = Anticlock_rotate2(u0);
}
// Am I correct that this is to avoid tri_neigh_per information being recorded...
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + threadsPerTileMinor))
{
u1 = tri_centroid[neightri.i2-StartTri];
} else {
u1 = p_tri_centroid[neightri.i2];
}
if (tri_rotate.per1 == NEEDS_CLOCK) {
u1 = Clockwise_rotate2(u1);
}
if (tri_rotate.per1 == NEEDS_ANTI) {
u1 = Anticlock_rotate2(u1);
}
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + threadsPerTileMinor))
{
u2 = tri_centroid[neightri.i3-StartTri];
} else {
u2 = p_tri_centroid[neightri.i3];
}
if (tri_rotate.per2 == NEEDS_CLOCK) {
u2 = Clockwise_rotate2(u2);
}
if (tri_rotate.per2 == NEEDS_ANTI) {
u2 = Anticlock_rotate2(u2);
}*/
// ............................................................................................
// . I think working round with 4 has a disadvantage: if we get back around to one that is off-tile,
// we have to load it all over again. Still that is only 1 out of 7 that gets duplicated.
// Here is the best thing I can come up with: store 7 positions. That is already
// 28 longs' worth... each A-value uses 6 of the 7 positions to have an effect.
// Load each A-value at a time and recalc shoelace for 3 quadrilaterals. ??
// Too complicated.
// If we store all positions, can we finish with each A as we handle it? Yes but let's not.
f64_vec2 ourpos = tri_centroid[threadIdx.x]; // can try with and without this assignment to variable
f64_vec3 A0 = A_tri[threadIdx.x]; // can try with and without this assignment to variable
f64_vec3 A_1(0.0,0.0,0.0),
A_out(1.0,2.0,3.0),
A_2(4.0,5.0,6.0);
// Our A: A_tri[threadIdx.x]
/*
// Now fill in the A values:
// ____________________________
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_1 = A_vert[corner_index.i2-StartMajor];
} else {
A_1 = p_A_vert[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i3-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i3];
}
if (perinfo.per1 == NEEDS_ANTI) {
A_1 = Anticlock_rotate3(A_1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
A_1 = Clockwise_rotate3(A_1);
}
if (perinfo.per2 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
};
if (perinfo.per2 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
};
/*
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
A_out = A_tri[neightri.i1-StartTri];
} else {
A_out = p_A_tri[neightri.i1];
}
if (tri_rotate.per0 != 0) {
if (tri_rotate.per0 == NEEDS_CLOCK) {
A_out = Clockwise_rotate3(A_out);
} else {
A_out = Anticlock_rotate3(A_out);
};
};*/
// ======================================================
f64 shoelace = (ourpos.x-u0.x)*(pos1.y-pos2.y)
+ (pos1.x-pos2.x)*(u0.y-ourpos.y); // if u0 is opposite point 0
// clock.x-anti.x
// We are now going to put the corners of the minor cell at
// e.g. 1/3(pos1 + u0 + ourpos)
// rather than at
// e.g. 2/3 pos1 + 1/3 pos2
//corner1 = 0.3333333*(pos1+u0+ourpos)
//corner2 = 0.3333333*(pos2+u0+ourpos)
//edgenormal.x = corner1.y-corner2.y = 0.333333(pos1.y-pos2.y) -- so no change here
edgenormal.x = (pos1.y-pos2.y)*0.333333333333333;
edgenormal.y = (pos2.x-pos1.x)*0.333333333333333; // cut off 1/3 of the edge
if (edgenormal.dot(pos0-pos1) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// note: same coeff to A0->grad_x as to x0 in shoelace:
f64 coeff = ((pos1.y-pos2.y)*edgenormal.x +
(pos2.x-pos1.x)*edgenormal.y)/shoelace;
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
coeff = ((u0.y-ourpos.y)*edgenormal.x +
(ourpos.x-u0.x)*edgenormal.y)/shoelace; // from top line same
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
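// (These two coeff terms together form (grad A).dot(edgenormal), with grad A taken as
//  constant over the quadrilateral whose diagonals are (ourpos,u0) and (pos1,pos2);
//  shoelace is twice that quadrilateral's signed area. Summing grad A . n dl over the six
//  edges and dividing by the minor-cell area then gives Lap A by the divergence theorem.)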
// Think about averaging at typical edge.
// Using 5/12:
// corners are say equidistant from 3 points, so on that it would be 1/6
// but allocate the middle half of the bar to 50/50 A0+Aout.
// Bx = dAz/dy
//B.x += Az_edge*edgenormal.y;
// By = -dAz/dx
//B.y += -Az_edge*edgenormal.x;
// Bz = dAy/dx-dAx/dy
//B.z += Ay_edge*edgenormal.x-Ax_edge*edgenormal.y;
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
// Now that we put minor corners at (1/3)(2 centroids+vertex), this makes even more sense.
area += 0.333333333333333*(0.5*(pos1.x+pos2.x)+ourpos.x+u0.x)*edgenormal.x;
/*
// ASSUMING ALL VALUES VALID (consider edge of memory a different case):
// From here on is where it gets thorny as we no longer map A_1 to vertex 1.
// %%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%%
A_1 = A_out; // now A_1 points at tri neigh 0
A_out = A_2; // now looking at vertex 2
// A_2 is now to point at tri neigh 1
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + threadsPerTileMinor))
{
A_2 = A_tri[neightri.i2-StartTri];
} else {
A_2 = p_A_tri[neightri.i2];
}
if (tri_rotate.per1 != 0) {
if (tri_rotate.per1 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
shoelace = (ourpos.x-pos2.x)*(u0.y-u1.y)
+ (u0.x-u1.x)*(pos2.y-ourpos.y); // can insert formula instead of creating var.
//x1 = (2/3)pos2+(1/3)pos0;
//x2 = (2/3)pos2+(1/3)pos1;
//edgenormal.x = (x1.y-x2.y);
//edgenormal.y = (x2.x-x1.x); // cut off 1/3 of the edge
edgenormal.x = 0.333333333333333*(pos0.y-pos1.y);
edgenormal.y = 0.333333333333333*(pos1.x-pos0.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos2-pos1) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((u0.y-u1.y)*edgenormal.x +
(u1.x-u0.x)*edgenormal.y)/shoelace; // This is correct - see coeff in shoelace on ourpos.y
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
//// Now do contribution to Lap A central for vertex 2:
//if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
//{
// f64_vec3 addition = coeff*(A0-A_out);
// atomicAdd((double *)(Lap_A_solution+neightri.i2-StartTri), addition.x);
// atomicAdd((double *)(Lap_A_solution+neightri.i2-StartTri)+1, addition.y);
// atomicAdd((double *)(Lap_A_solution+neightri.i2-StartTri)+2, addition.z);
// // Will this simultaneously be affected by other threads? YES
// // So have to use atomicAdd on shared memory.
//
// // I guess we learned our lesson: it really is more of a headache to do this way
// // than just to write a whole separate routine for central cells.
// // !
// // the workaround atomicAdd will make it slow because of converting to long-long ?
// // So this is probably slower than recreating the whole routine and calculating again.
// // :-(
//} else {
// f64_vec3 addition = coeff*(A0-A_out);
// atomicAdd((double *)(Lap_A_extra_array+neightri), addition.x);
// // We forgot something ELSE:
// // we have to take into account periodic orientation as well!
// // Okay let's scrap this attempt to create central at same time.
// // Unfortunately I do not see a way to overwrite part of shared memory with indices
// // either.
//}
// A_1 ~ u0, A_2 ~ u1
coeff = ((pos2.y-ourpos.y)*edgenormal.x +
(ourpos.x-pos2.x)*edgenormal.y)/shoelace;
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += 0.333333333333333*(0.5*(u0.x+u1.x)+ourpos.x+pos2.x)*edgenormal.x;
A_1 = A_out; // now A_1 points at corner 2
A_out = A_2; // now points at tri 1
// A_2 to point at corner 0
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i1-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i1];
}
if (perinfo.per0 != 0) {
if (perinfo.per0 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
}
if (perinfo.per0 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
}
}
shoelace = (ourpos.x-u1.x)*(pos2.y-pos0.y) // clock.y-anti.y
+ (pos2.x-pos0.x)*(u1.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos0.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos0.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos1-pos0) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((pos2.y-pos0.y)*edgenormal.x +
(pos0.x-pos2.x)*edgenormal.y)/shoelace; // see coeffs on ourpos in shoelace
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
// A_1~pos2 A_2~pos0
coeff = ((u1.y-ourpos.y)*edgenormal.x +
(ourpos.x-u1.x)*edgenormal.y)/shoelace; // something suspicious: that we had to change smth here.
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += 0.333333333333333*(0.5*(pos2.x+pos0.x)+ourpos.x+u1.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A_2 is now to point at tri neigh 2
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + threadsPerTileMinor))
{
A_2 = A_tri[neightri.i3-StartTri];
} else {
A_2 = p_A_tri[neightri.i3];
}
if (tri_rotate.per2 != 0) {
if (tri_rotate.per2 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
shoelace = (ourpos.x-pos0.x)*(u1.y-u2.y) // clock.y-anti.y
+ (u1.x-u2.x)*(pos0.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos1.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos1.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos0-pos1) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((u1.y-u2.y)*edgenormal.x +
(u2.x-u1.x)*edgenormal.y)/shoelace; // see coeffs on ourpos in shoelace
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
// A_1~u1 A_2~u2
coeff = ((pos0.y-ourpos.y)*edgenormal.x +
(ourpos.x-pos0.x)*edgenormal.y)/shoelace; // something suspicious: that we had to change smth here.
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += THIRD*(0.5*(u2.x+u1.x)+ourpos.x+pos0.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A2 to be for corner 1
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i2-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i2];
}
if (perinfo.per1 != 0) {
if (perinfo.per1 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
}
if (perinfo.per1 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} // CAREFUL WITH FLAGS N MEANINGS
}
shoelace = (ourpos.x-u2.x)*(pos0.y-pos1.y) // clock.y-anti.y
+ (pos0.x-pos1.x)*(u2.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos1.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos1.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos2-pos1) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((pos0.y-pos1.y)*edgenormal.x +
(pos1.x-pos0.x)*edgenormal.y)/shoelace; // see coeffs on ourpos in shoelace
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
// A_1~pos0 A_2~pos1
coeff = ((u2.y-ourpos.y)*edgenormal.x +
(ourpos.x-u2.x)*edgenormal.y)/shoelace; // something suspicious: that we had to change smth here.
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += 0.333333333333333*(0.5*(pos0.x+pos1.x)+ourpos.x+u2.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A2 to be for tri 0
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
A_2 = A_tri[neightri.i1-StartTri];
} else {
A_2 = p_A_tri[neightri.i1];
}
if (tri_rotate.per0 != 0) {
if (tri_rotate.per0 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
shoelace = (ourpos.x-pos1.x)*(u2.y-u0.y) // clock.y-anti.y
+ (u2.x-u0.x)*(pos1.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos2.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos2.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos1-pos2) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((u2.y-u0.y)*edgenormal.x +
(u0.x-u2.x)*edgenormal.y)/shoelace; // see coeffs on ourpos in shoelace
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
// A_1~pos0 A_2~pos1
coeff = ((pos1.y-ourpos.y)*edgenormal.x +
(ourpos.x-pos1.x)*edgenormal.y)/shoelace; // something suspicious: that we had to change smth here.
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += 0.333333333333333*(0.5*(u0.x+u2.x)+ourpos.x+pos1.x)*edgenormal.x;
*/
// CHECKED ALL THAT
// Heavy calcs are actually here: six divisions!
LapA /= (area + 1000.0);
B /= (area + 1000.0); // DEBUG
} else {
// frill - leave Lap A = B = 0
}
p_Lap_A[index] = LapA;
p_B[index] = B;
// Similar routine will be needed to create grad A ... or Adot ... what a waste of calcs.
// Is there a more sensible way: only do a mesh move every 10 steps -- ??
// Then what do we do on the intermediate steps -- that's a problem -- flowing Eulerian fluid
// will give the right change in pressure, but then mesh has to catch up. Still that might be a thought.
// Next consideration: Lap A on central.
// Idea for doing at same time: (don't do it -- too much atomicAdd, I do not trust)
// ___ only certain major cells "belong" to this tri tile.
// ___ write to a given output from our total effect coming from this tile's tris.
// ____ when we hit a central cell outside this tile, send it atomicAdd to an array
// that collects up all the extra contribs to it.
// __ then we just reload, sum 2 things and divide
// However, atomicAdd fp64 only exists on Compute 6.0 :-(
// Workaround taken from http://stackoverflow.com/questions/16077464/atomicadd-for-double-on-gpu
// Eventually decided not to use but to carry on with half the threads to target centrals in this routine.
/*
// COMMENTED FOR DEBUG:
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
// Create Lap A for centrals.
// Outermost has to supply good boundary conditions for the outer edge.
index = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x;
structural info = p_info[index];
memcpy(IndexTri+threadIdx.x*MAXNEIGH_d, p_IndexTri+index*MAXNEIGH_d,
sizeof(long)*MAXNEIGH_d);
f64_vec3 A0 = A_vert[threadIdx.x]; // can ditch
f64_vec2 u0 = vertex_pos[threadIdx.x];
f64_vec3 A1,A2,A3;
f64_vec2 u1,u2,u3;
f64 shoelace, area = 0.0;
f64_vec2 edgenormal;
// As before we need 4 A values and positions at a time. Now 3 all come from tris.
LapA.x = 0.0; LapA.y = 0.0; LapA.z = 0.0;
B.x = 0.0; B.y = 0.0; B.z = 0.0;
// Note that I found out, unroll can be slower if registers are used up (!) CAUTION:
long iindextri;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// In this case there are extra triangles, for frills.
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+info.neigh_len];
} else {
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+info.neigh_len-1];
}
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A3 = A_tri[iindextri-StartTri];
u3 = tri_centroid[iindextri-StartTri];
} else {
A3 = p_A_tri[iindextri];
u3 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u3.x > 0.5*GRADIENT_X_PER_Y*u3.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A3 = Anticlock_rotate3(A3);
u3 = Anticlock_rotate2(u3);
};
if ((u3.x < -0.5*GRADIENT_X_PER_Y*u3.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A3 = Clockwise_rotate3(A3);
u3 = Clockwise_rotate2(u3);
};
}
// Initial situation: inext = 1, i = 0, iprev = -1
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d]; // + 0
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A2 = A_tri[iindextri-StartTri];
u2 = tri_centroid[iindextri-StartTri];
} else {
A2 = p_A_tri[iindextri];
u2 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u2.x > 0.5*GRADIENT_X_PER_Y*u2.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A2 = Anticlock_rotate3(A2);
u2 = Anticlock_rotate2(u2);
};
if ((u2.x < -0.5*GRADIENT_X_PER_Y*u2.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A2 = Clockwise_rotate3(A2);
u2 = Clockwise_rotate2(u2);
};
}
short limit = info.neigh_len;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
limit++;
// Ordinarily, number of tri pairs = number of tris = number of neighs
// For outermost, number of neighs = 4 but the number of tri pairs to use = 5.
// Now we attempt to go all the way round: A and u from frills are valid and we can
// form a quadrilateral
int inext = 0; // will be ++ straight away.
#pragma unroll MAXNEIGH_d
for (short i = 0; i < limit; i++)
{
inext++;
if (inext == limit) inext = 0;
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+inext];
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A1 = A_tri[iindextri-StartTri];
u1 = tri_centroid[iindextri-StartTri];
} else {
A1 = p_A_tri[iindextri];
u1 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u1.x > 0.5*GRADIENT_X_PER_Y*u1.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A1 = Anticlock_rotate3(A1);
u1 = Anticlock_rotate2(u1);
};
if ((u1.x < -0.5*GRADIENT_X_PER_Y*u1.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A1 = Clockwise_rotate3(A1);
u1 = Clockwise_rotate2(u1);
};
}
// Affect LapA,B:
// ==============
// edge_cnr1 = (u1+u2+u0)*0.333333333333333;
edgenormal.x = 0.333333333333333*(u1.y-u3.y);
edgenormal.y = 0.333333333333333*(u3.x-u1.x);
// edgenormal to point at u2:
if ((u2-u1).dot(edgenormal) < 0.0)
{
edgenormal.x=-edgenormal.x; edgenormal.y = -edgenormal.y;
}
shoelace = (u0.x-u2.x)*(u1.y-u3.y) +
(u1.x-u3.x)*(u2.y-u0.y);
//coeff = ((u1.y-u3.y)*edgenormal.x + (u3.x-u1.x)*edgenormal.y)/shoelace;
//LapA += coeff*(A0-A2);
LapA += (A0-A2)*(((u1.y-u3.y)*edgenormal.x + (u3.x-u1.x)*edgenormal.y)/shoelace);
//coeff = ((u2.y-u0.y)*edgenormal.x + (u0.x-u2.x)*edgenormal.y)/shoelace;
LapA += (A1-A3)*(((u2.y-u0.y)*edgenormal.x + (u0.x-u2.x)*edgenormal.y)/shoelace);
B.x += (TWELTH*(A1.z+A3.z)+FIVETWELTHS*(A0.z+A2.z))*edgenormal.y;
B.y += -(TWELTH*(A1.z+A3.z)+FIVETWELTHS*(A0.z+A2.z))*edgenormal.x;
B.z += (TWELTH*(A1.y+A3.y)+FIVETWELTHS*(A0.y+A2.y))*edgenormal.x
-(TWELTH*(A1.x+A3.x)+FIVETWELTHS*(A0.x+A2.x))*edgenormal.y;
area += (0.3333333333333333*(0.5*(u1.x+u3.x)+u2.x+u0.x))*edgenormal.x;
// ( grad x )_x
// move round A values and positions:
// ----------------------------------
A3 = A2;
u3 = u2;
A2 = A1;
u2 = u1;
}
// if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
// Add on the outer edge: dA/dr times length of edge.
// dAx/dr = -(Ax r)/r^2 = -Ax/r
// We find a way just to go all the way round instead.
LapA /= area;
B /= area;
// Save off:
p_Lap_A_central[index] = LapA; // best way may be: if we know start of central stuff, can send
p_B_central[index] = B; // into the 1 array where it belongs.
}
*/
// =============================================================================
// Understand the following important fact:
// If you will use 63 registers (and this routine surely will -
// we have positions 7 x 2 x 2 = 28 registers, A 7 x 3 x 2 = 42 registers
// -- though we could try taking into account, 1 dimension at a time)
// Then the max thread throughput per SM is 512 which means that we will get
// no penalty from using up to 12 doubles in shared memory per thread.
// =============================================================================
// That does mean L1 has room for only 4 doubles. It is not big compared to the register file itself.
}
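// ---------------------------------------------------------------------------
// For reference: the pre-Compute-6.0 workaround for fp64 atomicAdd that the comments
// above refer to (the usual atomicCAS loop). Named atomicAddDouble here so it does not
// clash with the built-in double atomicAdd available on sm_60 and later; a sketch only,
// nothing in this file calls it.
__device__ double atomicAddDouble(double* address, double val)
{
	unsigned long long int* address_as_ull = (unsigned long long int*)address;
	unsigned long long int old = *address_as_ull, assumed;
	do {
		assumed = old;
		old = atomicCAS(address_as_ull, assumed,
						__double_as_longlong(val + __longlong_as_double(assumed)));
	} while (assumed != old); // retry if another thread changed *address meanwhile
	return __longlong_as_double(old);
}
// ---------------------------------------------------------------------------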
__global__ void Kernel_Rel_advect_v_tris (
f64 h,
structural * __restrict__ p_info,
nT * __restrict__ p_nT_minor,
nT * __restrict__ p_nT_minor_new,
f64_vec2 * __restrict__ p_v_overall_minor,
f64_vec3 * __restrict__ p_v_minor,
f64_vec2 * __restrict__ p_tri_centroid,
LONG3 * __restrict__ p_tri_corner_index,
LONG3 * __restrict__ p_tri_neigh_index,
CHAR4 * __restrict__ p_tri_per_info,
CHAR4 * __restrict__ p_tri_per_neigh, // is neighbour re-oriented rel to this
f64 * __restrict__ p_area_old, // get from where?
f64 * __restrict__ p_area_new,
f64_vec3 * __restrict__ p_v_out
)
{
// Idea of momentum advection
// ==========================
// n_tri has been inferred from n_major
// Average nv to the edge between minors;
// find mom flow
// ARE WE CLEAR ABOUT USING nv AT ALL? NEED TO CHECK CORRESPONDENCE ---
// v = (n_k area_k v_k + additional mom)/(
// Need rel to v_overall ...
// Let's assume this kernel is called for threads corresp to ##triangles##.
// This info needed to do the "more proper" way with v_edge subtracted from each v that gets averaged.
/* __shared__ f64_vec2 tri_centroid[blockDim.x]; // + 2
__shared__ f64_vec2 vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE]; // + 1
__shared__ f64_vec3 p_v_tri[blockDim.x]; // + 3
__shared__ f64_vec3 p_v_central[SIZE_OF_MAJOR_PER_TRI_TILE]; // + 1.5
__shared__ f64 p_n_central[SIZE_OF_MAJOR_PER_TRI_TILE]; // + 0.5
__shared__ f64 p_n_tri[blockDim.x]; // + 1 = 9
__shared__ f64_vec2 p_v_overall[blockDim.x]; // +2
__shared__ f64_vec2 p_v_overall[SIZE_OF_MAJOR_PER_TRI_TILE]; // +1 needs to be limited to vertices --
*/
// 9+3 = 12 so that leaves no room for tri perflag - but that's OK.
__shared__ f64_vec2 tri_centroid[threadsPerTileMinor];
__shared__ f64_vec3 v_tri[threadsPerTileMinor];
__shared__ f64_vec2 n_vrel_tri[threadsPerTileMinor];
// For central cells, going to have to run all over again with the following
// replaced by __shared__ long IndexTri[MAXNEIGH_d*SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec2 n_vrel_central[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec3 v_central[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec2 vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE]; // 2 + 1 + 3+ 1.5 +2 +1 = 10.5
// It is more certain that nothing vile goes wrong if we stick with loading
// the tri index list for each central.
// But we don't have room for that here because we are sharing v_central.
// So we basically have to write 2 routines, even doing it this way. :-[
// Consider chopping and changing to the alternative: how can we ensure that we
// get a contiguous access each time we do a load and go through? We can't, because it may
// do extra bus loads for some threads in-between.
// So. Stick with inelegant ways.
// __shared__ char shared_per[blockDim.x]; // tri periodic info --- there may be other more sensible ways though
// I'm seeing now that there is sense in just loading a CHAR4 with the information in.
// ?
// Then it doesn't need to even do a load of tests - it'll decide beforehand on CPU where it needs to
// have a periodic rotation looking at the next triangle.
// Loading per_info for itself and putting into shared is reasonable mind you. It's not an extra load.
// But sometimes there IS an extra load then, because we have to ask edge triangles about their periodic data
// and that is not a contiguous fetch.
// Keep shared memory cleaner. Okay then. So we COULD then fit in all the other things separately if we wanted.
// But how would we ideally do the advect formula then?
// Overall v_edge comes from the motion of the points that define the edge.
// But actually there is no reason to average v_edge along the edge.
// Look instead at each end.
// There we have v_overall = average of 3. nv = average of 3.
// So then do I want to calc v_overall, push it back into each of them: sum n_i (v_i - [v_overall = avg])
// These seem like niceties.
// ______________________________________________________________________________
// can create on our own triangle but how shall we create for edge?
// Use vertices nearby edge instead??
// These are the ones moving and therefore moving the edge, not the tri centroid.
// IMPORTANT:
// Another alternative way is to infer the edge motion from the 4 relevant points, but use
// only the opposing 2, or use 5/12, to create v of species.
// To get actual conservation of momentum we have to run again and divide,
// for each cell, by the NEW N_k+1 that comes from the averaged n_k+1 and area_k+1.
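// In short, the code below accumulates over the 6 edges of this minor
//   Nv -= h * [ edge-average of n(v - v_overall) ] . edgenormal * 0.5*(v_out + v_own),
// then adds the old momentum n_k area_k v_k and finally divides by n_k+1 area_k+1
// (loaded as dest_n * area_new) to give the advected v.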
long StartTri = blockIdx.x*threadsPerTileMinor;
//long EndMinor = (blockIdx.x+1)*blockDim.x; // can ditch
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long index = blockIdx.x*blockDim.x + threadIdx.x;
// Valid traffic of momentum:
tri_centroid[threadIdx.x] = p_tri_centroid[index];
f64_vec3 v_own = p_v_minor[index];
v_tri[threadIdx.x] = v_own;
f64 n_own = p_nT_minor[index].n;
f64_vec2 v_overall = p_v_overall_minor[index];
f64_vec2 nvrel;
nvrel.x = n_own*(v_own.x - v_overall.x);
nvrel.y = n_own*(v_own.y - v_overall.y);
n_vrel_tri[threadIdx.x] = nvrel;
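// nvrel is n * (v - v_overall): the number flux density of this species relative to
// the moving mesh, which is what gets dotted with the edge normals below.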
// What makes this way better?
// Isn't it better to put
// store n_s, store v_overall, store v_s.
CHAR4 perinfo = p_tri_per_info[index];
// CHAR4 perneighinfo = p_tri_per_neigh[index];
// 3 chars for neighs per0,1,2 to show rel rotation; 'periodic' is just padding.
// If we load tri_per_info for neighbours then ?
// If the neigh is periodic and we are not, we can tell from x-values.
// How was it done for Lap A??
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
structural info = p_info[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
vertex_pos[threadIdx.x] = info.pos;
v_central[threadIdx.x] = p_v_minor[ BEGINNING_OF_CENTRAL + blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x ];
f64 n = p_nT_minor[ BEGINNING_OF_CENTRAL + blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x ].n;
v_overall = p_v_overall_minor[ BEGINNING_OF_CENTRAL + blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x ];
nvrel.x = n*(v_central[threadIdx.x].x - v_overall.x);
nvrel.y = n*(v_central[threadIdx.x].y - v_overall.y);
n_vrel_central[threadIdx.x] = nvrel;
// Saved data vertex_pos, v_central, n_vrel_central | for each vertex in tile.
}
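// So for each vertex owned by this tile we have cached position, v and n(v - v_overall)
// in shared memory; the corner lookups below fall back to global loads outside the tile.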
__syncthreads();
nvrel = n_vrel_tri[threadIdx.x];
if (perinfo.flag == DOMAIN_TRIANGLE)
{
// The other cases:
// CROSSING_INS, we assume v = 0 for now
// OUTER_FRILL, v = 0
// INNER_TRIANGLE, v = 0
//nT nTsrc = p_nT_shared[threadIdx.x];
f64 area_old = p_area_old[index]; // where getting these from?
f64 area_new = p_area_new[index];
LONG3 corner_index = p_tri_corner_index[index];
LONG3 neightri = p_tri_neigh_index[index];
CHAR4 perneigh = p_tri_per_neigh[index];
// Of course, if we were smart we could roll these into 3 longs
// in both cases, because we only need 24 bits to describe index.
// Ultimately that would be better.
f64_vec2 pos0, pos1, pos2, edgenormal;
f64_vec2 u0,u1,u2, ownpos;
f64_vec3 Nv(0.0,0.0,0.0);
// Create pos0,1,2 and adjust for periodic:
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos0 = vertex_pos[corner_index.i1-StartMajor];
} else {
structural info = p_info[corner_index.i1];
pos0 = info.pos;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos1 = vertex_pos[corner_index.i2-StartMajor];
} else {
structural info = p_info[corner_index.i2];
pos1 = info.pos;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos2 = vertex_pos[corner_index.i3-StartMajor];
} else {
structural info = p_info[corner_index.i3];
pos2 = info.pos;
};
if (perinfo.per0 == NEEDS_ANTI) {
pos0 = Anticlock_rotate2(pos0);
}
if (perinfo.per0 == NEEDS_CLOCK) {
pos0 = Clockwise_rotate2(pos0);
}
if (perinfo.per1 == NEEDS_ANTI) {
pos1 = Anticlock_rotate2(pos1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
pos1 = Clockwise_rotate2(pos1);
}
if (perinfo.per2 == NEEDS_ANTI) {
pos2 = Anticlock_rotate2(pos2);
}
if (perinfo.per2 == NEEDS_CLOCK) {
pos2 = Clockwise_rotate2(pos2);
}
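// per0/per1/per2 say whether each corner sits across the periodic seam from this
// triangle and so needs rotating into our frame before any geometry is done with it.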
// };
// Create u0,1,2 and adjust for periodic:
// CHAR4 tri_rotate(0,0,0,0); // 4 chars but really using 3
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
u0 = tri_centroid[neightri.i1-StartTri];
// perneigh = shared_per[neightri.i1-StartTri];
} else {
u0 = p_tri_centroid[neightri.i1];
// CHAR4 perinfoneigh = p_tri_per_info[neightri.i1];
// perneigh = perinfoneigh.periodic; // just load and use 1 char ?...
}
//if (perneigh != perinfo.periodic) {
// // Test to see if we need to rotate the neighbour centroid and A:
// if ((perneigh != 0) && (ownpos.x > 0.0)) {
// // Avoid loading per flags again: save this as a char
// tri_rotate.per0 = 1; // rotate it clockwise
// u0 = Clockwise_rotate2(u0);
// };
// if ((perinfo.periodic != 0) && (u0.x > 0.0)) {
// u0 = Anticlock_rotate2(u0);
// tri_rotate.per0 = -1;
// };
//};
// ^^ Did I decide this was bad for some reason? Better to load
// just a char4 for periodic relationship to neighs? COULD BE.
// When we load all of these for edge ones it's individual.
// 64 accesses vs 256/12. 256/8 = 32 so it's better this way round.
// HMM
if (perneigh.per0 == NEEDS_ANTI)
u0 = Anticlock_rotate2(u0);
if (perneigh.per0 == NEEDS_CLOCK)
u0 = Clockwise_rotate2(u0);
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + threadsPerTileMinor))
{
u1 = tri_centroid[neightri.i2 - StartTri];
} else {
u1 = p_tri_centroid[neightri.i2];
};
if (perneigh.per1 == NEEDS_ANTI)
u1 = Anticlock_rotate2(u1);
if (perneigh.per1 == NEEDS_CLOCK)
u1 = Clockwise_rotate2(u1);
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + threadsPerTileMinor))
{
u2 = tri_centroid[neightri.i3 - StartTri];
} else {
u2 = p_tri_centroid[neightri.i3];
};
if (perneigh.per2 == NEEDS_ANTI)
u2 = Anticlock_rotate2(u2);
if (perneigh.per2 == NEEDS_CLOCK)
u2 = Clockwise_rotate2(u2);
// Let's say that we only need to take the average v with the opposite cell.
// Edge facing tri 0:
edgenormal.x = 0.333333333333333*(pos1.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos1.x);
if ((pos0-pos1).dot(edgenormal) > 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// The edge is moving with ?
// Corners at (1/3)(ownpos + u2 + pos0),(1/3)(ownpos+u2 + pos1)
// v_overall only really matters insofar that it has a dotproduct with edgenormal.
// Think about this clearly.
// v_overall was generated in major cells.
// Then it was averaged out to triangles.
// Here the edge endpoints are formed by taking the average of 2 centroids + 1 vertex.
// Therefore we do want to use v_overall from those 4 locations.
f64_vec2 nvrel_prev, nvrel_out, nvrel_next;
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
nvrel_prev = n_vrel_central[corner_index.i2-StartMajor];
} else {
f64_vec3 v = p_v_minor[ BEGINNING_OF_CENTRAL + corner_index.i2];
f64 n = p_nT_minor[ BEGINNING_OF_CENTRAL + corner_index.i2].n;
v_overall = p_v_overall_minor[ BEGINNING_OF_CENTRAL + corner_index.i2];
nvrel_prev.x = n*(v.x - v_overall.x);
nvrel_prev.y = n*(v.y - v_overall.y);
};
if (perinfo.per1 == NEEDS_ANTI)
nvrel_prev = Anticlock_rotate2(nvrel_prev);
if (perinfo.per1 == NEEDS_CLOCK)
nvrel_prev = Clockwise_rotate2(nvrel_prev);
// Every single one of these rotates will need to be checked.
f64_vec3 v_out, vnext;
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
nvrel_out = n_vrel_tri[neightri.i1-StartTri];
v_out = v_tri[neightri.i1-StartTri];
} else {
f64_vec3 v = p_v_minor [ neightri.i1];
f64 n = p_nT_minor[neightri.i1].n;
v_overall = p_v_overall_minor[neightri.i1];
// I do not say this is the best way. Only that it is a way.
nvrel_out.x = n*(v.x- v_overall.x);
nvrel_out.y = n*(v.y - v_overall.y);
v_out = v;
};
if (perneigh.per0 == NEEDS_ANTI)
{
nvrel_out = Anticlock_rotate2(nvrel_out);
v_out = Anticlock_rotate3(v_out);
};
if (perneigh.per0 == NEEDS_CLOCK)
{
nvrel_out = Clockwise_rotate2(nvrel_out);
v_out = Clockwise_rotate3(v_out);
};
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
nvrel_next = n_vrel_central[corner_index.i3-StartMajor];
vnext = v_central[corner_index.i3-StartMajor];
} else {
vnext = p_v_minor [BEGINNING_OF_CENTRAL + corner_index.i3];
f64 n = p_nT_minor[BEGINNING_OF_CENTRAL + corner_index.i3].n;
v_overall = p_v_overall_minor[BEGINNING_OF_CENTRAL + corner_index.i3];
nvrel_next.x = n*(vnext.x - v_overall.x);
nvrel_next.y = n*(vnext.y - v_overall.y);
// Need 'vnext' to avoid loading data twice.
};
// So we keep how many in memory? 3 out of 6. Then we move round.
if (perinfo.per2 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perinfo.per2 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
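// The SIXTH average weights the two cells sharing the edge (own and out) by 1/3 each
// and the two flanking minors (prev and next) by 1/6 each; the advected quantity is
// the simple average of v on the two sides of the edge.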
// ....................................
// Edge facing point 2:
edgenormal.x = 0.333333333333333*(pos1.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos1.x);
if ((pos2-pos1).dot(edgenormal) < 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is tri 1
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + threadsPerTileMinor))
{
nvrel_next = n_vrel_tri[neightri.i2-StartTri];
vnext = v_tri[neightri.i2-StartTri];
} else {
f64_vec3 v = p_v_minor [ neightri.i2];
f64 n = p_nT_minor[neightri.i2].n;
v_overall = p_v_overall_minor[neightri.i2];
// I do not say this is the best way. Only that it is a way.
nvrel_next.x = n*(v.x- v_overall.x);
nvrel_next.y = n*(v.y - v_overall.y);
vnext = v;
};
if (perneigh.per1 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perneigh.per1 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// Edge facing tri 1:
edgenormal.x = 0.333333333333333*(pos2.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos2.x);
if ((pos1-pos0).dot(edgenormal) > 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is point 0
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
nvrel_next = n_vrel_central[corner_index.i1-StartMajor];
vnext = v_central[corner_index.i1-StartMajor];
} else {
vnext = p_v_minor [BEGINNING_OF_CENTRAL + corner_index.i1];
f64 n = p_nT_minor[BEGINNING_OF_CENTRAL + corner_index.i1].n;
v_overall = p_v_overall_minor[BEGINNING_OF_CENTRAL + corner_index.i1];
nvrel_next.x = n*(vnext.x - v_overall.x);
nvrel_next.y = n*(vnext.y - v_overall.y);
// Need 'vnext' to avoid loading data twice.
};
if (perinfo.per0 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perinfo.per0 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// Edge facing point 0:
edgenormal.x = 0.333333333333333*(pos2.y-pos1.y);
edgenormal.y = 0.333333333333333*(pos1.x-pos2.x);
if ((pos0-pos1).dot(edgenormal) < 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is tri 2 :
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + threadsPerTileMinor))
{
nvrel_next = n_vrel_tri[neightri.i3-StartTri];
vnext = v_tri[neightri.i3-StartTri];
} else {
f64_vec3 v = p_v_minor [ neightri.i3];
f64 n = p_nT_minor[neightri.i3].n;
v_overall = p_v_overall_minor[neightri.i3];
// I do not say this is the best way. Only that it is a way.
nvrel_next.x = n*(v.x- v_overall.x);
nvrel_next.y = n*(v.y - v_overall.y);
vnext = v;
};
if (perneigh.per2 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perneigh.per2 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// Edge facing tri 2:
edgenormal.x = 0.333333333333333*(pos0.y-pos1.y);
edgenormal.y = 0.333333333333333*(pos1.x-pos0.x);
if ((pos2-pos1).dot(edgenormal) > 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is point 1
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
nvrel_next = n_vrel_central[corner_index.i2-StartMajor];
vnext = v_central[corner_index.i2-StartMajor];
} else {
vnext = p_v_minor [BEGINNING_OF_CENTRAL + corner_index.i2];
f64 n = p_nT_minor[BEGINNING_OF_CENTRAL + corner_index.i2].n;
v_overall = p_v_overall_minor[BEGINNING_OF_CENTRAL + corner_index.i2];
nvrel_next.x = n*(vnext.x - v_overall.x);
nvrel_next.y = n*(vnext.y - v_overall.y);
// Need 'vnext' to avoid loading data twice.
};
if (perinfo.per1 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perinfo.per1 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// Edge facing point 1:
edgenormal.x = 0.333333333333333*(pos2.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos2.x);
if ((pos1-pos0).dot(edgenormal) < 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is tri 0
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
nvrel_next = n_vrel_tri[neightri.i1-StartTri];
// vnext = v_tri[neightri.i1-StartTri];
} else {
f64_vec3 v = p_v_minor [ neightri.i1];
f64 n = p_nT_minor[neightri.i1].n;
v_overall = p_v_overall_minor[neightri.i1];
// I do not say this is the best way. Only that it is a way.
nvrel_next.x = n*(v.x - v_overall.x);
nvrel_next.y = n*(v.y - v_overall.y);
// vnext = v;
};
if (perneigh.per0 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
// vnext = Anticlock_rotate2(vnext);
};
if (perneigh.per0 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
// vnext = Clockwise_rotate2(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// that's it - that was 6.
// -------------------------------------------------
Nv += n_own*v_own*area_old; // n_own and v_own are only assigned once above
// (the neighbour loads use their own local n), so reusing them here is safe.
// save off:
f64 dest_n = p_nT_minor_new[index].n;
p_v_out[index] = (Nv / (dest_n*area_new));
////if (index == 43654) {
//// printf("43654: %1.8E %1.8E %1.8E | %1.8E %1.8E | %1.8E %1.8E | %1.8E %1.8E %1.8E \n",
//// Nv.x,Nv.y,Nv.z,dest_n,area_new, n_own, area_old,v_own.x,v_own.y,v_own.z);
//// // dest_n comes out 0 --- yet when we print out from host code it is not 0.
////}
////
} else {
// Set v = 0?
};
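// Note: in this branch p_v_out[index] is never written, so the output keeps whatever
// value it already held; presumably non-domain minors are set (e.g. to zero) elsewhere.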
// Now move on to centrals with the same data in memory.
// Unfortunately we can't -- unless we figured out how to overwrite the central n data with
// indextri data
// Or, do what we should have done, and make indextri[0] a contiguous fetch so no array storage is needed.
}
__global__ void Kernel_Rel_advect_v_central(
f64 const h,
structural * __restrict__ p_info,
f64_vec2 * __restrict__ p_tri_centroid,
nT * __restrict__ p_nT,
nT * __restrict__ p_nT_minor,
nT * __restrict__ p_nT_new,
f64_vec3 * __restrict__ p_v,
f64_vec2 * __restrict__ p_v_overall_minor,
long * __restrict__ p_indextri,
char * __restrict__ pPBCtri,
f64 * __restrict__ p_area_old,
f64 * __restrict__ p_area_new,
f64_vec3 * __restrict__ p_v_out
// Not making a whole lot of sense: we need nT_minor, for tris?
)
{
// Maybe we SHOULD change it to put indextri packed the other way ---> be able
// to merge this into the tris routine.
// That is the good alternative. Using scatter-not-gather with atomicAdd and doing as part of tri code is not a good way for us.
// what other way is there?
// We do want to have thread run for each central.
// Or...
// stuck with atomic add between threads even if we could arrange
// it to be not between blocks by running certain colours at once.
// Gather not scatter.
// Need indextri ... would have been far better
// to have put contiguous storage for first, second, third index.
//
// OK - stick with incredibly inelegant way for now,
// know that we should eventually change it given time, then we can merge
// the central calcs routine into this tri calcs routine.
// nvm
// Alternative for central rel v:
__shared__ f64_vec2 tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 2
__shared__ f64_vec3 v_tri[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 3
__shared__ f64_vec2 n_vrel_tri[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 2
//__shared__ char shared_per[SIZE_OF_TRI_TILE_FOR_MAJOR]; // tri periodic info
// Perhaps better to load in PBCtri list instead.
// I think so? Saves interrogating tris outside the tile.
__shared__ long IndexTri[threadsPerTileMajor*MAXNEIGH_d];
__shared__ char PBCtri[threadsPerTileMajor*MAXNEIGH_d];
// per thread: 2*7 + 6 + 1.5 = 21.5 < 24
// We'd bring down to 14 if we chose to do contiguous index loads per neigh;
// however that feels like it has a high chance of not working, unless we did syncthreads.
long index = blockDim.x*blockIdx.x + threadIdx.x;
v_tri[threadIdx.x] = p_v[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
tri_centroid[threadIdx.x] = p_tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
f64 n = p_nT_minor[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x].n;
f64_vec2 v_overall = p_v_overall_minor[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
f64_vec2 nvrel;
nvrel.x = n*(v_tri[threadIdx.x].x - v_overall.x);
nvrel.y = n*(v_tri[threadIdx.x].y - v_overall.y);
n_vrel_tri[threadIdx.x] = nvrel;
long const StartTri = SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x;
v_tri[threadIdx.x + blockDim.x] = p_v[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + blockDim.x + threadIdx.x];
tri_centroid[threadIdx.x + blockDim.x] = p_tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + blockDim.x + threadIdx.x];
n = p_nT_minor[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + blockDim.x + threadIdx.x].n;
v_overall = p_v_overall_minor[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + blockDim.x + threadIdx.x];
nvrel.x = n*(v_tri[threadIdx.x + blockDim.x].x - v_overall.x);
nvrel.y = n*(v_tri[threadIdx.x + blockDim.x].y - v_overall.y);
n_vrel_tri[threadIdx.x + blockDim.x] = nvrel;
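// The two loads above appear to assume SIZE_OF_TRI_TILE_FOR_MAJOR == 2*blockDim.x, i.e.
// each major-tile thread caches two triangle minors (threadIdx.x and threadIdx.x + blockDim.x).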
__syncthreads();
structural info = p_info[index];
f64_vec2 ownpos = info.pos; // needed below for the edge-orientation test (was commented out, leaving ownpos uninitialized)
if (info.flag == DOMAIN_VERTEX) {
// otherwise???
memcpy(IndexTri + threadIdx.x*MAXNEIGH_d,p_indextri + index*MAXNEIGH_d, sizeof(long)*MAXNEIGH_d);
memcpy(PBCtri + threadIdx.x*MAXNEIGH_d, pPBCtri + index*MAXNEIGH_d, sizeof(char)*MAXNEIGH_d);
// For each triangle abutting this central, we want to know things like --
// where are the corners of the edge .. this requires the neighbouring centroids also.
f64_vec2 edgenormal,
u_prev, u_out, u_next, nvrel_prev, nvrel_out, nvrel_next; // 7 x 2 here (+ ownpos above) = 8 x 2
f64_vec3 Nv(0.0,0.0,0.0); // + 3
f64_vec3 v_out, v_next, v; // + 9 = 28
v = p_v[BEGINNING_OF_CENTRAL + index];
n = p_nT_minor[BEGINNING_OF_CENTRAL + index].n;
f64 const n_central = n; // keep a copy: 'n' is reused as scratch for neighbour loads in the loop below
v_overall = p_v_overall_minor[BEGINNING_OF_CENTRAL + index];
// ???????????????????????????????????????????????????????
nvrel.x = n*(v.x-v_overall.x);
nvrel.y = n*(v.y-v_overall.y);
// Assume we load in u_prev:
long indextri = IndexTri[threadIdx.x*MAXNEIGH_d + info.neigh_len-1]; // bad news, neigh_len is not tri_len
// ###############################################################################
// OOPS -- it's not true at the edge of memory, is it, so what will happen there?
// ###############################################################################
if ((indextri >= StartTri) && (indextri < StartTri + SIZE_OF_TRI_TILE_FOR_MAJOR))
{
u_prev = tri_centroid[indextri-StartTri];
nvrel_prev = n_vrel_tri[indextri-StartTri];
} else {
u_prev = p_tri_centroid[indextri];
f64_vec3 v_ = p_v[indextri];
n = p_nT_minor[indextri].n;
v_overall = p_v_overall_minor[indextri];
nvrel_prev.x = n*(v_.x - v_overall.x);
nvrel_prev.y = n*(v_.y - v_overall.y);
};
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + info.neigh_len-1];
if (PBC == NEEDS_CLOCK)
{
// Always check these rotate flags throughout.
u_prev = Clockwise_rotate2(u_prev);
nvrel_prev = Clockwise_rotate2(nvrel_prev);
};
if (PBC == NEEDS_ANTI)
{
u_prev = Anticlock_rotate2(u_prev);
nvrel_prev = Anticlock_rotate2(nvrel_prev);
};
indextri = IndexTri[threadIdx.x*MAXNEIGH_d + 0]; // was IndexTri[0]: missing the per-thread offset
if ((indextri >= StartTri) && (indextri < StartTri + SIZE_OF_TRI_TILE_FOR_MAJOR))
{
u_out = tri_centroid[indextri-StartTri];
v_out = v_tri[indextri-StartTri];
nvrel_out = n_vrel_tri[indextri-StartTri];
} else {
u_out = p_tri_centroid[indextri];
v_out = p_v[indextri];
n = p_nT_minor[indextri].n;
v_overall = p_v_overall_minor[indextri];
nvrel_out.x = n*(v_out.x - v_overall.x);
nvrel_out.y = n*(v_out.y - v_overall.y);
};
PBC = PBCtri[threadIdx.x*MAXNEIGH_d + 0]; // was PBCtri[0]: missing the per-thread offset
if (PBC == NEEDS_CLOCK)
{
u_out = Clockwise_rotate2(u_out);
nvrel_out = Clockwise_rotate2(nvrel_out);
v_out = Clockwise_rotate3(v_out);
};
if (PBC == NEEDS_ANTI)
{
u_out = Anticlock_rotate2(u_out);
nvrel_out = Anticlock_rotate2(nvrel_out);
v_out = Anticlock_rotate3(v_out);
};
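// At this point u_prev / nvrel_prev hold the triangle at slot neigh_len-1 and
// u_out / v_out / nvrel_out hold triangle 0; the loop below walks once around the
// vertex, one shared edge per iteration, rolling prev <- out <- next as it goes.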
int i,inext;
for (i = 0; i < info.neigh_len; i++)
{
inext = i+1; if (inext == info.neigh_len) inext = 0;
indextri = IndexTri[threadIdx.x*MAXNEIGH_d + inext];
if ((indextri >= StartTri) && (indextri < StartTri + SIZE_OF_TRI_TILE_FOR_MAJOR))
{
u_next = tri_centroid[indextri-StartTri];
v_next = v_tri[indextri-StartTri];
nvrel_next = n_vrel_tri[indextri-StartTri];
} else {
u_next = p_tri_centroid[indextri];
v_next = p_v[indextri];
n = p_nT_minor[indextri].n;
v_overall = p_v_overall_minor[indextri];
nvrel_next.x = n*(v_next.x - v_overall.x);
nvrel_next.y = n*(v_next.y - v_overall.y);
}
PBC = PBCtri[threadIdx.x*MAXNEIGH_d + inext];
if (PBC == NEEDS_CLOCK)
{
u_next = Clockwise_rotate2(u_next);
nvrel_next = Clockwise_rotate2(nvrel_next);
v_next = Clockwise_rotate3(v_next);
};
if (PBC == NEEDS_ANTI)
{
u_next = Anticlock_rotate2(u_next);
nvrel_next = Anticlock_rotate2(nvrel_next);
v_next = Anticlock_rotate3(v_next);
};
// edgenormal:
edgenormal.x = u_prev.y-u_next.y;
edgenormal.y = u_next.x-u_prev.x;
if ((ownpos-u_prev).dot(edgenormal) > 0.0) {
// NOT SURE ABOUT THAT TEST ?
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
}
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_prev + nvrel_next + nvrel_out + nvrel_out).dot(edgenormal))
*(0.5*(v_out + v));
u_prev = u_out;
u_out = u_next;
v_out = v_next;
nvrel_prev = nvrel_out;
nvrel_out = nvrel_next;
}
// Now how does it end?
f64 area_old = p_area_old[index];
f64 area_new = p_area_new[index];
Nv += n_central*v*area_old; // use the saved central n: plain 'n' was overwritten during the loop; v was not.
f64 dest_n = p_nT_new[index].n;
p_v_out[index] = (Nv / (dest_n*area_new));
} else {
f64_vec3 zero(0.0,0.0,0.0);
p_v_out[index] = zero;
};
}
// Grad phi: first put on triangles from major
__global__ void Kernel_Compute_grad_phi_Te_centrals(
structural * __restrict__ p_info_sharing, // for vertex positions & get has_periodic flag
f64 * __restrict__ p_phi,
nT * __restrict__ p_nT_elec,
long * __restrict__ p_indexneigh,
// Output:
f64_vec2 * __restrict__ p_grad_phi,
f64_vec2 * __restrict__ p_grad_Te
)
{
// Bad approach? : scatter instead of gather.
// This thread works to create grad phi on tris because we otherwise,
// having to load it in from tris, also have to load in periodic flags
// regarding them.
// Easier to compute it here -- computing it multiple times for each tri
// but that probably is cheaper. Less shared mem here than when we
// load to aggregate from tris - we then need to load area, grad phi, PB flag for tri
// vs -- phi and position for major
// Then instead of doing tri minors separately, is more efficient to put in a scatter
// data here to affect tri minors: but
// That requires that we load IndexTri and do a random write access???
// Maybe we should keep the tris routine separate -- that's simplest for now.
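// Method below: integrate grad phi (and grad Te) over the closed polygon of neighbouring
// vertices as sum_edges 0.5*(f1+f2)*edge_normal, and normalise by the same contour sum
// applied to x (grad_x_integrated_x), which recovers the polygon area.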
__shared__ f64 p_phi_shared[threadsPerTileMajor];
__shared__ f64 p_Te_shared[threadsPerTileMajor];
__shared__ f64_vec2 p_vertex_pos_shared[threadsPerTileMajor];
__shared__ long indexneigh[MAXNEIGH_d*threadsPerTileMajor]; // 1 + 2 + 6 doublesworth
long index = blockDim.x*blockIdx.x + threadIdx.x;
p_phi_shared[threadIdx.x] = p_phi[blockIdx.x*blockDim.x + threadIdx.x];
structural info = p_info_sharing[blockIdx.x*blockDim.x + threadIdx.x];
p_vertex_pos_shared[threadIdx.x] = info.pos;
{
nT nTtemp = p_nT_elec[blockIdx.x*blockDim.x + threadIdx.x];
p_Te_shared[threadIdx.x] = nTtemp.T;
}
__syncthreads();
long StartMajor = blockIdx.x*blockDim.x;
long EndMajor = StartMajor + blockDim.x;
f64 phi1, phi2, Te1, Te2;
f64_vec2 pos1, pos2;
if (info.flag == DOMAIN_VERTEX) {
// Don't bother otherwise, right?
memcpy(indexneigh + threadIdx.x*MAXNEIGH_d, p_indexneigh + MAXNEIGH_d*index, sizeof(long)*MAXNEIGH_d);
f64_vec2 grad_phi_integrated(0.0,0.0);
f64_vec2 grad_Te_integrated(0.0,0.0);
f64 grad_x_integrated_x = 0.0;
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
// get phi,pos at edge -- & rotate if necessary
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh1];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
phi1 = p_phi_shared[indexNeigh-StartMajor];
pos1 = p_vertex_pos_shared[indexNeigh-StartMajor];
Te1 = p_Te_shared[indexNeigh-StartMajor];
} else {
phi1 = p_phi[indexNeigh];
structural infotemp = p_info_sharing[indexNeigh];
pos1 = infotemp.pos;
nT nTtemp = p_nT_elec[indexNeigh];
Te1 = nTtemp.T;
};
if (info.has_periodic) {
if ((pos1.x > 0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Clockwise_rotate2(pos1);
};
};
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh2];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
phi2 = p_phi_shared[indexNeigh-StartMajor];
pos2 = p_vertex_pos_shared[indexNeigh-StartMajor];
Te2 = p_Te_shared[indexNeigh-StartMajor];
} else {
phi2 = p_phi[indexNeigh];
structural infotemp = p_info_sharing[indexNeigh];
pos2 = infotemp.pos;
nT nTtemp = p_nT_elec[indexNeigh];
Te2 = nTtemp.T;
};
if (info.has_periodic) {
if ((pos2.x > 0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Clockwise_rotate2(pos2);
};
};
// Now we've got contiguous pos1, pos2, and own pos.
f64_vec2 edge_normal;
// edge_normal.x = pos1.y-info.pos.y;
// edge_normal.y = info.pos.x-pos1.x;
// if (edge_normal2.dot(pos2-info.pos) > 0.0)
// {
// edge_normal.x = -edge_normal.x;
// edge_normal.y = -edge_normal.y;
// }
// grad_phi_integrated += edge_normal*0.5*(phi0+phi1);
// grad_x_integrated_x += edge_normal.x*0.5*(info.pos.x+pos1.x);
edge_normal.x = pos1.y-pos2.y;
edge_normal.y = pos2.x-pos1.x;
if (edge_normal.dot(info.pos-pos1) > 0.0) {
edge_normal.x = -edge_normal.x;
edge_normal.y = -edge_normal.y;
}
grad_phi_integrated += edge_normal*0.5*(phi1+phi2);
grad_Te_integrated += edge_normal*0.5*(Te1+Te2);
grad_x_integrated_x += edge_normal.x*0.5*(pos1.x+pos2.x);
//edge_normal.x = info.pos.y-pos2.y;
//edge_normal.y = pos2.x-info.pos.x;
//if (edge_normal.dot(pos1-pos2) > 0.0) {
// edge_normal.x = -edge_normal.x;
// edge_normal.y = -edge_normal.y;
//}
//grad_phi_integrated += edge_normal*0.5*(phi0+phi2);
//grad_x_integrated_x += edge_normal.x*0.5*(info.pos.x+pos2.x);
// We want to sum to get the average of grad phi weighted by
// area of triangle:
// Not sure I can make sense of this now...
phi1 = phi2;
Te1 = Te2; // was missing: without this, grad_Te uses a stale Te1 after the first edge
pos1 = pos2;
}
p_grad_phi[index] = grad_phi_integrated/grad_x_integrated_x;
p_grad_Te[index] = grad_Te_integrated/grad_x_integrated_x;
// Note that we accumulated edge_normal*(phi0+phi1) so that it
// cancelled out between every edge being counted each way.
// Therefore we only need the outward facing edges, the rest cancel to 0.
} else {
f64_vec2 zero(0.0,0.0);
p_grad_phi[index] = zero;
p_grad_Te[index] = zero;
}
}
__global__ void Kernel_GetThermalPressureCentrals(
structural * __restrict__ p_info_sharing, // for vertex positions & get has_periodic flag
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
long * __restrict__ p_indexneigh,
// Output:
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec
)
{
__shared__ f64 p_nT_shared[threadsPerTileMajor];
__shared__ f64_vec2 p_vertex_pos_shared[threadsPerTileMajor];
__shared__ long indexneigh[MAXNEIGH_d*threadsPerTileMajor]; // 1 + 2 + 6 doublesworth
long index = blockDim.x*blockIdx.x + threadIdx.x;
nT nT_temp = p_nT_neut[blockIdx.x*blockDim.x + threadIdx.x];
p_nT_shared[threadIdx.x] = nT_temp.n*nT_temp.T;
structural info = p_info_sharing[blockIdx.x*blockDim.x + threadIdx.x]; // was indexed by threadIdx.x only, which reads the wrong vertex for every block after the first
p_vertex_pos_shared[threadIdx.x] = info.pos;
__syncthreads();
long StartMajor = blockIdx.x*blockDim.x;
long EndMajor = StartMajor + blockDim.x; // used in the tile-membership tests below
if (info.flag == DOMAIN_VERTEX) {
// Don't bother otherwise, right?
memcpy(indexneigh + threadIdx.x*MAXNEIGH_d, p_indexneigh + MAXNEIGH_d*index, sizeof(long)*MAXNEIGH_d);
f64_vec2 grad_nT_integrated(0.0,0.0);
f64 nT1, nT2;
f64_vec2 pos1, pos2;
// f64 areasum = 0.0;
// Now let's be careful ... we want to integrate grad nT over the central cell
// Probably our best bet is what? Divide by area out to neighs where it is found,
// multiply by central area that is known.
// * * ** * * ** * * ** * * ** * * ** * * ** * * ** * * ** * * ** * * ** * * **
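// So below: accumulate the contour integral of nT around the polygon of neighbours,
// scale by -1/(9 m_s) (the central cell is taken as 1/9 of that polygon, and we divide
// by the species mass m_n / m_ion / m_e per pass), and add into the momentum-addition-rate array.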
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
// get phi,pos -- & rotate if necessary
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh1];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT1 = p_nT_shared[indexNeigh-StartMajor];
pos1 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_neut[indexNeigh];
nT1 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos1 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos1.x > 0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Clockwise_rotate2(pos1);
};
};
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh2];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT2 = p_nT_shared[indexNeigh-StartMajor];
pos2 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_neut[indexNeigh];
nT2 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos2 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos2.x > 0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Clockwise_rotate2(pos2);
};
};
// Now we've got contiguous pos1, pos2, and own pos.
// Correctly, pos2 is the anticlockwise one, therefore edge_normal.x should be
// pos2.y-pos1.y;
f64_vec2 edge_normal;
edge_normal.x = pos2.y-pos1.y;
edge_normal.y = pos1.x-pos2.x;
// Drop this:
// if (edge_normal.dot(info.pos-pos1) > 0.0) {
// edge_normal.x = -edge_normal.x;
// edge_normal.y = -edge_normal.y;
// }
grad_nT_integrated += edge_normal*0.5*(nT1+nT2);
//grad_x_integrated_x += edge_normal.x*0.5*(pos1.x+pos2.x);
nT1 = nT2;
pos1 = pos2;
}
// Now we took it integrated over the whole union of triangles, but,
// we want to diminish this to the size of the central.
// = 1/9 as much
f64_vec3 add(-grad_nT_integrated.x/(9.0*m_n),
-grad_nT_integrated.y/(9.0*m_n),
0.0);
p_MAR_neut[index] += add;
// Note that we accumulated edge_normal*(phi0+phi1) so that it
// cancelled out between every edge being counted each way.
// Therefore we only need the outward facing edges, the rest cancel to 0.
} else {
// Not domain vertex
// f64_vec2 zero(0.0,0.0);
// p_grad_phi[index] = zero;
// do nothing
}
__syncthreads();
// Now proceed, with shared positions already stored, to do ion. Correct?
nT_temp = p_nT_ion[blockIdx.x*blockDim.x + threadIdx.x];
p_nT_shared[threadIdx.x] = nT_temp.n*nT_temp.T;
__syncthreads();
if (info.flag == DOMAIN_VERTEX) {
// Don't bother otherwise, right?
f64_vec2 grad_nT_integrated(0.0,0.0);
f64 nT1, nT2;
f64_vec2 pos1, pos2;
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
// get phi,pos -- & rotate if necessary
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh1];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT1 = p_nT_shared[indexNeigh-StartMajor];
pos1 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_ion[indexNeigh];
nT1 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos1 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos1.x > 0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Clockwise_rotate2(pos1);
};
};
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh2];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT2 = p_nT_shared[indexNeigh-StartMajor];
pos2 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_ion[indexNeigh];
nT2 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos2 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos2.x > 0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Clockwise_rotate2(pos2);
};
};
// Now we've got contiguous pos1, pos2, and own pos.
// Correctly, pos2 is the anticlockwise one, therefore edge_normal.x should be
// pos2.y-pos1.y;
f64_vec2 edge_normal;
edge_normal.x = pos2.y-pos1.y;
edge_normal.y = pos1.x-pos2.x;
// Drop this:
// if (edge_normal.dot(info.pos-pos1) > 0.0) {
// edge_normal.x = -edge_normal.x;
// edge_normal.y = -edge_normal.y;
// }
grad_nT_integrated += edge_normal*0.5*(nT1+nT2);
//grad_x_integrated_x += edge_normal.x*0.5*(pos1.x+pos2.x);
nT1 = nT2;
pos1 = pos2;
}
// Now we took it integrated over the whole union of triangles, but,
// we want to diminish this to the size of the central.
// = 1/9 as much
f64_vec3 add(-grad_nT_integrated.x/(9.0*m_ion),
-grad_nT_integrated.y/(9.0*m_ion),
0.0);
p_MAR_ion[index] += add;
};
__syncthreads();
// Now proceed, with shared positions already stored, to do electrons. Correct?
nT_temp = p_nT_elec[blockIdx.x*blockDim.x + threadIdx.x];
p_nT_shared[threadIdx.x] = nT_temp.n*nT_temp.T;
__syncthreads();
if (info.flag == DOMAIN_VERTEX) {
// Don't bother otherwise, right?
f64_vec2 grad_nT_integrated(0.0,0.0);
f64 nT1, nT2;
f64_vec2 pos1, pos2;
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
// get phi,pos -- & rotate if necessary
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh1];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT1 = p_nT_shared[indexNeigh-StartMajor];
pos1 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_elec[indexNeigh];
nT1 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos1 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos1.x > 0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Clockwise_rotate2(pos1);
};
};
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh2];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT2 = p_nT_shared[indexNeigh-StartMajor];
pos2 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_elec[indexNeigh];
nT2 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos2 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos2.x > 0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Clockwise_rotate2(pos2);
};
}
// Now we've got contiguous pos1, pos2, and own pos.
// Correctly, pos2 is the anticlockwise one, therefore edge_normal.x should be
// pos2.y-pos1.y;
f64_vec2 edge_normal;
edge_normal.x = pos2.y-pos1.y;
edge_normal.y = pos1.x-pos2.x;
grad_nT_integrated += edge_normal*0.5*(nT1+nT2);
nT1 = nT2;
pos1 = pos2;
}
// Now we took it integrated over the whole union of triangles, but,
// we want to diminish this to the size of the central.
// = 1/9 as much
f64_vec3 add(-grad_nT_integrated.x/(9.0*m_e),
-grad_nT_integrated.y/(9.0*m_e),
0.0);
p_MAR_elec[index] += add;
};
// We divided by particle mass and left in Area_central
}
__global__ void Kernel_Compute_grad_phi_Te_tris(
structural * __restrict__ p_info_sharing, // for vertex positions
f64 * __restrict__ p_phi,
nT * __restrict__ p_nT_elec,
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
// Output:
f64_vec2 * __restrict__ p_grad_phi,
f64_vec2 * __restrict__ p_GradTe
)
{
__shared__ f64 p_phi_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64 p_Te_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec2 p_vertex_pos_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long index = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
p_phi_shared[threadIdx.x] = p_phi[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
nT nTtemp = p_nT_elec[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_Te_shared[threadIdx.x] = nTtemp.T;
structural info = p_info_sharing[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_vertex_pos_shared[threadIdx.x] = info.pos;
}
__syncthreads();
CHAR4 perinfo = p_tri_perinfo[index];
// Take grad on triangle:
// first collect corner positions; if this is periodic triangle then we have to rotate em.
if (perinfo.flag == DOMAIN_TRIANGLE) { // ?
LONG3 corner_index = p_tri_corner_index[index];
// Do we ever require those and not the neighbours?
// Yes - this time for instance.
f64_vec2 pos0, pos1, pos2;
f64 phi0,phi1,phi2, Te0, Te1, Te2;
short iNeigh;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
pos0 = p_vertex_pos_shared[corner_index.i1-StartMajor];
phi0 = p_phi_shared[corner_index.i1-StartMajor];
Te0 = p_Te_shared[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i1];
pos0 = info.pos;
phi0 = p_phi[corner_index.i1];
nT nTtemp = p_nT_elec[corner_index.i1];
Te0 = nTtemp.T;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
pos1 = p_vertex_pos_shared[corner_index.i2-StartMajor];
phi1 = p_phi_shared[corner_index.i2-StartMajor];
Te1 = p_Te_shared[corner_index.i2-StartMajor];
} else {
structural info = p_info_sharing[corner_index.i2];
pos1 = info.pos;
phi1 = p_phi[corner_index.i2];
nT nTtemp = p_nT_elec[corner_index.i2];
Te1 = nTtemp.T;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
pos2 = p_vertex_pos_shared[corner_index.i3-StartMajor];
phi2 = p_phi_shared[corner_index.i3-StartMajor];
Te2 = p_Te_shared[corner_index.i3-StartMajor];
} else {
structural info = p_info_sharing[corner_index.i3];
pos2 = info.pos;
phi2 = p_phi[corner_index.i3];
nT nTtemp = p_nT_elec[corner_index.i3];
Te2 = nTtemp.T;
}
// if (perinfo.periodic == 0) {
// } else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI)
pos0 = Anticlock_rotate2(pos0);
if (perinfo.per0 == NEEDS_CLOCK)
pos0 = Clockwise_rotate2(pos0);
if (perinfo.per1 == NEEDS_ANTI)
pos1 = Anticlock_rotate2(pos1);
if (perinfo.per1 == NEEDS_CLOCK)
pos1 = Clockwise_rotate2(pos1);
if (perinfo.per2 == NEEDS_ANTI)
pos2 = Anticlock_rotate2(pos2);
if (perinfo.per2 == NEEDS_CLOCK)
pos2 = Clockwise_rotate2(pos2);
// };
// To get grad phi:
f64_vec2 grad_phi, edge_normal0, edge_normal1, edge_normal2, GradTe;
// Integral of grad... average phi on edge . edgenormal
// This should give the same result as the plane passing through
// the 3 corners -- a few simple examples suggest yes.
edge_normal0.x = pos2.y-pos1.y;
edge_normal0.y = pos1.x-pos2.x;
// Got to make sure it points out. How? Have to take
// dot product with vector to the opposing point
if (edge_normal0.dot(pos0-pos1) > 0.0) {
// points to opposing point - wrong way
edge_normal0.x = -edge_normal0.x;
edge_normal0.y = -edge_normal0.y;
}
edge_normal1.x = pos2.y-pos0.y;
edge_normal1.y = pos0.x-pos2.x;
if (edge_normal1.dot(pos1-pos0) > 0.0) {
edge_normal1.x = -edge_normal1.x;
edge_normal1.y = -edge_normal1.y;
}
edge_normal2.x = pos1.y-pos0.y;
edge_normal2.y = pos0.x-pos1.x;
if (edge_normal2.dot(pos2-pos0) > 0.0) {
edge_normal2.x = -edge_normal2.x;
edge_normal2.y = -edge_normal2.y;
};
grad_phi =
( 0.5*(phi1 + phi2)*edge_normal0 // opposite phi0
+ 0.5*(phi0 + phi2)*edge_normal1
+ 0.5*(phi1 + phi0)*edge_normal2 );
GradTe =
( 0.5*(Te1 + Te2)*edge_normal0 // opposite Te0
+ 0.5*(Te0 + Te2)*edge_normal1
+ 0.5*(Te1 + Te0)*edge_normal2 );
// Divide by area -- easier to recalculate here than to load it in.
f64 area = fabs(0.5*(
(pos1.x+pos0.x)*edge_normal2.x
+ (pos2.x+pos1.x)*edge_normal0.x
+ (pos0.x+pos2.x)*edge_normal1.x
));
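// (Area recovered via the divergence theorem: 0.5 * sum_edges (x_a + x_b) * edge_normal.x
// integrates d(x)/dx over the triangle; the fabs guards against the orientation sign.)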
grad_phi /= area;
GradTe /= area;
// Grad of phi on tri is grad for this minor within the tri:
p_grad_phi[index] = grad_phi;
p_GradTe[index] = GradTe;
} else {
f64_vec2 zero(0.0,0.0);
p_grad_phi[index] = zero;
p_GradTe[index] = zero;
}
}
__global__ void Get_Lap_phi_on_major(
f64 * __restrict__ p_phi,
structural * __restrict__ p_info_sharing,
// f64_vec2 * __restrict__ p_tri_centroid,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
// output:
f64 * __restrict__ p_Lap_phi
)
{
__shared__ f64 p_phi_shared[threadsPerTileMajor];
__shared__ f64_vec2 p_vertex_pos_shared[threadsPerTileMajor];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajor];
// So, per thread: 1 + 2 + 6 doubles = 9 doubles.
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajor]; // + 1.5
//__shared__ f64_vec2 tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 4
// This is not good: 1 + 2 + 6 + 1.5 + 4 = 14.5 --- we said max 12 for decent throughput.
// I think we can drop PBCneigh here and use info.has_periodic
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const index = threadIdx.x + blockIdx.x * blockDim.x;
f64 phi_clock, phi_anti, phi_out, phi;
f64_vec2 pos_clock, pos_anti, pos_out;
char PBC;
p_phi_shared[threadIdx.x] = p_phi[index];
structural info = p_info_sharing[index];
p_vertex_pos_shared[threadIdx.x] = info.pos;
// We are going to want tri centroids to know the edge of the major cell.
//tri_centroid[threadIdx.x] = p_tri_centroid[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
//tri_centroid[threadIdx.x + blockDim.x] = p_tri_centroid[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + blockDim.x + threadIdx.x];
__syncthreads();
f64 Lapphi = 0.0, Area = 0.0;
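// Plan: walk the neighbours of this vertex, build an edge normal spanning one third of
// the clock->anti chord for each neighbour, accumulate grad phi . edgenormal across that
// edge, and sum the matching contour terms into Area; finally Lap phi = total / Area.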
if ( (info.flag != DOMAIN_VERTEX) && (info.flag != OUTERMOST) ) {
return;
}
// We might like to treat the OUTERMOST case.:
//short limit = info.neigh_len;
//if (info.flag == OUTERMOST) limit++; // Ensure it's tri 4 and 0 on edge towards neigh 0.
// Yeah that's no good ... this is neighs not tris. Pay attention.
long indexneigh;
switch(info.flag)
{
case DOMAIN_VERTEX:
// Now we've got to load up what we need for the edge of the major cell.
// Did we do this anywhere else?
phi = p_phi_shared[threadIdx.x];
memcpy(Indexneigh + MAXNEIGH_d*threadIdx.x,
pIndexNeigh + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d*threadIdx.x,
pPBCNeigh + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(char));
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len-1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = p_vertex_pos_shared[indexneigh-StartMajor];
phi_clock = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_clock = info.pos;
phi_clock = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len-1];
if (PBC == NEEDS_ANTI)
pos_clock = Anticlock_rotate2(pos_clock);
if (PBC == NEEDS_CLOCK)
pos_clock = Clockwise_rotate2(pos_clock);
// What about neighs and tris? Are they in the appropriate relationship?
// How about: load vertex positions --> work out centroids --
// we need phi from vertices anyway and we need their positions anyway. So.
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = p_vertex_pos_shared[indexneigh-StartMajor];
phi_out = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_out = info.pos;
phi_out = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI)
pos_out = Anticlock_rotate2(pos_out);
if (PBC == NEEDS_CLOCK)
pos_out = Clockwise_rotate2(pos_out);
short iNeigh;
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
int inext = iNeigh+1; if (inext == info.neigh_len) inext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + inext];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = p_vertex_pos_shared[indexneigh-StartMajor];
phi_anti = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_anti = info.pos;
phi_anti = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + inext];
if (PBC == NEEDS_ANTI)
pos_anti = Anticlock_rotate2(pos_anti);
if (PBC == NEEDS_CLOCK)
pos_anti = Clockwise_rotate2(pos_anti);
// Choice of using PBC list here. Alternative is what: just working from
// 'has_periodic' flag on our own thread, and deciding from that based on x/y.
// ___________________________________________________________________________
// Now what to do with it?
// Find the edge:
f64_vec2 edgenormal;
//vec2 = THIRD*(pos_clock + info.pos + pos_out); <--- assume this would be centroid...
edgenormal.x = THIRD*(pos_anti.y-pos_clock.y);
edgenormal.y = THIRD*(pos_clock.x-pos_anti.x);
// HERE DID NOT HAVE TO USE tri_centroid AFTER ALL.
// HOWEVER MAKE SURE WE DO THE RIGHT THING IN CASE THIS ABUTS THE INSULATOR.
// In this case, tri centroid is meant to be projected to insulator!!
// But Lap phi abutting the insulator should come out as what?
// Sides contribute azimuthally, ins side contributes 0.
// ie we should not be using Inner values to get gradient when looking left + right
// at ins.
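// Three cases below: (i) the outward neighbour lies under the insulator -> no contribution;
// (ii) the clockwise or anticlockwise neighbour lies under it -> grad phi from the 3
// remaining points via a shoelace fit; (iii) otherwise the default 4-point formula.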
// COMMENTED AT ONE POINT FOR DEBUGGING WHY IT GAVE LAUNCH FAILURES
if (pos_out.x*pos_out.x+pos_out.y*pos_out.y < DEVICE_INSULATOR_OUTER_RADIUS*DEVICE_INSULATOR_OUTER_RADIUS)
{
// Zero contribution, looking into insulator
} else {
if (pos_anti.x*pos_anti.x+pos_anti.y*pos_anti.y < DEVICE_INSULATOR_OUTER_RADIUS*DEVICE_INSULATOR_OUTER_RADIUS)
{
// assume we just look at the phi_out? No,
// get grad phi from 3 points.
f64 shoelacedoubled = ( (pos_clock.x + info.pos.x)*(pos_clock.y-info.pos.y) // y_anti - y_clock --- pos_clock is the highest one.
+ (pos_clock.x + pos_out.x)*(pos_out.y-pos_clock.y)
+ (pos_out.x + info.pos.x)*(info.pos.y-pos_out.y));
f64_vec2 Gradphi;
Gradphi.x = ( (phi_clock + phi)*(pos_clock.y-info.pos.y) // y_anti - y_clock --- pos_clock is the highest one.
+ (phi_clock + phi_out)*(pos_out.y-pos_clock.y)
+ (phi_out + phi)*(info.pos.y-pos_out.y) )
/ shoelacedoubled;
Gradphi.y = ( (phi_clock + phi)*(info.pos.x-pos_clock.x) // y_anti - y_clock --- pos_clock is the highest one.
+ (phi_clock + phi_out)*(pos_clock.x-pos_out.x)
+ (phi_out + phi)*(pos_out.x-info.pos.x) )
/ shoelacedoubled;
Lapphi += Gradphi.dot(edgenormal);
// We did not yet modify edgenormal, nota bene.
// And what then is the contribution for shoelace?
// Should be adding up
// integral of dx/dx
//edgenormal.x = THIRD*(pos_anti.y-pos_clock.y);
//edgenormal.y = THIRD*(pos_clock.x-pos_anti.x);
Area += 0.5*(pos_clock.x + pos_anti.x)*edgenormal.x;
// Of course for sides we are not doing this quite right, by not
// modifying the centroid.
} else {
if (pos_clock.x*pos_clock.x+pos_clock.y*pos_clock.y < DEVICE_INSULATOR_OUTER_RADIUS*DEVICE_INSULATOR_OUTER_RADIUS)
{
f64 shoelacedoubled = ( (pos_anti.x + info.pos.x)*(info.pos.y-pos_anti.y) // y_anti - y_clock --- pos_clock is the highest one.
+ (pos_anti.x + pos_out.x)*(pos_anti.y-pos_out.y)
+ (pos_out.x + info.pos.x)*(pos_out.y-info.pos.y));
f64_vec2 Gradphi;
Gradphi.x = ( (phi_anti + phi)*(info.pos.y-pos_anti.y) // y_anti - y_clock --- pos_clock is the highest one.
+ (phi_anti + phi_out)*(pos_anti.y-pos_out.y)
+ (phi_out + phi)*(pos_out.y-info.pos.y) )
/ shoelacedoubled;
Gradphi.y = ( (phi_anti + phi)*(pos_anti.x-info.pos.x) // y_anti - y_clock --- pos_clock is the highest one.
+ (phi_anti + phi_out)*(pos_out.x-pos_anti.x)
+ (phi_out + phi)*(info.pos.x-pos_out.x) )
/ shoelacedoubled;
Lapphi += Gradphi.dot(edgenormal);
Area += 0.5*(pos_clock.x + pos_anti.x)*edgenormal.x;
} else {
// Default case.
//shoelace = (info.pos.x - pos_out.x)*(pos_anti.y - pos_clock.y)
// + (pos_anti.x - pos_clock.x)*(pos_out.y - info.pos.y);
// same coeff to phi for grad_x integrated as on x_0 in shoelace:
// same coeff to phi_anti for grad_y as on y_anti in shoelace:
// succeed with this out:
Lapphi += ( (phi - phi_out) * ( (pos_anti.y-pos_clock.y)*edgenormal.x
+ (pos_clock.x-pos_anti.x)*edgenormal.y )
// still fails with this out too:
+ (phi_anti-phi_clock)*( (pos_out.y - info.pos.y)*edgenormal.x
+ (info.pos.x - pos_out.x)*edgenormal.y) )
// // divide by shoelace :
/ ( (info.pos.x - pos_out.x)*(pos_anti.y - pos_clock.y)
+ (pos_anti.x - pos_clock.x)*(pos_out.y - info.pos.y) );
// Think divide by zero is the reason it crashes. Nope. Still fails without division.
Area += 0.5*(pos_clock.x + pos_anti.x)*edgenormal.x;
};
};
};
// Get away with not repositioning edge_normal ends to insulator...
// Now go round:
pos_clock = pos_out;
pos_out = pos_anti;
phi_clock = phi_out;
phi_out = phi_anti;
};
break;
case OUTERMOST:
// In this case we have e.g. if there are 4 neighs 0,1,2,3, then just 0-1-2, 1-2-3
// We can happily drop the d/dtheta, it's not a big deal.
// Start with neigh 0, not neigh N-1. End with neigh N-2 for centre.
phi = p_phi_shared[threadIdx.x];
memcpy(Indexneigh + MAXNEIGH_d*threadIdx.x,
pIndexNeigh + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d*threadIdx.x,
pPBCNeigh + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(char));
long indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = p_vertex_pos_shared[indexneigh-StartMajor];
phi_clock = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_clock = info.pos;
phi_clock = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI)
pos_clock = Anticlock_rotate2(pos_clock);
if (PBC == NEEDS_CLOCK)
pos_clock = Clockwise_rotate2(pos_clock);
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = p_vertex_pos_shared[indexneigh-StartMajor];
phi_out = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_out = info.pos;
phi_out = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 1];
if (PBC == NEEDS_ANTI)
pos_out = Anticlock_rotate2(pos_out);
if (PBC == NEEDS_CLOCK)
pos_out = Clockwise_rotate2(pos_out);
#pragma unroll MAXNEIGH_d
for (iNeigh = 1; iNeigh < info.neigh_len-1; iNeigh++)
{
int inext = iNeigh+1;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + inext];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = p_vertex_pos_shared[indexneigh-StartMajor];
phi_anti = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_anti = info.pos;
phi_anti = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + inext];
if (PBC == NEEDS_ANTI)
pos_anti = Anticlock_rotate2(pos_anti);
if (PBC == NEEDS_CLOCK)
pos_anti = Clockwise_rotate2(pos_anti);
f64_vec2 edgenormal;
edgenormal.x = THIRD*(pos_anti.y-pos_clock.y);
edgenormal.y = THIRD*(pos_clock.x-pos_anti.x);
Lapphi += ( (phi - phi_out) * ( (pos_anti.y-pos_clock.y)*edgenormal.x
+ (pos_clock.x-pos_anti.x)*edgenormal.y )
+ (phi_anti-phi_clock)*( (pos_out.y - info.pos.y)*edgenormal.x
+ (info.pos.x - pos_out.x)*edgenormal.y) )
// was:
// + (pos_anti.y-pos_clock.y)*edgenormal.y ) )
// divide by shoelace :
/ ( (info.pos.x - pos_out.x)*(pos_anti.y - pos_clock.y)
+ (pos_anti.x - pos_clock.x)*(pos_out.y - info.pos.y) );
Area += 0.5*(pos_clock.x + pos_anti.x)*edgenormal.x;
// Now go round:
pos_clock = pos_out;
pos_out = pos_anti;
phi_clock = phi_out;
phi_out = phi_anti;
};
break;
};
// integral of div f = sum of [f dot edgenormal]
// ... so here we took integral of div grad f.
// Look at previous code. Need to collect area and divide by it.
p_Lap_phi[index] = Lapphi/Area;
}
__global__ void Kernel_GetThermalPressureTris(
structural * __restrict__ p_info_sharing, // for vertex positions
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
// Output:
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec
)
{
// Attention: code p_MAR_neut[index] += add;
// implies that we zero those arrays before we come here.
__shared__ f64 p_nT_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec2 p_vertex_pos_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long index = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
nT nTtemp = p_nT_neut[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_nT_shared[threadIdx.x] = nTtemp.n*nTtemp.T;
structural info = p_info_sharing[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_vertex_pos_shared[threadIdx.x] = info.pos;
}
__syncthreads();
// Take grad on triangle:
// first collect corner positions; if this is periodic triangle then we have to rotate em.
LONG3 corner_index;
f64_vec2 edge_normal0, edge_normal1, edge_normal2;
CHAR4 perinfo = p_tri_perinfo[index];
if (perinfo.flag == DOMAIN_TRIANGLE) { // ?
corner_index = p_tri_corner_index[index];
// Do we ever require those and not the neighbours?
// Yes - this time for instance.
f64 nT0, nT1, nT2;
f64_vec2 pos0, pos1, pos2;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
pos0 = p_vertex_pos_shared[corner_index.i1-StartMajor];
nT0 = p_nT_shared[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i1];
pos0 = info.pos;
nT nTtemp = p_nT_neut[corner_index.i1];
nT0 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
pos1 = p_vertex_pos_shared[corner_index.i2-StartMajor];
nT1 = p_nT_shared[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i2];
pos1 = info.pos;
nT nTtemp = p_nT_neut[corner_index.i2];
nT1 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
pos2 = p_vertex_pos_shared[corner_index.i3-StartMajor];
nT2 = p_nT_shared[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i3];
pos2 = info.pos;
nT nTtemp = p_nT_neut[corner_index.i3];
nT2 = nTtemp.n*nTtemp.T;
}
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI)
pos0 = Anticlock_rotate2(pos0);
if (perinfo.per0 == NEEDS_CLOCK)
pos0 = Clockwise_rotate2(pos0);
if (perinfo.per1 == NEEDS_ANTI)
pos1 = Anticlock_rotate2(pos1);
if (perinfo.per1 == NEEDS_CLOCK)
pos1 = Clockwise_rotate2(pos1);
if (perinfo.per2 == NEEDS_ANTI)
pos2 = Anticlock_rotate2(pos2);
if (perinfo.per2 == NEEDS_CLOCK)
pos2 = Clockwise_rotate2(pos2);
// Integral of grad... average phi on edge . edgenormal
// This should give the same result as the plane passing through
// the 3 corners -- a few simple examples suggest yes.
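// Equivalent statement (my reading): for a function f that is linear over the triangle,
// integral of grad f over the triangle = sum over edges of 0.5*(f_a + f_b)*outward edge normal.
// Dividing by the triangle area would give grad f; the integrated form is kept here because it
// is accumulated below as a rate of change of momentum (N v), not of v.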
edge_normal0.x = pos2.y-pos1.y;
edge_normal0.y = pos1.x-pos2.x;
// Got to make sure it points out. How? Have to take
// dot product with vector to the opposing point
if (edge_normal0.dot(pos0-pos1) > 0.0) {
// points to opposing point - wrong way
edge_normal0.x = -edge_normal0.x;
edge_normal0.y = -edge_normal0.y;
}
edge_normal1.x = pos2.y-pos0.y;
edge_normal1.y = pos0.x-pos2.x;
if (edge_normal1.dot(pos1-pos0) > 0.0) {
edge_normal1.x = -edge_normal1.x;
edge_normal1.y = -edge_normal1.y;
}
edge_normal2.x = pos1.y-pos0.y;
edge_normal2.y = pos0.x-pos1.x;
if (edge_normal2.dot(pos2-pos0) > 0.0) {
edge_normal2.x = -edge_normal2.x;
edge_normal2.y = -edge_normal2.y;
};
f64_vec2 grad_nT_integrated =
( 0.5*(nT1 + nT2)*edge_normal0 // opposite phi0
+ 0.5*(nT0 + nT2)*edge_normal1
+ 0.5*(nT1 + nT0)*edge_normal2 );
// Grad of phi on tri is grad for this minor within the tri:
//p_grad_nT_neut_integrated[index] = grad_nT_integrated;
// NOTE WE DO NOW DIVIDE BY PARTICLE MASS
f64_vec3 add(-grad_nT_integrated.x/m_n,
-grad_nT_integrated.y/m_n,
0.0); // MINUS
p_MAR_neut[index] += add;
} else {
if (perinfo.flag == CROSSING_INS) {
// We don't know if it's got 1 point outside ins or 2.
// If 1 then not a lot we can do ??
// Contribute zero to MAR for now...
} else {
// leave MAR unaffected
};
}
__syncthreads();
// Now load in ion nT info:
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
nT nTtemp = p_nT_ion[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_nT_shared[threadIdx.x] = nTtemp.n*nTtemp.T;
}
__syncthreads();
if (perinfo.flag == DOMAIN_TRIANGLE) { // ?
f64 nT0, nT1, nT2;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT0 = p_nT_shared[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_ion[corner_index.i1];
nT0 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT1 = p_nT_shared[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_ion[corner_index.i2];
nT1 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT2 = p_nT_shared[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_ion[corner_index.i3];
nT2 = nTtemp.n*nTtemp.T;
}
// Integral of grad... average phi on edge . edgenormal
// This should give the same result as the plane passing through
// the 3 corners -- a few simple examples suggest yes.
f64_vec2 grad_nT_integrated =
( 0.5*(nT1 + nT2)*edge_normal0 // opposite phi0
+ 0.5*(nT0 + nT2)*edge_normal1
+ 0.5*(nT1 + nT0)*edge_normal2 );
// Grad of phi on tri is grad for this minor within the tri:
//p_grad_nT_ion_integrated[index] = grad_nT_integrated;
f64_vec3 add(-grad_nT_integrated.x/m_ion,
-grad_nT_integrated.y/m_ion,
0.0);
p_MAR_ion[index] += add;
} else {
f64_vec2 zero(0.0,0.0);
//p_grad_nT_ion_integrated[index] = zero;
}
__syncthreads();
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
nT nTtemp = p_nT_elec[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_nT_shared[threadIdx.x] = nTtemp.n*nTtemp.T;
}
__syncthreads();
if (perinfo.flag == DOMAIN_TRIANGLE) { // ?
f64 nT0, nT1, nT2;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT0 = p_nT_shared[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_elec[corner_index.i1];
nT0 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT1 = p_nT_shared[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_elec[corner_index.i2];
nT1 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT2 = p_nT_shared[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_elec[corner_index.i3];
nT2 = nTtemp.n*nTtemp.T;
}
// Integral of grad... average phi on edge . edgenormal
// This should give the same result as the plane passing through
// the 3 corners -- a few simple examples suggest yes.
f64_vec2 grad_nT_integrated =
( 0.5*(nT1 + nT2)*edge_normal0 // opposite phi0
+ 0.5*(nT0 + nT2)*edge_normal1
+ 0.5*(nT1 + nT0)*edge_normal2 );
// Grad of phi on tri is grad for this minor within the tri:
//p_grad_nT_elec_integrated[index] = grad_nT_integrated;
f64_vec3 add(-grad_nT_integrated.x/m_e,
-grad_nT_integrated.y/m_e,
0.0);
p_MAR_elec[index] += add;
} else {
}
}
__global__ void Kernel_Advance_Antiadvect_phidot(
f64 * __restrict__ p_phidot,
f64_vec2 * __restrict__ p_v_overall,
f64 h_use,
f64_vec2 * __restrict__ p_grad_phidot,
f64 * __restrict__ p_Lap_phi,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
// out:
f64 * __restrict__ p_phidot_out
)
{
long index = blockDim.x*blockIdx.x + threadIdx.x;
f64_vec2 move = h_use*p_v_overall[index];
f64 Lap_phi = p_Lap_phi[index];
nT nT_ion = p_nT_ion[index];
nT nT_elec = p_nT_elec[index];
f64 phidot = p_phidot[index];
f64_vec2 grad_phidot = p_grad_phidot[index];
// What it has to do:
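// As I read it (assumption about intent): this advances a wave equation for phi,
// d(phidot)/dt = c^2 ( Lap(phi) + 4*pi*q*(n_i - n_e) ),
// while the move.dot(grad_phidot) term anti-advects the value to the moved mesh point.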
p_phidot_out[index] =
phidot + move.dot(grad_phidot)
+ h_use*csq*(Lap_phi + FOURPI_Q*(nT_ion.n-nT_elec.n));
// CHECK SIGNS
// We are giving the value at the moved point.
// The existence of this routine is a clear inefficiency.
// It's basically so that computing grad_phi can be separate and repeated.
// Could we combine it with Get_Lap_phi_on_major?
}
__global__ void Kernel_Advance_Antiadvect_phi
(
f64 * __restrict__ p_phi,
f64_vec2 * p_v_overall_major,
f64 h_use,
f64_vec2 * __restrict__ p_grad_phi_major,
f64 * __restrict__ p_phidot ,
f64 * __restrict__ p_phi_out
)
{
long index = blockDim.x*blockIdx.x + threadIdx.x;
f64_vec2 move = h_use*p_v_overall_major[index];
f64 phidot = p_phidot[index];
f64 phi = p_phi[index];
f64_vec2 grad_phi = p_grad_phi_major[index];
p_phi_out[index] =
phi + move.dot(grad_phi) + h_use*phidot;
}
__global__ void Kernel_Antiadvect_A_allminor
(
f64_vec3 * __restrict__ p_A,
f64_vec2 * __restrict__ p_v_overall_minor,
f64 h_use,
f64_vec2 * __restrict__ p_grad_Ax,
f64_vec2 * __restrict__ p_grad_Ay,
f64_vec2 * __restrict__ p_grad_Az,
f64_vec3 * __restrict__ p_A_out,
bool bAdd,
f64_vec3 * __restrict__ p_Adot
)
{
long index = blockDim.x*blockIdx.x + threadIdx.x;
f64_vec2 move = h_use*p_v_overall_minor[index];
f64_vec3 A_out;
f64_vec3 A_in = p_A[index];
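// First-order Taylor shift (anti-advection): A at the moved node position is approximated by
// A(x) + move.dot(grad A), so the potential stays attached to the moving mesh.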
A_out.x = A_in.x + move.dot(p_grad_Ax[index]);
A_out.y = A_in.y + move.dot(p_grad_Ay[index]);
A_out.z = A_in.z + move.dot(p_grad_Az[index]);
if (bAdd) {
f64_vec3 Adot = p_Adot[index];
A_out += h_use*Adot;
}
p_A_out[index] = A_out;
}
__global__ void Kernel_Ionisation(
f64 const h,
structural * __restrict__ p_info,
f64 * __restrict__ p_area,
nT * __restrict__ p_nT_neut_src,
nT * __restrict__ p_nT_ion_src,
nT * __restrict__ p_nT_elec_src,
nT * __restrict__ p_nT_neut_use,
nT * __restrict__ p_nT_ion_use,
nT * __restrict__ p_nT_elec_use,
nn * __restrict__ p_nn_ionise_recombine,
// Where are these used and how to avoid storing?
// There isn't a way: we have to spread this information out to minor cells.
bool b2ndpass
)
{
long index = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info[index]; // 3 doubles?
nT nT_elec_use, nT_ion_use, nT_neut_use;
nT nT_elec_src, nT_ion_src, nT_neut_src;
//f64 n_n_plus, n_ion_plus, n_e_plus;
nn nirec;
nirec.n_ionise = 0.0; // zero so that the unconditional save below writes 0 for non-domain vertices
nirec.n_recombine = 0.0;
nT_elec_src = p_nT_elec_src[index];
nT_neut_src = p_nT_neut_src[index];
nT_ion_src = p_nT_ion_src[index];
if (b2ndpass) {
nT_elec_use = p_nT_elec_use[index];
nT_ion_use = p_nT_ion_use[index];
nT_neut_use = p_nT_neut_use[index];
} else {
nT_elec_use = nT_elec_src;
nT_ion_use = nT_ion_src;
nT_neut_use = nT_neut_src;
};
if (info.flag == DOMAIN_VERTEX)
{
// . Do ionisation --> know how much ionisation change in mom
// (or v) of each species and rate of adding to T.
{
// Strategy: on the 1st pass, anticipate part of the expected change in Te from
// ionisation; on the 2nd pass, use the estimated half-time T directly.
f64 S, R, sqrtTeeV;
if (b2ndpass == 0) {
// Use a deliberate underestimate that takes into account some
// expected change in Te from ionisation:
// For sqrt(T) we use sqrt((T_k+T_k+1)/2).
f64 T_eV_k = nT_elec_src.T/kB;
f64 buildingblock = 1.0e-5*exp(-13.6/T_eV_k)/
(13.6*(6.0*13.6+T_eV_k));
buildingblock = buildingblock*buildingblock;
f64 temp = 0.25*h*nT_neut_src.n*TWOTHIRDS*13.6*buildingblock;
S = - temp + sqrt( temp*temp + T_eV_k*buildingblock );
// The 2nd pass, T will be a little less if ionisation
// is important, and we then go ahead and use that.
sqrtTeeV = sqrt(T_eV_k);
R = nT_elec_src.n*8.75e-27/
((T_eV_k*T_eV_k)*(T_eV_k*T_eV_k)*sqrtTeeV); // take n_i*n_e*R
// Nothing fancy for recombination rate.
// It should only be a problem in case that we are weakly ionised,
// and at least in that case the first go will be limited by the
// measure to avoid n < 0.
nirec.n_ionise = nT_neut_src.n*nT_elec_src.n*S*h;
nirec.n_recombine = nT_elec_src.n*nT_ion_src.n*R*h;
f64 netionise = nirec.n_ionise - nirec.n_recombine;
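// Safeguard: if the explicit rates would drive any density negative, both rates are damped
// below by a common implicit-style factor ~ 1/(1 + h*n*S + h*n*R), keeping n >= 0.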
if ((nT_neut_src.n-netionise < 0.0) || (nT_ion_src.n+netionise < 0.0) || (nT_elec_src.n+netionise < 0.0))
{
// in denom goes n_ionise/n_n and n_recombine/n_lowest
if (nT_ion_src.n < nT_elec_src.n) {
f64 denom = (1.0 + h*nT_elec_src.n*S + h*nT_elec_src.n*R);
nirec.n_ionise /= denom;
nirec.n_recombine /= denom;
} else {
f64 denom = (1.0 + h*nT_elec_src.n*S + h*nT_ion_src.n*R);
nirec.n_ionise /= denom;
nirec.n_recombine /= denom;
};
netionise = nirec.n_ionise - nirec.n_recombine;
};
// n_ion_plus = nT_ion_src.n + netionise;
// n_n_plus = nT_neut_src.n - netionise;
// n_e_plus = nT_elec_src.n + netionise;
} else {
// Use Te_1/2 throughout:
f64 T_eV = nT_elec_use.T/kB; // large negative
sqrtTeeV = sqrt(T_eV);
S = 1.0e-5*sqrtTeeV*exp(-13.6/T_eV)/(13.6*(6.0*13.6+T_eV));
R = nT_elec_use.n*8.75e-27/((T_eV*T_eV)*(T_eV*T_eV)*sqrtTeeV);
nirec.n_ionise = nT_neut_use.n*nT_elec_use.n*S*h;
nirec.n_recombine = nT_elec_use.n*nT_ion_use.n*R*h;
f64 netionise = nirec.n_ionise - nirec.n_recombine;
// Note: if the species are being depleted, using the half-time n here makes a real difference,
// drastically reducing recombination because it reflects that fewer particles remain.
if ((nT_neut_src.n-netionise < 0.0) || (nT_ion_src.n+netionise < 0.0) || (nT_elec_src.n+netionise < 0.0))
{
f64 denom;
if (nT_ion_src.n < nT_elec_src.n) {
denom = (1.0 + h*nT_elec_src.n*S + h*nT_elec_src.n*R);
} else {
denom = (1.0 + h*nT_elec_src.n*S + h*nT_ion_src.n*R);
};
nirec.n_ionise = nT_neut_src.n*nT_elec_src.n*S*h/denom;
nirec.n_recombine = nT_elec_src.n*nT_ion_src.n*R*h/denom;
netionise = nirec.n_ionise - nirec.n_recombine;
};
// n_ion_plus = nT_ion_src.n + netionise;
// n_n_plus = nT_neut_src.n - netionise;
// n_e_plus = nT_elec_src.n + netionise;
};
} // end of "do ionisation"
// We now got: n_ion_plus, n_n_plus, n_e_plus, n_ionise, n_recombine.
} else {
// (info.flag == DOMAIN_VERTEX)
// n_e_plus = nT_elec_src.n;
// n_n_plus = nT_neut_src.n;
// n_ion_plus = nT_ion_src.n;
};
// Save output...
p_nn_ionise_recombine[index] = nirec;
// nT_elec_out ? We do not need it for midpoint v routine, because
// we load n_ionise_recombine to recreate.
//p_nT_elec_out[index].n = n_e_plus;
//p_nT_ion_out[index].n = n_ion_plus;
//p_nT_neut_out[index].n = n_n_plus;
// Therefore we should probably use the midpoint routine to do this save,
// because we will be doing a save-off of T anyway.
// NOPE - midpoint routine applies to minor not major.
}
// Note: unroll can increase register pressure!
__global__ void Kernel_Midpoint_v_and_Adot (
f64 const h,
CHAR4 * __restrict__ p_tri_perinfo,
nT * __restrict__ p_nT_neut_src,
nT * __restrict__ p_nT_ion_src,
nT * __restrict__ p_nT_elec_src,
// n_k appears because it is needed as part of the midpoint step.
// Am I serious about __restrict__ ? Yes if pass 0 as use on 1st pass
// On 2nd pass you do need different n for half time?
// n basically changes with ionisation
nT * __restrict__ p_nT_neut_use, // k+1/2 on 2nd pass; on 1st pass n is adjusted by ionisation - correct?
nT * __restrict__ p_nT_ion_use, // k or k+1/2
nT * __restrict__ p_nT_elec_use, // k or k+1/2 --- for forming nu etc...
nn * __restrict__ p_nn_ionise_recombine,
// Have to load 2 additional doubles due to doing ionisation outside.
f64_vec2 * __restrict__ p_tri_centroid,
// CAN WE MAKE THIS BE EXTENDED TO APPLY FOR CENTRAL VALUES ALSO?
// For now have to include separate set of positions:
structural * __restrict__ p_info,
// We use this when we assume we are adding to v's momentum and for doing the n_k+1 part of the midpt formula
f64_vec3 * __restrict__ p_B,
f64_vec3 * __restrict__ p_v_n_src,
f64_vec3 * __restrict__ p_v_ion_src,
f64_vec3 * __restrict__ p_v_e_src,
f64 * __restrict__ p_area, // It's assumed to be area_k+1 but I guess it's area_k+1/2 ... too bad?
f64_vec2 * __restrict__ p_grad_phi_half,
f64_vec3 * __restrict__ p_Lap_A_half,
f64_vec3 * __restrict__ p_Adot_k,
f64_vec3 * __restrict__ p_MomAdditionRate_neut,
f64_vec3 * __restrict__ p_MomAdditionRate_ion,
f64_vec3 * __restrict__ p_MomAdditionRate_elec,
// OKay let's check out if aTP was even correct.
f64_vec2 * __restrict__ p_grad_Te,
f64_vec3 * __restrict__ p_v_neut_out,
f64_vec3 * __restrict__ p_v_ion_out,
f64_vec3 * __restrict__ p_v_elec_out,
f64 * __restrict__ p_resistive_heat_neut, // additions to NT
f64 * __restrict__ p_resistive_heat_ion,
f64 * __restrict__ p_resistive_heat_elec,
f64_vec3 * p_dAdt_out,
bool b2ndPass,
f64 const EzTuning,
f64 * __restrict__ p_Iz,
f64 * __restrict__ p_sigma_zz
)
{
// Still going to need to know the Ez linear relationship
// from this function:
__shared__ f64 Iz[threadsPerTileMinor];
__shared__ f64 sigma_zz[threadsPerTileMinor];
// Only dimension here what will have def been used by the time we hit the
// largest memory footprint:
f64 nu_ne_MT_over_n, nu_ni_MT_over_n, nu_eiBar, nu_ieBar, nu_eHeart; // 5 double
Vector3 omega_ce, v_e_k, v_ion_k, v_n_k, v_n_0; // 15 double
Vector3 v_ion_plus, v_n_plus, v_e_plus; // 9 doubles
CHAR4 per_info;
f64 n_e_plus, n_ion_plus, n_n_plus;
nT nT_elec_use,nT_ion_use,nT_neut_use;
nn n_ionrec;
Vector3 Lap_A_half;
long index = threadIdx.x + blockIdx.x*blockDim.x;
omega_ce = p_B[index];
omega_ce *= eovermc; // Trying to avoid 3 separate accesses [which would not even be contig!!]
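// omega_ce should now be the electron cyclotron frequency vector eB/(m_e c)
// (Gaussian units), assuming eovermc = e/(m_e c).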
// Shane Cook p.176 We do not need to put the reads where they are needed, can put first.
// It is UNCLEAR where I read that we have to put reads outside of branches.
// I do not find corroborating sources.
// index < Nverts is more or less guaranteed but it would of course be nice not to
// do fetches for info.flag != DOMAIN_VERTEX.
v_e_k = p_v_e_src[index];
v_ion_k = p_v_ion_src[index];
v_n_k = p_v_n_src[index];
f64 area = p_area[index];
f64_vec2 centroid;
if (index < BEGINNING_OF_CENTRAL)
{
centroid = p_tri_centroid[index];
} else {
centroid = p_info[index-BEGINNING_OF_CENTRAL].pos;
};
per_info = p_tri_perinfo[index];
n_ionrec = p_nn_ionise_recombine[index];
//n_n_plus = p_n_n_plus[index].n;
//n_ion_plus = p_n_ion_plus[index].n;
//n_e_plus = p_n_e_plus[index].n;
if ((OUTPUT) && (index == REPORT)) {
printf("v_k %1.5E %1.5E %1.5E\n",v_n_k.z,v_ion_k.z,v_e_k.z);
printf("Bxy %1.5E %1.5E omega.z %1.5E \n",omega_ce.x/eovermc,omega_ce.y/eovermc,omega_ce.z);
};
if ((per_info.flag == DOMAIN_TRIANGLE)) // try loading inside, outside
{
// Now the v calcs:
{
nT nT_elec_src, nT_ion_src, nT_neut_src; // register pressure?
nT_elec_src = p_nT_elec_src[index];
nT_ion_src = p_nT_ion_src[index];
nT_neut_src = p_nT_neut_src[index];
// Question whether these should be inside brace.
n_n_plus = nT_neut_src.n + n_ionrec.n_recombine-n_ionrec.n_ionise;
n_ion_plus = nT_ion_src.n + n_ionrec.n_ionise-n_ionrec.n_recombine;
n_e_plus = nT_elec_src.n + n_ionrec.n_ionise-n_ionrec.n_recombine;
if (b2ndPass) {
nT_elec_use = p_nT_elec_use[index];
nT_ion_use = p_nT_ion_use[index];
nT_neut_use = p_nT_neut_use[index];
} else {
nT_elec_use = nT_elec_src;
nT_ion_use = nT_ion_src;
nT_neut_use = nT_neut_src;
};
// Try to make do with 3 tensors: 27 doubles.
{
f64 sqrt_Te,ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(nT_elec_use.T);
ionneut_thermal = sqrt(nT_ion_use.T/m_ion+nT_neut_use.T/m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te*over_sqrt_m_e;
lnLambda = Get_lnLambda_d(nT_ion_use.n,nT_elec_use.T);
s_in_MT = Estimate_Neutral_MT_Cross_section(nT_ion_use.T*one_over_kB);
s_en_MT = Estimate_Neutral_MT_Cross_section(nT_elec_use.T*one_over_kB);
s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(nT_elec_use.T*one_over_kB);
// Need nu_ne etc to be defined:
nu_ne_MT_over_n = s_en_MT*electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT_over_n = s_in_MT*ionneut_thermal;
nu_eiBar = nu_eiBarconst*kB_to_3halves*nT_ion_use.n*lnLambda/(nT_elec_use.T*sqrt_Te);
nu_ieBar = nT_elec_use.n*nu_eiBar/nT_ion_use.n;
nu_eHeart = 1.87*nu_eiBar +
//nu_en_visc; // Why used visc??
nT_neut_use.n*s_en_visc*electron_thermal;
}
// Can avoid 6 doubles on 1st pass if we put these defined above and do not use nT_use from here.
f64 Beta_ni, Beta_ne;
// Get v_n (v_e, v_i):
Beta_ne = h*0.5*(m_e/(m_e+m_n))*nu_ne_MT_over_n*nT_elec_use.n; // avoid division with a #define!
Beta_ni = h*0.5*(m_ion/(m_ion+m_n))*nu_ni_MT_over_n*nT_ion_use.n;
v_n_0 =
// ionisation addition to neutral momentum:
((nT_neut_src.n - n_ionrec.n_ionise)*v_n_k
+ n_ionrec.n_recombine*(m_i_over_m_n*v_ion_k+m_e_over_m_n*v_e_k))/n_n_plus
// explicit part of the midpoint friction with electrons and ions:
- Beta_ne*(v_n_k-v_e_k)
- Beta_ni*(v_n_k-v_ion_k);
{
//Vector2 grad_nT_neut = p_grad_nT_neut[index];
Vector3 MomAdditionRate = p_MomAdditionRate_neut[index];
// We can avoid a fetch if we just store the sum(diff) of these in 1 Vector3
// But be careful : how do we work out visc heating? Do that first.
// We stored [gradnTintegrated / m_s] = d/dt Nv
v_n_0 += h*( //-grad_nT_neut.x + ViscMomAdditionRate_neut.x)/(n_n_plus*m_n);
MomAdditionRate/(n_n_plus*area));
f64 over = 1.0/(1.0 + Beta_ne + Beta_ni);
v_n_0 *= over;
Beta_ni *= over;
Beta_ne *= over;
}
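// After the rescaling above, the neutral solve is closed once the charged species are known:
// v_n_plus = v_n_0 + Beta_ne*v_e_plus + Beta_ni*v_ion_plus (applied further down).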
// Now get v_i (v_e):
f64 total =
(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x+omega_ce.y*omega_ce.y+omega_ce.z*omega_ce.z);
Vector3 vec_e, vec_i, dAdt_k;
f64 EzShape;
{
Vector2 grad_phi, GradTe;
Vector3 MomAdditionRate; // We could use it first as this, union with dAdt_k
grad_phi = p_grad_phi_half[index];
Lap_A_half = p_Lap_A_half[index];
dAdt_k = p_Adot_k[index];
MomAdditionRate = p_MomAdditionRate_ion[index];
// TRY putting this stuff outside the branch to see what happens.
// ***************************************************************
EzShape = GetEzShape(centroid.modulus());
// Set up most of vec_e, vec_i here:
vec_i = // Ionisation affected v_i_k:
((nT_ion_src.n-n_ionrec.n_recombine)*v_ion_k + n_ionrec.n_ionise*v_n_k)/n_ion_plus
- h*0.5*moverM*omega_ce.cross(v_ion_k);
vec_i +=
h*qoverM*( //- grad_phi [[below]]
- dAdt_k/c - h*c*0.5*Lap_A_half
- h*M_PI*e*(nT_ion_src.n*v_ion_k - nT_elec_src.n*v_e_k))
// nu_ni/n * n_n = nu_in
- h*0.5*(m_n/(m_ion+m_n))*nu_ni_MT_over_n*nT_neut_use.n*(v_ion_k-v_n_k-v_n_0)
- h*0.5*moverM*nu_ieBar*(v_ion_k-v_e_k);
vec_i.x -= h*qoverM*grad_phi.x;
vec_i.y -= h*qoverM*grad_phi.y;
vec_i.z += h*qoverM*EzShape*EzTuning;
if ((OUTPUT) && (index == REPORT)) printf("vec_i %1.5E %1.5E %1.5E\n",vec_i.x,vec_i.y,vec_i.z);
// -grad_nT_ion.x + ViscMomAdditionRate_ion.x
// Vector3 ViscMomAdditionRate_ion = p_visc_mom_addition_rate_ion[index];
// We can avoid a fetch if we just store the sum(diff) of these in 1 Vector3
// But be careful : how do we work out visc heating? It has to be fetched separately anyway.
vec_i += h*((MomAdditionRate)/(n_ion_plus*area));
if ((OUTPUT) && (index == REPORT)) printf("vec_i w/press %1.5E %1.5E %1.5E\n",vec_i.x,vec_i.y,vec_i.z);
MomAdditionRate = p_MomAdditionRate_elec[index];
// We almost certainly should take v += (ViscMomAddition/n_k+1)
// The same applies to grad_nT_ion : integrate this over [t_k,t_k+1]
// and we get the addition to momentum.
GradTe = p_grad_Te[index];
// Add thermal force on ions:
f64 fac = 1.5*h*(nu_ieBar/(m_ion*nu_eHeart*total));
vec_i.x += fac* (// (Upsilon.xx)*GradTe.x + Upsilon.xy*GradTe.y
(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart)*GradTe.x
+ (omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z)*GradTe.y);
// divide by (nu*nu+omega*omega) already in fac
vec_i.y += fac* (
(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z)*GradTe.x
+ (omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart)*GradTe.y);
vec_i.z += fac* (
(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y)*GradTe.x
+ (omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x)*GradTe.y);
// if (index == 15936) printf("vec_i %1.5E \n",vec_i.z);
// Add Upsilon part of collisional term:
fac = h*0.5*0.9*moverM*nu_eiBar*nu_ieBar/(nu_eHeart*total);
vec_i.x += fac*( (omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart)*(v_ion_k.x-v_e_k.x)
+ (omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z)*(v_ion_k.y-v_e_k.y)
+ (omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y)*(v_ion_k.z-v_e_k.z)
);
vec_i.y += fac*( (omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z)*(v_ion_k.x-v_e_k.x)
+ (omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart)*(v_ion_k.y-v_e_k.y)
+ (omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x)*(v_ion_k.z-v_e_k.z)
);
vec_i.z += fac*( (omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y)*(v_ion_k.x-v_e_k.x)
+ (omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x)*(v_ion_k.y-v_e_k.y)
+ (omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart)*(v_ion_k.z-v_e_k.z)
);
if ((OUTPUT) && (index == REPORT)) {
printf("%d vik %1.4E %1.4E %1.4E Vi %1.4E %1.4E %1.4E \n",
index,v_ion_k.x,v_ion_k.y,v_ion_k.z,vec_i.x,vec_i.y,vec_i.z);
};
vec_e = ((nT_elec_src.n-n_ionrec.n_recombine)*v_e_k + n_ionrec.n_ionise*v_n_k)/n_e_plus;
vec_e += h*0.5*omega_ce.cross(v_e_k)
- h*eoverm*(// -grad_phi // below
- dAdt_k/c - h*c*0.5*Lap_A_half
- h*M_PI*e*(nT_ion_src.n*v_ion_k - nT_elec_src.n*v_e_k))
- 0.5*h*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n*(v_e_k-v_n_k-v_n_0)
- 0.5*h*nu_eiBar*(v_e_k-v_ion_k);
vec_e.x += h*eoverm*grad_phi.x ;
vec_e.y += h*eoverm*grad_phi.y;
vec_e.z += -h*eoverm*EzShape*EzTuning;
if ((OUTPUT) && (index == REPORT))
printf("vec_e %1.6E %1.6E %1.6E\n",vec_e.x,vec_e.y,vec_e.z);
//vec_e.x += h*( (-grad_nT_e.x )/(n_e_plus*m_e));
vec_e += h*(MomAdditionRate/(n_e_plus*area)); // MAR = d/dt (Neve)
// Add thermal force to electrons:
fac = -(1.5*h*nu_eiBar/(m_e*nu_eHeart*total));
vec_e.x += fac*(
(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart)*GradTe.x
+ (omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z)*GradTe.y);
vec_e.y += fac*(
(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z)*GradTe.x
+ (omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart)*GradTe.y);
vec_e.z += fac*(
(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y)*GradTe.x
+ (omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x)*GradTe.y);
if ((OUTPUT) && (index == REPORT)) {
printf("vec_e intermed %1.6E %1.6E %1.6E\n",vec_e.x,vec_e.y,vec_e.z);
printf("h*eoverm*grad_phi %1.6E %1.6E \n",h*eoverm*grad_phi.x,h*eoverm*grad_phi.y);
printf("h*(MomAdditionRate/(n_e_plus*area)) %1.6E %1.6E \n",
h*(MomAdditionRate.x/(n_e_plus*area)),
h*(MomAdditionRate.y/(n_e_plus*area)));
printf("h*0.5*omega_ce.cross(v_e_k).z %1.10E \n",
h*0.5*(omega_ce.cross(v_e_k)).z);
printf("h*eoverm*dAdt_k/c %1.6E \n h*eoverm*h*c*0.5*Lap_A_half %1.6E\n"
" h*eoverm*h*M_PI*e*() %1.6E\n"
"0.5*h*...*(v_e_k-v_n_k-v_n_0) %1.6E\n"
"0.5*h*nu_eiBar*(v_e_k-v_ion_k) %1.6E\n",
h*eoverm*dAdt_k.z/c ,h*eoverm*h*c*0.5*Lap_A_half.z,
h*eoverm*h*M_PI*e*(nT_ion_src.n*v_ion_k.z - nT_elec_src.n*v_e_k.z),// comes out -1.033e6.
// = 8e-18*2e17*(viz-vez) = 1.6 (viz-vez) = 1.6(-vez) = 1e6.
// Where is the term that cancels its impact?
0.5*h*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n*(v_e_k-v_n_k-v_n_0).z,
0.5*h*nu_eiBar*(v_e_k-v_ion_k).z);
printf("-h*eoverm*EzShape*EzTuning %1.6E\n",
-h*eoverm*EzShape*EzTuning);
printf("thermal contrib %1.6E\n",
fac*(
(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y)*GradTe.x
+ (omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x)*GradTe.y));
};
// Add Upsilon part of collisional term:
fac = 0.5*h*0.9*nu_eiBar*nu_eiBar/(nu_eHeart*total);
vec_e.x += fac*(
(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart)*(v_e_k.x-v_ion_k.x)
+ (omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z)*(v_e_k.y-v_ion_k.y)
+ (omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y)*(v_e_k.z-v_ion_k.z));
vec_e.y += fac*(
(omega_ce.y*omega_ce.x + nu_eHeart*omega_ce.z)*(v_e_k.x-v_ion_k.x)
+ (omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart)*(v_e_k.y-v_ion_k.y)
+ (omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x)*(v_e_k.z-v_ion_k.z));
vec_e.z += fac*(
(omega_ce.z*omega_ce.x - nu_eHeart*omega_ce.y)*(v_e_k.x-v_ion_k.x)
+ (omega_ce.z*omega_ce.y + nu_eHeart*omega_ce.x)*(v_e_k.y-v_ion_k.y)
+ (omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart)*(v_e_k.z-v_ion_k.z));
if ((OUTPUT) && (index == REPORT))
{
printf("contrib_upsilon %1.6E\n",
fac*(
(omega_ce.z*omega_ce.x - nu_eHeart*omega_ce.y)*(v_e_k.x-v_ion_k.x)
+ (omega_ce.z*omega_ce.y + nu_eHeart*omega_ce.x)*(v_e_k.y-v_ion_k.y)
+ (omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart)*(v_e_k.z-v_ion_k.z)));
printf("%d vek %1.4E %1.4E %1.4E \nVe %1.4E %1.4E %1.4E \n",
index,v_e_k.x,v_e_k.y,v_e_k.z,vec_e.x,vec_e.y,vec_e.z);
};
}
Tensor3 Tens1, Tens2, Tens3;
// We are going to need Tens1, Tens2 again
// BUT have to reallocate BECAUSE ...
// we don't want them to be created prior to this.
// and we can't stand to put heating in this same scope
// which also has Tens3.
// Tens1 is going to be "G"
// Set Tens3 = Upsilon_eHeart:
//// nu = nu_eHeart, omega =
//{
// f64 total = nu_eHeart*nu_eHeart+
// omega_ce.x*omega_ce.x + omega_ce.y*omega_ce.y
// +omega_ce.x*omega_ce.z;
// Upsilon_eHeart.xx = (nu_eHeart*nu_eHeart +omega_ce.x*omega_ce.x)/total;
// Upsilon_eHeart.xy = (omega_ce.x*omega_ce.y-nu_eHeart*omega_ce.z)/total;
// Upsilon_eHeart.xz = (omega_ce.x*omega_ce.z+nu_eHeart*omega_ce.y)/total;
// Upsilon_eHeart.yx =
//}
// Upsilon is used 8 times.
// But it would keep getting wiped out.
// So it's a real problem. Storing it is another 9 doubles which is bad.
// Try to edit to at least copy-paste the code...
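// Outline of the solve, in the notation of the comments further down:
// [ G U ] [ v_i+ ] = [ d ]      with d = vec_i, d' = vec_e.
// [ F V ] [ v_e+ ]   [ d' ]
// Block elimination: v_e+ = (V - F G^-1 U)^-1 (d' - F G^-1 d), then v_i+ = G^-1 (d - U v_e+).
// Tens1/Tens2/Tens3 are recycled to hold G, G^-1, F, F G^-1, U and V - F G^-1 U in turn.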
f64 fac = h*0.5*moverM*0.9*nu_eiBar*nu_ieBar/(nu_eHeart*total);
// We inserted 1/total into fac.
Tens1.xx = 1.0
// + no contrib from omega_ci x
+ (h*0.5*m_n/(m_ion+m_n))*nu_ni_MT_over_n*nT_neut_use.n
*(1.0-Beta_ni)
+ h*0.5*moverM*nu_ieBar
+ h*h*e*e*M_PI* n_ion_plus / m_ion
;
Tens1.yy = Tens1.xx;
Tens1.zz = Tens1.xx;
Tens1.xx -= fac*//Upsilon_eHeart.xx/total;
// division by "total = nu*nu+omega*omega" is in fac.
(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x);
Tens1.yy -= fac*(nu_eHeart*nu_eHeart + omega_ce.y*omega_ce.y);
Tens1.zz -= fac*(nu_eHeart*nu_eHeart + omega_ce.z*omega_ce.z);
Tens1.xy = -h*0.5*moverM*omega_ce.z
- fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens1.xz = h*0.5*moverM*omega_ce.y
- fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens1.yx = h*0.5*moverM*omega_ce.z
- fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens1.yz = -h*0.5*moverM*omega_ce.x
- fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens1.zx = -h*0.5*moverM*omega_ce.y
- fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens1.zy = h*0.5*moverM*omega_ce.x
- fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
// ... replace omega_ci = omega_ce*moverM ...
// Formula for Upsilon_eHeart comes from Krook model subsection in model document.
// We will prefer not to create omega_ci vector of course!!!
Tens1.Inverse(Tens2); // Tens2 now = G^-1
// Now create F:
fac = h*0.5*0.9*nu_eiBar*nu_eiBar/(nu_eHeart*total);
Tens3.xx = -h*0.5*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n*Beta_ni
-h*0.5*nu_eiBar
- (h*h*e*e*M_PI*over_m_e) * n_ion_plus;
Tens3.yy = Tens3.xx;
Tens3.zz = Tens3.xx;
Tens3.xx += fac*(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x);
Tens3.yy += fac*(nu_eHeart*nu_eHeart + omega_ce.y*omega_ce.y);
Tens3.zz += fac*(nu_eHeart*nu_eHeart + omega_ce.z*omega_ce.z);
Tens3.xy = fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens3.xz = fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens3.yx = fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens3.yz = fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens3.zx = fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens3.zy = fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
Tens1 = Tens3*Tens2;
// Contents now: { F G^-1, G^-1, F }
// Now create the vector for v_e:
// vec_e = d' - F G^-1 d
vec_e -= Tens1*vec_i;
if ((OUTPUT) && (index == REPORT))
printf("modified vec_e \n %1.6E %1.6E %1.6E \n",
vec_e.x,vec_e.y,vec_e.z);
// Let's watch out:
// this means if we change EzExt then we change vec_e.z and vec_i.z
// directly, but we also change vec_e via vec_i.
// We need to store that from this point because we are about to wipe out Tens1.
Vector3 vec_e_effect_of_EzTuning;
vec_e_effect_of_EzTuning.x = -Tens1.xz*(h*qoverM*EzShape);
vec_e_effect_of_EzTuning.y = -Tens1.yz*(h*qoverM*EzShape);
vec_e_effect_of_EzTuning.z = -h*eoverm*EzShape-Tens1.zz*(h*qoverM*EzShape);
// Contents now: { F G^-1, G^-1, F }
// Populate Tens3 as U. Multiply to get Tens2 = FG^-1 U
Tens3.xx = -0.5*h*(m_n/(m_ion+m_n))*nu_ni_MT_over_n*nT_neut_use.n*Beta_ne
- 0.5*h*moverM*nu_ieBar
- h*h*e*qoverM* M_PI * n_e_plus;
Tens3.yy = Tens3.xx;
Tens3.zz = Tens3.xx;
fac = 0.5*h*moverM*0.9*nu_eiBar*nu_ieBar/(nu_eHeart*total);
Tens3.xx += fac*(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart);
Tens3.yy += fac*(omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart);
Tens3.zz += fac*(omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart);
Tens3.xy = fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens3.xz = fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens3.yx = fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens3.yz = fac*(omega_ce.z*omega_ce.y - nu_eHeart*omega_ce.x);
Tens3.zx = fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens3.zy = fac*(omega_ce.z*omega_ce.y + nu_eHeart*omega_ce.x);
// We really could do with storing Upsilon somehow.
Tens2 = Tens1*Tens3;
// Tens1 = V - F G^-1 U
// V:
Tens1.xx = 1.0 + h*0.5*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n
*(1.0-Beta_ne)
+ h*0.5*nu_eiBar + h*h*e*eoverm* M_PI* n_e_plus;
Tens1.yy = Tens1.xx;
Tens1.zz = Tens1.xx;
fac = -0.5*h*0.9*nu_eiBar*nu_eiBar/(nu_eHeart*total);
Tens1.xx += fac*(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart);
Tens1.yy += fac*(omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart);
Tens1.zz += fac*(omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart);
Tens1.xy = h*0.5*omega_ce.z
+ fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens1.xz = -h*0.5*omega_ce.y
+ fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens1.yx = -h*0.5*omega_ce.z
+ fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens1.yz = h*0.5*omega_ce.x
+ fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens1.zx = h*0.5*omega_ce.y
+ fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens1.zy = -h*0.5*omega_ce.x
+ fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
if ((OUTPUT) && (index == REPORT))
printf( "nu_eiBar %1.5E n_e_plus %1.5E nu_en_MT %1.5E\n"
"V \n %1.6E %1.6E %1.6E \n%1.6E %1.6E %1.6E \n%1.6E %1.6E %1.6E \n\n",
nu_eiBar,n_e_plus,nu_ne_MT_over_n*nT_neut_use.n,
Tens1.xx,Tens1.xy,Tens1.xz,
Tens1.yx,Tens1.yy,Tens1.yz,
Tens1.zx,Tens1.zy,Tens1.zz);
Tens1 -= Tens2;
if ((OUTPUT) && (index == REPORT))
printf("V-FG^-1U \n %1.6E %1.6E %1.6E \n%1.6E %1.6E %1.6E \n%1.6E %1.6E %1.6E \n\n",
Tens1.xx,Tens1.xy,Tens1.xz,
Tens1.yx,Tens1.yy,Tens1.yz,
Tens1.zx,Tens1.zy,Tens1.zz);
// Now calculate v_e:
// Two cases: on 1st pass we should
// -- insert the
Tens1.Inverse(Tens2);
v_e_plus = Tens2*vec_e;
// DEBUG:
// f64_vec3 vec_e0 = vec_e;
// vec_e0.z += h*eoverm*EzShape*EzTuning;
// f64_vec3 v_e_0 = Tens2*vec_e0;
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if ((OUTPUT) && (index == REPORT)) {
printf("(V-FG^-1U)^-1 \n %1.6E %1.6E %1.6E ) %1.6E = %1.6E\n%1.6E %1.6E %1.6E ) %1.6E = %1.6E \n%1.6E %1.6E %1.6E ) %1.6E = %1.6E \n\n",
Tens2.xx,Tens2.xy,Tens2.xz,vec_e.x,v_e_plus.x,
Tens2.yx,Tens2.yy,Tens2.yz,vec_e.y,v_e_plus.y,
Tens2.zx,Tens2.zy,Tens2.zz,vec_e.z,v_e_plus.z);
printf("\n");
// Test relationship:
printf("(1-hh) %1.4E (1-..)/(1+..) %1.4E\n",
(1.0-h*h*e*eoverm*M_PI*nT_elec_src.n),(1.0-h*h*e*eoverm*M_PI*nT_elec_src.n)/(1.0+h*h*e*eoverm*M_PI*n_e_plus));
printf("vekz %1.4E rat*vekz %1.4E vez %1.4E \n************\n",
v_e_k.z,v_e_k.z*(1.0-h*h*e*eoverm*M_PI*nT_elec_src.n)/(1.0+h*h*e*eoverm*M_PI*n_e_plus),v_e_plus.z);
};
// Effect of EzTuning:
// We have to record a couple of extra values here:
{
{
Vector3 ve_plus_of_EzTuning = Tens2*vec_e_effect_of_EzTuning;
real viz_plus_of_EzTuning;
{
Vector3 temp = Tens3*ve_plus_of_EzTuning;
viz_plus_of_EzTuning = Tens2.zz*h*qoverM*EzShape
- Tens2.zx*temp.x
- Tens2.zy*temp.y
- Tens2.zz*temp.z;// Where are we getting area?
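// NB (uncertain): Tens2 here is still (V - F G^-1 U)^-1, whereas v_i+ below is formed
// with G^-1; if the intent is dv_iz/dEz = [G^-1 (dd/dEz - U dv_e/dEz)]_z, the recreated
// G^-1 may be the matrix wanted for this sigma estimate.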
}
sigma_zz[threadIdx.x] = q*area*(viz_plus_of_EzTuning*n_ion_plus - ve_plus_of_EzTuning.z*n_e_plus);
}
// Some changes resulted in lower stack frame, higher loads+stores.
// We should realise that way outside L1, this is a worsening. NVM.
// Can we preserve U = Tens3?
// Now recreate G which we overwrote.
// 'fac' was last set for V above; restore the value that G was originally built with:
fac = h*0.5*moverM*0.9*nu_eiBar*nu_ieBar/(nu_eHeart*total);
Tens1.xx = 1.0
// + no contrib from omega_ci x
+ (h*0.5*m_n/(m_ion+m_n))*nu_ni_MT_over_n*nT_neut_use.n
*(1.0-Beta_ni)
+ h*0.5*moverM*nu_ieBar
+ h*h*e*e* M_PI* n_ion_plus / m_ion
;
Tens1.yy = Tens1.xx;
Tens1.zz = Tens1.xx;
Tens1.xx -= fac*//Upsilon_eHeart.xx/total;
// division by "total = nu*nu+omega*omega" is in fac.
(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x);
Tens1.yy -= fac*(nu_eHeart*nu_eHeart + omega_ce.y*omega_ce.y);
Tens1.zz -= fac*(nu_eHeart*nu_eHeart + omega_ce.z*omega_ce.z);
Tens1.xy = -h*0.5*moverM*omega_ce.z
- fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens1.xz = h*0.5*moverM*omega_ce.y
- fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens1.yx = h*0.5*moverM*omega_ce.z
- fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens1.yz = -h*0.5*moverM*omega_ce.x
- fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens1.zx = -h*0.5*moverM*omega_ce.y
- fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens1.zy = h*0.5*moverM*omega_ce.x
- fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
// ... replace omega_ci = omega_ce*moverM ...
// Formula for Upsilon_eHeart comes from Krook model subsection in model document.
// We will prefer not to create omega_ci vector of course!!!
Tens1.Inverse(Tens2); // Tens2 now = G^-1
v_ion_plus = Tens2*(vec_i - Tens3*v_e_plus);
Iz[threadIdx.x] = q*area*(v_ion_plus.z*n_ion_plus - v_e_plus.z*n_e_plus);
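// Iz is this minor cell's contribution to the total z-directed current, and sigma_zz its
// linear response to EzTuning; both are reduced over the block below and written out per
// block, presumably so the external Ez can be tuned to hit the prescribed total current.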
// Debug: report only when Iz has gone NaN for a triangle (non-central) element.
// (Alternative triggers, OUTPUT/REPORT or Iz > 1.0e6, left commented out previously.)
if ((Iz[threadIdx.x] != Iz[threadIdx.x]) && (index < BEGINNING_OF_CENTRAL)){
printf("%d Iz %1.5E sig %1.4E ne %1.4E vez %1.4E\n",
index,
//q*area*(v_ion_plus.z*n_ion_plus - v_e_0.z*n_e_plus),
Iz[threadIdx.x],
sigma_zz[threadIdx.x],
n_e_plus, v_e_plus.z);
};
} // ve_plus_of_EzTuning goes out of scope
v_n_plus = v_n_0 + Beta_ne*v_e_plus + Beta_ni*v_ion_plus;
// v_e = (V-F G^-1 U) ^-1 ( vec_e_0 )
// + EzTuning (V-F G^-1 U) ^-1 ( vec_e_1 )
// v_i = G^-1 (d - U ve)
// Now:
if (b2ndPass) {
p_dAdt_out[index] = dAdt_k + h*c*c*(Lap_A_half + TWO_PI_OVER_C*
//(J_k+J_k+1)
q*(nT_ion_src.n*v_ion_k-nT_elec_src.n*v_e_k +
n_ion_plus*v_ion_plus-n_e_plus*v_e_plus)
);
// The Jk comes from what was implied earlier: our n_plus as figured here.
// . v_e_plus needs here to be the estimate from our "best guess" Ez_ext.
// . Both J's, k and k+1, need to correspond to the evolution of rho.
};
// Lap_A_half is the only variable that is only in scope in this bracket.
// We really should try putting writes outside braces.
}
if (b2ndPass == 0) {
// WE NO LONGER WANT TO DO THIS: No save-off of n,T on minor cells.
// nT_neut_use.n = (nT_neut_src.n+nT_neut_use.n)*0.5;
// nT_neut_use.T = (nT_neut_src.T+nT_neut_use.T)*0.5;
// nT_ion_use.n = (nT_ion_src.n+nT_ion_use.n)*0.5;
// nT_ion_use.T = (nT_ion_src.T+nT_ion_use.T)*0.5;
// nT_elec_use.n = (nT_elec_src.n+nT_elec_use.n)*0.5;
// nT_elec_use.T = (nT_elec_src.T+nT_elec_use.T)*0.5;
v_n_plus = 0.5*(v_n_plus+v_n_k);
v_ion_plus = 0.5*(v_ion_plus+v_ion_k);
v_e_plus = 0.5*(v_e_plus+v_e_k);
// Tween back to output half-time system
}
// p_nT_neut_out[index] = nT_neut_use;
// p_nT_ion_out[index] = nT_ion_use;
// p_nT_elec_out[index] = nT_elec_use;
// Save them off in the heating routine that takes place on majors, not here.
p_v_neut_out[index] = v_n_plus;
p_v_ion_out[index] = v_ion_plus;
p_v_elec_out[index] = v_e_plus;
// On 1st pass we use this v to calculate viscosity.
// Time to sort out heating contribution:
f64 NnTn_addition, NiTi_addition, NeTe_addition;
// Inelastic friction heating:
NiTi_addition = area* THIRD*m_ion*n_ionrec.n_ionise*((v_ion_k-v_n_k).dot(v_ion_k-v_n_k));
NnTn_addition = area* THIRD*m_ion*n_ionrec.n_recombine*((v_ion_k-v_n_k).dot(v_ion_k-v_n_k));
NeTe_addition = area* THIRD*m_e*(n_ionrec.n_ionise + n_ionrec.n_recombine)*((v_e_k-v_n_k).dot(v_e_k-v_n_k));
{
f64 total =
(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x+omega_ce.y*omega_ce.y+omega_ce.z*omega_ce.z);
Tensor3 Tens1;
Tens1.xx = h*nu_eiBar ;
Tens1.yy = Tens1.xx;
Tens1.zz = Tens1.xx;
f64 fac = -h*0.9*nu_eiBar*nu_eiBar/(nu_eHeart*total);
Tens1.xx += fac*(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart);
Tens1.yy += fac*(omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart);
Tens1.zz += fac*(omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart);
Tens1.xy = fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens1.xz = fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens1.yx = fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens1.yz = fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens1.zx = fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens1.zy = fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
// This was e-i resistive heating:
NeTe_addition +=
area* SIXTH*n_e_plus*TWOTHIRDS*m_e*(
// rate of change of ve. dot(ve-vi), integrated:
(Tens1*(v_e_k-v_ion_k)).dot(v_e_k-v_ion_k)
+
(Tens1*(v_e_k-v_ion_k+v_e_plus-v_ion_plus)).dot
(v_e_k-v_ion_k+v_e_plus-v_ion_plus) // 0.25 cancels with 4
+ (Tens1*(v_e_plus-v_ion_plus)).dot(v_e_plus-v_ion_plus)
);
}
{
// Frictional heating e-n and i-n (elastic momentum-transfer collisions) --
// despite the "inelastic" label used earlier, that appears to be what this block computes.
f64 M_in = m_n*m_ion/((m_n+m_ion)*(m_n+m_ion));
// f64 M_en = m_n*m_e/((m_n+m_e)*(m_n+m_e));
// f64 M_ie = m_ion*m_e/((m_ion+m_e)*(m_ion+m_e));
NeTe_addition += area * SIXTH*n_e_plus*TWOTHIRDS*m_e*(
h*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n*(
(v_e_k-v_n_k).dot(v_e_k-v_n_k)
+ (v_e_k-v_n_k + v_e_plus - v_n_plus).dot(v_e_k-v_n_k + v_e_plus - v_n_plus)
+ (v_e_plus-v_n_plus).dot(v_e_plus-v_n_plus)
));
f64 v_ni_diff_sq = SIXTH*((v_n_k-v_ion_k).dot(v_n_k-v_ion_k)
+ (v_n_k-v_ion_k+v_n_plus-v_ion_plus).dot(v_n_k-v_ion_k+v_n_plus-v_ion_plus)
+ (v_n_plus-v_ion_plus).dot(v_n_plus-v_ion_plus));
NiTi_addition += area * n_ion_plus*TWOTHIRDS*m_n*
h*M_in*nu_ni_MT_over_n*nT_neut_use.n*v_ni_diff_sq;
NnTn_addition += area * n_n_plus*TWOTHIRDS*m_ion*
h*M_in*nu_ni_MT_over_n*nT_ion_use.n*v_ni_diff_sq;
// We can deduce T_k+1 afterwards from n_k+1 T_k+1.
// OR, we can rearrange conservative equations to be for T_k+1.
}
// NOTE: here we assign with "=" rather than "+=". If viscous/conductive/thermoelectric
// heating were accumulated into the same slots, this would need to become "+=".
// These values are the additions to NT.
p_resistive_heat_neut[index] = NnTn_addition;
p_resistive_heat_ion[index] = NiTi_addition;
p_resistive_heat_elec[index] = NeTe_addition;
} else { // (info.flag == DOMAIN_VERTEX) ...
p_resistive_heat_neut[index] = 0.0;
p_resistive_heat_ion[index] = 0.0;
p_resistive_heat_elec[index] = 0.0; // Or save some writes by doing hipMemset beforehand.
if (per_info.flag == OUTERMOST) {
// p_nT_neut_out[index] = nT_neut_src;
// p_nT_ion_out[index] = nT_ion_src;
// p_nT_elec_out[index] = nT_elec_src;
p_v_neut_out[index] = v_n_k;
p_v_ion_out[index] = v_ion_k;
p_v_elec_out[index] = v_e_k;
// Populate with something to avoid mishaps.
}
Vector3 dAdt_k,four_pi_over_c_J;
// Lap_A_half = p_Lap_A_half[index];
dAdt_k = p_Adot_k[index];
// ReverseJ calc:
four_pi_over_c_J.x = 0.0;
four_pi_over_c_J.y = 0.0;
four_pi_over_c_J.z = 0.0;
if ((index >= ReverseJzIndexStart) && (index < ReverseJzIndexEnd))
{
four_pi_over_c_J.z = four_pi_over_c_ReverseJz;
}
Vector3 Adot_plus = dAdt_k + h*c*c*(Lap_A_half + four_pi_over_c_J);
// if ((OUTPUT) && (index == REPORT))
// printf("Adot %1.5E Lap_A_half %1.5E 4pi/cJ %1.5E Adot+ %1.5E\n",
// dAdt_k.z, Lap_A_half.z, four_pi_over_c_J.z, Adot_plus.z);
p_dAdt_out[index] = Adot_plus;
sigma_zz[threadIdx.x] = 0.0;
Iz[threadIdx.x] = 0.0;
};
//} else { // index < Nverts
// sigma_zz[threadIdx.x] = 0.0;
// Iz[threadIdx.x] = 0.0;
//};
// Aggregate:
__syncthreads();
int s = blockDim.x;
int k = s/2;
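// Pairwise reduction over the block in shared memory; the (s % 2 == 1) branch folds in the
// odd leftover element, so block sizes that are not powers of two are also summed correctly.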
while (s != 1) {
if (threadIdx.x < k)
{
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + k];
Iz[threadIdx.x] += Iz[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k-1)){
sigma_zz[threadIdx.x] += sigma_zz[s-1];
Iz[threadIdx.x] += Iz[s-1];
};
// e.g. s == 81, k == 40: the pairwise fold above only reaches element 39+40 = 79,
// so the leftover element [80] = [s-1] is folded into slot [k-1] here.
s = k;
k = s/2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sigma_zz[blockIdx.x] = sigma_zz[0];
p_Iz[blockIdx.x] = Iz[0];
}
}
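// The 'Upsilon' pattern above, fac*(nu^2 + omega_a*omega_b +/- nu*omega_c), is rebuilt by hand
// several times. A possible refactor (a sketch only, not called anywhere yet) is a small helper
// that fills a Tensor3 with fac*( nu^2 I + omega omega^T + nu [omega]_x ), which reproduces
// each of those 9-entry blocks:
__device__ __forceinline__ void Populate_Upsilon_times_fac(
Tensor3 & T, f64 fac, f64 nu, const Vector3 & omega)
{
T.xx = fac*(nu*nu + omega.x*omega.x);
T.xy = fac*(omega.x*omega.y - nu*omega.z);
T.xz = fac*(omega.x*omega.z + nu*omega.y);
T.yx = fac*(omega.x*omega.y + nu*omega.z);
T.yy = fac*(nu*nu + omega.y*omega.y);
T.yz = fac*(omega.y*omega.z - nu*omega.x);
T.zx = fac*(omega.x*omega.z - nu*omega.y);
T.zy = fac*(omega.y*omega.z + nu*omega.x);
T.zz = fac*(nu*nu + omega.z*omega.z);
}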
__global__ void Kernel_Heating_routine(
f64 const h,
structural * __restrict__ p_info,
long * __restrict__ p_IndexTri,
nT * __restrict__ p_nT_neut_src,
nT * __restrict__ p_nT_ion_src,
nT * __restrict__ p_nT_elec_src,
nn * __restrict__ p_nn_ionrec,
// If we want "use" then it comes in as the output variable.
f64_vec3 * __restrict__ p_B_major,
// f64 * __restrict__ p_visccond_heatrate_neut,
// f64 * __restrict__ p_visccond_heatrate_ion,
// f64 * __restrict__ p_visccond_heatrate_elec,
// We could get rid and use the central slots from the resistive heating.
// Defined on minor:
f64 * __restrict__ p_resistive_heat_neut,
f64 * __restrict__ p_resistive_heat_ion,
f64 * __restrict__ p_resistive_heat_elec, // to include inelastic frictional effects.
// What about ion-neutral frictional heating? Where was that included??
f64 * __restrict__ p_area_cell,
nT * __restrict__ p_nT_neut_out,
nT * __restrict__ p_nT_ion_out,
nT * __restrict__ p_nT_elec_out,
bool b2ndPass // on '2ndpass', load nT_neut_use.
)
{
// Temperature advance:
__shared__ f64 resistive_neut[SIZE_OF_TRI_TILE_FOR_MAJOR];
__shared__ f64 resistive_ion[SIZE_OF_TRI_TILE_FOR_MAJOR];
__shared__ f64 resistive_elec[SIZE_OF_TRI_TILE_FOR_MAJOR]; // 6 doubles equiv
__shared__ long indextri[MAXNEIGH_d*threadsPerTileMajor]; // 6 doubles equiv
resistive_neut[threadIdx.x]
= p_resistive_heat_neut[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
resistive_neut[threadIdx.x + threadsPerTileMajor]
= p_resistive_heat_neut[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadsPerTileMajor + threadIdx.x];
resistive_ion[threadIdx.x]
= p_resistive_heat_ion[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
resistive_ion[threadIdx.x + threadsPerTileMajor]
= p_resistive_heat_ion[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadsPerTileMajor + threadIdx.x];
resistive_elec[threadIdx.x]
= p_resistive_heat_elec[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
resistive_elec[threadIdx.x + threadsPerTileMajor]
= p_resistive_heat_elec[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadsPerTileMajor + threadIdx.x];
__syncthreads();
f64 niTi, nnTn, neTe;
nT nT_ion_src, nT_elec_src, nT_neut_src,
nT_neut_use, nT_ion_use, nT_elec_use;
f64 n_e_plus, n_ion_plus, n_n_plus, area;
long index = blockIdx.x*blockDim.x + threadIdx.x;
long StartTri = SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x;
memcpy(indextri + MAXNEIGH_d*threadIdx.x,
p_IndexTri + index*MAXNEIGH_d, sizeof(long)*MAXNEIGH_d);
// Do we also want to gather v ? No, we can use from centrals.
// Remember to collect resistive heat from centrals as well.
nn n_ionrec = p_nn_ionrec[index];
structural info = p_info[index];
nT_neut_src = p_nT_neut_src[index];
nT_ion_src = p_nT_ion_src[index];
nT_elec_src = p_nT_elec_src[index];
area = p_area_cell[index];
n_n_plus = nT_neut_src.n + n_ionrec.n_recombine-n_ionrec.n_ionise;
n_ion_plus = nT_ion_src.n + n_ionrec.n_ionise-n_ionrec.n_recombine;
n_e_plus = nT_elec_src.n + n_ionrec.n_ionise-n_ionrec.n_recombine;
if (b2ndPass) {
nT_neut_use = p_nT_neut_out[index];
nT_ion_use = p_nT_ion_out[index];
nT_elec_use = p_nT_elec_out[index];
} else {
nT_neut_use = nT_neut_src;
nT_ion_use = nT_ion_src;
nT_elec_use = nT_elec_src;
}
niTi = (nT_ion_src.n-n_ionrec.n_recombine)*nT_ion_src.T
+ 0.5*n_ionrec.n_ionise*nT_neut_src.T;
nnTn = (nT_neut_src.n-n_ionrec.n_ionise)*nT_neut_src.T
+ n_ionrec.n_recombine*(nT_elec_src.T+nT_ion_src.T )
+ n_ionrec.n_recombine*TWOTHIRDS*13.6*kB;
neTe = (nT_elec_src.n-n_ionrec.n_recombine)*nT_elec_src.T
+ 0.5*n_ionrec.n_ionise*nT_neut_src.T
- n_ionrec.n_ionise*TWOTHIRDS*13.6*kB;
if ((OUTPUT) && (index == REPORT)){
printf(
"Tsrc %1.5E %1.5E %1.5E \n"
"nT ionise %1.5E %1.5E %1.5E \n",
nT_neut_src.T,nT_ion_src.T,nT_elec_src.T,
nnTn,niTi,neTe);
};
// This will serve as part of the right hand side for including heat transfers.
// Visc+cond heat addition:
// ------------------------
// DKE = 1/2 m n v.v
// We should associate a heating amount with each wall that will be positive.
// ( That works out nicely for offset! )
// That means we need to do a fetch. We can't work out visc htg without knowing
// neighbour v, which means we might as well store it - correct?
// If we are adding to v then we are increasing or decreasing DKE here -- but
// then we want net + heating appearing in this and the neighbour.
// So that leaves us having to do a fetch always.
// & Include heat conduction heat addition in same step.
// ------------------------
{
// CAREFUL ABOUT WHETHER THESE WERE CREATED DIVIDING BY AREA.
// Either we will reinstate these here, or,
// we will proceed by putting the necessary heat into
// what is now the "resistive" variable, major/central part.
// nnTn += h*p_visccond_heatrate_neut[index];
// niTi += h*p_visccond_heatrate_ion[index];
// neTe += h*p_visccond_heatrate_elec[index];
}
// Now drag in the resistive heat rates INCLUDING its own central.
{
f64 neut_resistive = 0.0, ion_resistive = 0.0, elec_resistive = 0.0; // accumulators: must start from zero
long iTri;
for (iTri = 0; iTri < info.neigh_len; iTri++)
{
// CAREFUL of cases where we are at the edge.
long index_tri = indextri[MAXNEIGH_d*threadIdx.x + iTri]; // this vertex's iTri-th surrounding triangle
if ((index_tri >= StartTri) && (index_tri < StartTri + SIZE_OF_TRI_TILE_FOR_MAJOR))
{
neut_resistive += resistive_neut[index_tri-StartTri];
ion_resistive += resistive_ion[index_tri-StartTri];
elec_resistive += resistive_elec[index_tri-StartTri];
} else {
neut_resistive += p_resistive_heat_neut[index_tri];
ion_resistive += p_resistive_heat_ion[index_tri];
elec_resistive += p_resistive_heat_elec[index_tri];
};
}
neut_resistive *= THIRD;
ion_resistive *= THIRD;
elec_resistive *= THIRD;
// Try __syncthreads here...
// Add the values for central cell:
neut_resistive += p_resistive_heat_neut[BEGINNING_OF_CENTRAL + index];
ion_resistive += p_resistive_heat_ion[BEGINNING_OF_CENTRAL + index];
elec_resistive += p_resistive_heat_elec[BEGINNING_OF_CENTRAL + index];
nnTn += neut_resistive/area;
niTi += ion_resistive/area;
neTe += elec_resistive/area; // These were the additions to NT
}
// So we have now to collect things like:
// nu_eHeart, nu_eiBar :
f64 nu_ne_MT_over_n, nu_ni_MT_over_n, nu_eiBar, nu_ieBar, nu_eHeart; // 5 double
Vector3 omega_ce = eovermc*p_B_major[index];
{
f64 sqrt_Te = sqrt(nT_elec_use.T);
f64 s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(nT_elec_use.T*one_over_kB);
f64 electron_thermal = sqrt_Te*over_sqrt_m_e;
f64 ionneut_thermal = sqrt(nT_ion_use.T/m_ion+nT_neut_use.T/m_n); // hopefully not sqrt(0)
f64 lnLambda = Get_lnLambda_d(nT_ion_use.n,nT_elec_use.T);
f64 s_in_MT = Estimate_Neutral_MT_Cross_section(nT_ion_use.T*one_over_kB);
f64 s_en_MT = Estimate_Neutral_MT_Cross_section(nT_elec_use.T*one_over_kB);
nu_ne_MT_over_n = s_en_MT*electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT_over_n = s_in_MT*ionneut_thermal;
nu_eiBar = nu_eiBarconst*kB_to_3halves*nT_ion_use.n*lnLambda/(nT_elec_use.T*sqrt_Te);
nu_ieBar = nT_elec_use.n*nu_eiBar/nT_ion_use.n;
nu_eHeart = 1.87*nu_eiBar +
nT_neut_use.n*s_en_visc*electron_thermal;
}
// From here on doing the inter-species heat exchange:
Tensor3 Tens1;
{
f64 M_in = m_n*m_ion/((m_n+m_ion)*(m_n+m_ion));
f64 M_en = m_n*m_e/((m_n+m_e)*(m_n+m_e));
f64 M_ie = m_ion*m_e/((m_ion+m_e)*(m_ion+m_e));
// See section 10.3.1, June 2016 doc.
// Seems good idea to do this in heat, or manipulate equivalently.
// d/dt(NT) = U NT
// Add to the RH vector, h*0.5*U*NT_k:
Tens1.xx = -2.0*(M_in*nu_ni_MT_over_n*nT_ion_use.n + M_en*nu_ne_MT_over_n*nT_elec_use.n);
Tens1.xy = 2.0*M_in*nu_ni_MT_over_n*nT_neut_use.n;
Tens1.xz = 2.0*M_en*nu_ne_MT_over_n*nT_neut_use.n;
Tens1.yx = 2.0*M_in*nu_ni_MT_over_n*nT_ion_use.n;
Tens1.yy = -2.0*(M_in*nu_ni_MT_over_n*nT_neut_use.n
+ M_ie*nu_ieBar);
Tens1.yz = 2.0*M_ie*nu_eiBar;
Tens1.zx = 2.0*M_en*nu_ne_MT_over_n*nT_elec_use.n;
Tens1.zy = 2.0*M_ie*nu_ieBar;
Tens1.zz = -2.0*(M_ie*nu_eiBar + M_en*nu_ne_MT_over_n*nT_neut_use.n);
}
// Midpoint:
// d/dt (nT) = U
// (nT)_k+1 = (1 - h/2 U)^-1 (1+h/2 U) (nT)_k
nnTn += h*0.5*(Tens1.xx*(nT_neut_src.n*nT_neut_src.T)
+ Tens1.xy*(nT_ion_src.n*nT_ion_src.T)
+ Tens1.xz*(nT_elec_src.n*nT_elec_src.T)
);
niTi += h*0.5*(Tens1.yx*(nT_neut_src.n*nT_neut_src.T)
+ Tens1.yy*(nT_ion_src.n*nT_ion_src.T)
+ Tens1.yz*(nT_elec_src.n*nT_elec_src.T)
);
neTe += h*0.5*(Tens1.zx*(nT_neut_src.n*nT_neut_src.T)
+ Tens1.zy*(nT_ion_src.n*nT_ion_src.T)
+ Tens1.zz*(nT_elec_src.n*nT_elec_src.T)
);
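// The right-hand side assembled above is (nT after ionisation and heating) + (h/2)*U*(nT)_k;
// inverting (1 - (h/2)*U) below completes the midpoint update quoted in the comment above.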
// Matrix is 1 - h*0.5*U
Tens1.xx = 1.0-h*0.5*Tens1.xx;
Tens1.xy = -h*0.5*Tens1.xy;
Tens1.xz = -h*0.5*Tens1.xz;
Tens1.yx = -h*0.5*Tens1.yx;
Tens1.yy = 1.0-h*0.5*Tens1.yy;
Tens1.yz = -h*0.5*Tens1.yz;
Tens1.zx = -h*0.5*Tens1.zx;
Tens1.zy = -h*0.5*Tens1.zy;
Tens1.zz = 1.0-h*0.5*Tens1.zz;
if ((OUTPUT) && (index == REPORT)) {
printf("nT_before %1.5E %1.5E %1.5E \n",
nnTn,niTi,neTe);
};
{
Tensor3 Tens2;
Tens1.Inverse(Tens2);
Vector3 RH,LH;
RH.x = nnTn;
RH.y = niTi;
RH.z = neTe;
LH = Tens2*RH;
nnTn = LH.x;
niTi = LH.y;
neTe = LH.z;
}
if ((OUTPUT) && (index == REPORT)) {
printf("nT_after %1.5E %1.5E %1.5E \n",
nnTn,niTi,neTe);
};
// Overwrite any old rubbish in memory so that we can save off the output:
nT_neut_use.n = n_n_plus;
nT_neut_use.T = nnTn/n_n_plus;
nT_ion_use.n = n_ion_plus;
nT_ion_use.T = niTi/n_ion_plus;
nT_elec_use.n = n_e_plus;
nT_elec_use.T = neTe/n_e_plus;
if (b2ndPass == false) {
// Tween back to halfway if this is the first pass:
nT_neut_use.n = 0.5*(nT_neut_src.n + nT_neut_use.n);
nT_ion_use.n = 0.5*(nT_ion_src.n + nT_ion_use.n);
nT_elec_use.n = 0.5*(nT_elec_src.n + nT_elec_use.n);
nT_neut_use.T = 0.5*(nT_neut_src.T + nT_neut_use.T);
nT_ion_use.T = 0.5*(nT_ion_src.T + nT_ion_use.T);
nT_elec_use.T = 0.5*(nT_elec_src.T + nT_elec_use.T);
};
//if ((OUTPUT) && (index == REPORT))
// printf("Te %1.5E \n################\n",nT_elec_use.T);
p_nT_neut_out[index] = nT_neut_use;
p_nT_ion_out[index] = nT_ion_use;
p_nT_elec_out[index] = nT_elec_use;
}
| cec5c02cf72027d06df6aaa3de130559a20f4608.cu |
// Version 0.51
// Been over some attempts to ameliorate local accesses -- not v successful basically.
// Correction in "Get Lap phi" routine.
// Version 0.52:
// Change Lap A, Grad A routines to load CHAR4 p_tri_per_neigh instead of loading data
// to interrogate neighbour periodic status.
// Change major area calc in the INNERMOST/OUTERMOST case.
// Note that central area calc does not look right.
// Version 0.53:
// Made changes to Reladvect_nT because it was taking wrong connection for OUTERMOST.
// Changed flag tests & treatment of Inner verts in preceding routines.
// Version 0.54:
// Adjusted area calculations as written in spec.
// We set ins crossing tri minor area = 0, centroid on ins;
// frill area = 0, centroid on boundary.
// Version 0.6:
// Debugging and making corrections.
// ==
// Version 0.7:
// Debugging ... there is a kernel launch failure for Antiadvect Adot
// PLAN:
// Allow that on GPU we can move outside domain and it's fine, we do not change PB data.
// PB data will be only changed on CPU.
// Nonetheless we kept PBCTri lists which can be updated, unlike has_periodic alone, in case
// of moving something to its image within the domain.
// NOTES:
// Ensure that outside the domain, n_major is recorded as 0
// Ensure that outside the domain, resistive_heat is recorded as 0
extern real FRILL_CENTROID_OUTER_RADIUS, FRILL_CENTROID_INNER_RADIUS;
__global__ void Kernel_CalculateTriMinorAreas_AndCentroids
(structural * __restrict__ p_info_sharing, // for vertex positions
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
// Output:
f64 * __restrict__ p_area_minor,
f64_vec2 * __restrict__ p_tri_centroid)
{
__shared__ f64_vec2 shared_vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE];
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long tid = threadIdx.x + blockIdx.x * blockDim.x;
// Note that we only do a fetch with the first half of threads:
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
structural info = p_info_sharing[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
// Well here is a major problem.
// mti.StartMajor is a separate value for every thread.
// How can we make it do a contiguous access?
// Suppose a 1:1 correspondence between minor blocks and major blocks...
// that is ONE way.
shared_vertex_pos[threadIdx.x] = info.pos;
// shared_shorts[threadIdx.x].flag = info.flag;
// shared_shorts[threadIdx.x].neigh_len = info.neigh_len;
// these were never used.
};
// If we make an extended array then we can always go through that code.
__syncthreads();
// Triangle area * 2/3 is area of minor cell.
// if (tid < Ntris) { // redundant test if we do it right
LONG3 corner_index = p_tri_corner_index[tid];
CHAR4 perinfo = p_tri_perinfo[tid];
// Do we ever require those and not the neighbours?
// Yes - this time for instance.
f64_vec2 pos1, pos2, pos3;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
pos1 = shared_vertex_pos[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i1];
pos1 = info.pos;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
pos2 = shared_vertex_pos[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i2];
pos2 = info.pos;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
pos3 = shared_vertex_pos[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i3];
pos3 = info.pos;
}
if (perinfo.per0+perinfo.per1+perinfo.per2 == 0) {
} else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI)
pos1 = Anticlock_rotate2(pos1);
if (perinfo.per0 == NEEDS_CLOCK)
pos1 = Clockwise_rotate2(pos1);
if (perinfo.per1 == NEEDS_ANTI)
pos2 = Anticlock_rotate2(pos2);
if (perinfo.per1 == NEEDS_CLOCK)
pos2 = Clockwise_rotate2(pos2);
if (perinfo.per2 == NEEDS_ANTI)
pos3 = Anticlock_rotate2(pos3);
if (perinfo.per2 == NEEDS_CLOCK)
pos3 = Clockwise_rotate2(pos3);
};
// Now we've got to decide what to do about minor cells near the edges.
// Edge of memory: triangles should not continue to the edge.
// Ultimately the edge of memory will be mostly within cathode rods and suchlike things.
// So we don't need to connect tri mesh beyond outermost row of vertices, even if we could.
// Realise that this edge cell crosses into the insulator and so should be assigned nv_r = 0
// We do not know what order the corners are given in.
// So use fabs:
f64 area = fabs(0.5*( (pos2.x+pos1.x)*(pos2.y-pos1.y)
+ (pos3.x+pos2.x)*(pos3.y-pos2.y)
+ (pos1.x+pos3.x)*(pos1.y-pos3.y)
) );
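// (Shoelace formula in trapezoid form: twice the signed area is the sum over edges of
// (x_next + x_prev)*(y_next - y_prev); fabs because, as noted, corner order is unknown.)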
f64_vec2 centroid = THIRD*(pos1+pos2+pos3);
if (area > 1.0e-3) {
printf("tri %d area %1.3E pos_x %1.6E %1.6E %1.6E \n"
" pos_y %1.6E %1.6E %1.6E \n",tid,area,
pos1.x,pos2.x,pos3.x,
pos1.y,pos2.y,pos3.y);
}
if (perinfo.flag == OUTER_FRILL)
{
f64_vec2 temp = 0.5*(pos1+pos2);
temp.project_to_radius(centroid, FRILL_CENTROID_OUTER_RADIUS_d);
area = 1.0e-14; // == 0 but tiny is less likely to cause 1/0
}
if (perinfo.flag == INNER_FRILL)
{
f64_vec2 temp = 0.5*(pos1+pos2);
temp.project_to_radius(centroid, FRILL_CENTROID_INNER_RADIUS_d);
area = 1.0e-14; // == 0 but tiny is less likely to cause 1/0
}
if (perinfo.flag == CROSSING_INS) {
f64_vec2 centroid2;
centroid.project_to_ins(centroid2);
centroid = centroid2;
// The major cells will abut the insulator.
// Only count the % of the area that is in the domain.
//bool b1, b2, b3;
//b1 = (pos1.x*pos1.x+pos1.y*pos1.y > INSULATOR_OUTER_RADIUS*INSULATOR_OUTER_RADIUS);
//b2 = (pos2.x*pos2.x+pos2.y*pos2.y > INSULATOR_OUTER_RADIUS*INSULATOR_OUTER_RADIUS);
//b3 = (pos3.x*pos3.x+pos3.y*pos3.y > INSULATOR_OUTER_RADIUS*INSULATOR_OUTER_RADIUS);
// Save ourselves some bother for now by setting area to be near 0.
// area = 1.0e-14;
// FOR NOW, legislate v = 0 in insulator-crossing tris.
// And so avoid having to do an awkward area calculation.
// Stick with correct area for tri as area variable.
// Possibly we never use 'area' except for domain-related matters; if that can be
// verified, then it's best to change to 'domain_intersection_area', however tricky.
}
p_tri_centroid[tid] = centroid;
p_area_minor[tid] = 0.666666666666667*area;
if (p_area_minor[tid] < 0.0) {
printf("kernel -- tid %d flag %d area %1.8E \n",tid,perinfo.flag,area);
};
// Perhaps we need instead to read data from neighbours to create tri minor area.
// Note that we subsequently CHANGED the nodes of minor mesh to be at averages
// so that we could average neatly A to edges. However, this means TWOTHIRDS*tri area
// is not an exact estimate.
}
// FOR OUTERMOST,
//
// | 4 \/ 3 |
// pt0| ------- |pt3
// 0 2
// pt1| 1 |pt2
// If we have an outer point,
// then the number of neighs is not the number of tris;
// SO EXPLOIT THIS
// Make sure that the omitted edge is the one that would go between the frill tris.
// This has to go into the reconstructing code that will
// generate the mesh with frill tris.
// ---------------------------------------------------------------------------------
// We'll want to calculate areas for triangles AND for central cells.
// But they require different codes so might as well be different kernels.
// Central area = sum of 1/6 of each neighbouring tri minor area.
// So now let's write central area calc routine:
// We should be passed the pointer to the start of the central minor area array.
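// Kernel_CalculateCentralMinorAreas:
// One thread per vertex. Preload the 2*blockDim.x tri minor areas of this tile into
// shared memory, then sum the minor areas of the triangles listed in IndexTri and take
// 1/6 of that sum as the central (vertex-centred) minor area. See the BUG note at the
// end of the kernel for when the 1/6 factor is actually valid.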
__global__ void Kernel_CalculateCentralMinorAreas (
structural * __restrict__ p_info_sharing,
long * __restrict__ p_IndexTri,
f64 * __restrict__ p_triminor_area,
// Output:
f64 * __restrict__ p_area_minor
// pass output array starting from the central array start
)
{
__shared__ f64 shared_area[SIZE_OF_TRI_TILE_FOR_MAJOR];
__shared__ long Indextri[MAXNEIGH_d*threadsPerTileMajor];
// 2*8+12*4 = 64 bytes => room for 768 threads in 48K
long index = threadIdx.x + blockIdx.x * blockDim.x;
// Load in minor data: how to manage this? fill in with 2 strides; rely on contiguity.
long StartMinor = blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR; // Have to do this way.
// will this be recognised as contiguous access?
shared_area[threadIdx.x] =
p_triminor_area[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
shared_area[blockDim.x + threadIdx.x] =
p_triminor_area[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + blockDim.x + threadIdx.x];
// Loaded in 2 times as many areas as central cells
__syncthreads();
//if (index < Nverts)
{
structural info = p_info_sharing[index];
memcpy(Indextri + MAXNEIGH_d*threadIdx.x,
p_IndexTri + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long)); // MAXNEIGH_d should be chosen to be 12, for 1 full bus.
f64 sum = 0.0;
#pragma unroll 12
for (short iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indextri < StartMinor) || (indextri >= StartMinor+SIZE_OF_TRI_TILE_FOR_MAJOR))
{
sum += p_triminor_area[indextri];
} else {
sum += shared_area[indextri-StartMinor];
}
}
// place separation of central from edge cell at 1/3 along line.
// Then have 1/9 area in central shard, 2/3 in edge minor,
// so (1/9)/(2/3) = 1/6
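// (Why 1/9: cutting the corner at 1/3 of the way along each of the two edges leaving
// the vertex gives a shard similar to the whole triangle with linear ratio 1/3, hence
// area (1/3)^2 = 1/9 of the triangle. p_triminor_area stores 2/3 of the triangle, so
// the shard is (1/9)/(2/3) = 1/6 of each stored minor area.)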
p_area_minor[index] = sum*SIXTH;
if (sum < 0.0) {
printf("kerncentral -- tid %d area %1.2E \n",index,p_area_minor[index]);
};
};
// This may give funny results at the edges of memory, where we have added
// areas only of shards that formed part of triangles. But that is the expected
// behaviour here.
// If we had frills with repeated corners, then we get area 0 from them.
// If we used a special vertex then we had to do something special in setting area - perhaps we want it to = 0.
// BUG:
// This 1/6 only holds as long as we position the minor joins on the lines
// between vertices. If we use (1/3)(vertex + centroid 1 + centroid 2)
// then we should not be doing this area sum. Rather, given each pair of
// triangles, we can infer the area of the triangle that is part of central cell.
}
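// Kernel_CalculateMajorAreas:
// One thread per vertex. Walk the surrounding tri centroids anticlockwise (rotating
// periodic images according to pPBCtri) and accumulate 0.5*(x1+x2)*(y2-y1) over the
// edges: this is the contour integral of x * n_x, which equals the enclosed polygon
// area. OUTERMOST/INNERMOST use neigh_len+1 centroids, with frill centroids projected
// onto the outer/inner radii.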
__global__ void Kernel_CalculateMajorAreas (
structural * __restrict__ p_info,
f64_vec2 * __restrict__ p_tri_centroid,
long * __restrict__ pIndexTri,
char * __restrict__ pPBCtri,
// Output:
f64 * __restrict__ p_area
)
{
__shared__ f64_vec2 shared_centroids[SIZE_OF_TRI_TILE_FOR_MAJOR];
__shared__ long Indextri[MAXNEIGH_d*threadsPerTileMajor];
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajor];
long StartMinor = blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR; // Have to do this way.
shared_centroids[threadIdx.x] =
p_tri_centroid[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
shared_centroids[blockDim.x + threadIdx.x] =
p_tri_centroid[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + blockDim.x + threadIdx.x];
//
//if (shared_centroids[threadIdx.x].x*shared_centroids[threadIdx.x].x
// + shared_centroids[threadIdx.x].y*shared_centroids[threadIdx.x].y > DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS)
// shared_centroids[threadIdx.x].project_to_radius(shared_centroids[threadIdx.x],DOMAIN_OUTER_RADIUS);
//
//if (shared_centroids[threadIdx.x].x*shared_centroids[threadIdx.x].x
// + shared_centroids[threadIdx.x].y*shared_centroids[threadIdx.x].y < INNER_A_BOUNDARY*INNER_A_BOUNDARY)
// shared_centroids[threadIdx.x].project_to_radius(shared_centroids[threadIdx.x],INNER_A_BOUNDARY);
//
//if (shared_centroids[blockDim.x + threadIdx.x].x*shared_centroids[blockDim.x + threadIdx.x].x
// + shared_centroids[blockDim.x + threadIdx.x].y*shared_centroids[blockDim.x + threadIdx.x].y > DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS)
// shared_centroids[blockDim.x + threadIdx.x].project_to_radius(shared_centroids[blockDim.x + threadIdx.x],DOMAIN_OUTER_RADIUS);
//
//if (shared_centroids[blockDim.x + threadIdx.x].x*shared_centroids[blockDim.x + threadIdx.x].x
// + shared_centroids[blockDim.x + threadIdx.x].y*shared_centroids[blockDim.x + threadIdx.x].y < INNER_A_BOUNDARY*INNER_A_BOUNDARY)
// shared_centroids[blockDim.x + threadIdx.x].project_to_radius(shared_centroids[blockDim.x + threadIdx.x],INNER_A_BOUNDARY);
//
__syncthreads();
long index = threadIdx.x + blockIdx.x * blockDim.x;
f64_vec2 uprev, unext;
//if (index < Nverts) { // redundant test, should be
structural info = p_info[index];
memcpy(Indextri + MAXNEIGH_d*threadIdx.x,
pIndexTri + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long)); // MAXNEIGH_d should be chosen to be 12, for 1 full bus.
memcpy(PBCtri + MAXNEIGH_d*threadIdx.x,
pPBCtri + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(char)); // MAXNEIGH_d should be chosen to be 12, for 1 full bus.
f64 grad_x_integrated_x = 0.0;
// Going to do shoelace on tri centroids which must be sorted anticlockwise.
// If we have a frilled e.g.OUTERMOST vertex, we shall find that
// info.neigh_len = 4 whereas tri_len = 5. Bear in mind...
if ((info.flag != OUTERMOST) && (info.flag != INNERMOST))
{
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + info.neigh_len-1];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
uprev = shared_centroids[indextri-StartMinor];
} else {
uprev = p_tri_centroid[indextri];
}
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + info.neigh_len-1];
if (PBC == NEEDS_CLOCK) {
uprev = Clockwise_rotate2(uprev);
}
if (PBC == NEEDS_ANTI) {
uprev = Anticlock_rotate2(uprev);
}
short iNeigh;
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++) // iNeigh is the anticlockwise one
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
unext = shared_centroids[indextri-StartMinor];
} else {
unext = p_tri_centroid[indextri];
}
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + iNeigh];
if (PBC == NEEDS_CLOCK) {
unext = Clockwise_rotate2(unext);
}
if (PBC == NEEDS_ANTI) {
unext = Anticlock_rotate2(unext);
}
// Get edge_normal.x and average x on edge
grad_x_integrated_x += //0.5*(unext.x+uprev.x)*edge_normal.x
0.5*(unext.x+uprev.x)*(unext.y-uprev.y);
uprev = unext;
};
} else {
// FOR THE OUTERMOST / INNERMOST CELLS :
// In this case we basically substituted tri_len for neigh_len:
// Also we project frill centroid on to the inner/outer radius.
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + info.neigh_len];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
uprev = shared_centroids[indextri-StartMinor];
} else {
uprev = p_tri_centroid[indextri];
}
if (uprev.x*uprev.x + uprev.y*uprev.y > DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS)
uprev.project_to_radius(uprev,DOMAIN_OUTER_RADIUS);
if (uprev.x*uprev.x + uprev.y*uprev.y < INNER_A_BOUNDARY*INNER_A_BOUNDARY)
uprev.project_to_radius(uprev,INNER_A_BOUNDARY);
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + info.neigh_len];
if (PBC == NEEDS_CLOCK) uprev = Clockwise_rotate2(uprev);
if (PBC == NEEDS_ANTI) uprev = Anticlock_rotate2(uprev);
short iNeigh;
for (iNeigh = 0; iNeigh < info.neigh_len+1; iNeigh++) // iNeigh is the anticlockwise one
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
unext = shared_centroids[indextri-StartMinor];
} else {
unext = p_tri_centroid[indextri];
}
if (unext.x*unext.x + unext.y*unext.y > DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS)
unext.project_to_radius(unext,DOMAIN_OUTER_RADIUS);
if (unext.x*unext.x + unext.y*unext.y < INNER_A_BOUNDARY*INNER_A_BOUNDARY)
unext.project_to_radius(unext,INNER_A_BOUNDARY);
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + iNeigh];
if (PBC == NEEDS_CLOCK) unext = Clockwise_rotate2(unext);
if (PBC == NEEDS_ANTI) unext = Anticlock_rotate2(unext);
grad_x_integrated_x += 0.5*(unext.x+uprev.x)*(unext.y-uprev.y);
// We do have to count even the edge looking into the frills, or the polygon
// area would not be right.
uprev = unext;
};
};
p_area[index] = grad_x_integrated_x;
/*if ((index == 36685)) {
printf("index %d flag %d area %1.3E \n",
index, info.flag, grad_x_integrated_x);
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + info.neigh_len-1];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
uprev = shared_centroids[indextri-StartMinor];
} else {
uprev = p_tri_centroid[indextri];
}
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + info.neigh_len-1];
if (PBC == NEEDS_CLOCK) {
uprev = Clockwise_rotate2(uprev);
}
if (PBC == NEEDS_ANTI) {
uprev = Anticlock_rotate2(uprev);
}
//printf("uprev %1.5E %1.5E ... %1.5E\n",uprev.x,uprev.y,uprev.modulus());
short iNeigh;
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++) // iNeigh is the anticlockwise one
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indextri >= StartMinor) && (indextri < StartMinor + SIZE_OF_TRI_TILE_FOR_MAJOR)) {
unext = shared_centroids[indextri-StartMinor];
} else {
unext = p_tri_centroid[indextri];
}
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + iNeigh];
if (PBC == NEEDS_CLOCK) {
unext = Clockwise_rotate2(unext);
}
if (PBC == NEEDS_ANTI) {
unext = Anticlock_rotate2(unext);
}
// printf("unext %1.5E %1.5E ... %1.5E \n",unext.x,unext.y,unext.modulus());
// Get edge_normal.x and average x on edge
grad_x_integrated_x += //0.5*(unext.x+uprev.x)*edge_normal.x
0.5*(unext.x+uprev.x)*(unext.y-uprev.y);
uprev = unext;
};
};*/
}
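// Kernel_Average_nT_to_tri_minors:
// One thread per triangle, one species at a time through shared memory. n and T on the
// tri minor are the straight average of the three corner vertex values, except for
// CROSSING_INS tris, where only corners with n > 0 are averaged (relying on n being
// recorded as 0 outside the domain).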
__global__ void Kernel_Average_nT_to_tri_minors (
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
// Output:
nT * __restrict__ p_minor_nT_neut,
nT * __restrict__ p_minor_nT_ion,
nT * __restrict__ p_minor_nT_elec
)
{
// Average by area so that we get the same total mass on minor mesh as on major.
// We have to know intersection. It's not always 1/3 of triangle is it.
// ??
// Even corner positions do not tell us intersection. We'd have to know the neighbouring
// centroid also.
__shared__ nT shared_nT[SIZE_OF_MAJOR_PER_TRI_TILE];
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_nT[threadIdx.x] = p_nT_neut[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
__syncthreads();
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long tid = threadIdx.x + blockIdx.x * blockDim.x;
nT nT1, nT2, nT3, nT_out;
LONG3 corner_index;
CHAR4 per_info = p_tri_perinfo[tid];
corner_index = p_tri_corner_index[tid];
// Do we ever require those and not the neighbours? Yes - this time for instance.
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT1 = shared_nT[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT1 = p_nT_neut[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT2 = shared_nT[corner_index.i2-StartMajor];
} else {
nT2 = p_nT_neut[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT3 = shared_nT[corner_index.i3-StartMajor];
} else {
nT3 = p_nT_neut[corner_index.i3];
}
if (per_info.flag == CROSSING_INS) {
// An idea: Ensure that outside the domain, n is recorded as 0
int divide = 0;
nT_out.n = 0.0;
nT_out.T = 0.0;
if (nT1.n > 0.0) {
nT_out.n += nT1.n;
nT_out.T += nT1.T;
divide++;
}
if (nT2.n > 0.0) {
nT_out.n += nT2.n;
nT_out.T += nT2.T;
divide++;
}
if (nT3.n > 0.0) {
nT_out.n += nT3.n;
nT_out.T += nT3.T;
divide++;
}
nT_out.n /= (real)divide;
nT_out.T /= (real)divide;
} else {
nT_out.n = THIRD*(nT1.n+nT2.n+nT3.n);
nT_out.T = THIRD*(nT1.T+nT2.T+nT3.T);
};
// SO THIS IS JUST ROUGH FOR NOW? What we wanted to do:
// Sum (Area_intersection * nT) / Sum(Area_intersection)
// You cannot get the intersection area just from knowing the corner positions.
// But do note that since centroid = (1/3)(sum of positions), (1/3) represents linear interpolation on a plane.
p_minor_nT_neut[tid] = nT_out;
__syncthreads();
// Now repeat same thing for each species
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_nT[threadIdx.x] = p_nT_ion[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
__syncthreads();
//if (tid < Ntris) {
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT1 = shared_nT[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT1 = p_nT_ion[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT2 = shared_nT[corner_index.i2-StartMajor];
} else {
nT2 = p_nT_ion[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT3 = shared_nT[corner_index.i3-StartMajor];
} else {
nT3 = p_nT_ion[corner_index.i3];
}
if (per_info.flag == CROSSING_INS) {
// An idea: Ensure that outside the domain, n is recorded as 0
int divide = 0;
nT_out.n = 0.0;
nT_out.T = 0.0;
if (nT1.n > 0.0) {
nT_out.n += nT1.n;
nT_out.T += nT1.T;
divide++;
}
if (nT2.n > 0.0) {
nT_out.n += nT2.n;
nT_out.T += nT2.T;
divide++;
}
if (nT3.n > 0.0) {
nT_out.n += nT3.n;
nT_out.T += nT3.T;
divide++;
}
nT_out.n /= (real)divide;
nT_out.T /= (real)divide;
} else {
nT_out.n = THIRD*(nT1.n+nT2.n+nT3.n);
nT_out.T = THIRD*(nT1.T+nT2.T+nT3.T);
};
p_minor_nT_ion[tid] = nT_out;
//};
__syncthreads();
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_nT[threadIdx.x] = p_nT_elec[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
__syncthreads();
//if (tid < Ntris) {
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT1 = shared_nT[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT1 = p_nT_elec[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT2 = shared_nT[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
nT2 = p_nT_elec[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT3 = shared_nT[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
nT3 = p_nT_elec[corner_index.i3];
}
if (per_info.flag == CROSSING_INS) {
// An idea: Ensure that outside the domain, n is recorded as 0
int divide = 0;
nT_out.n = 0.0;
nT_out.T = 0.0;
if (nT1.n > 0.0) {
nT_out.n += nT1.n;
nT_out.T += nT1.T;
divide++;
}
if (nT2.n > 0.0) {
nT_out.n += nT2.n;
nT_out.T += nT2.T;
divide++;
}
if (nT3.n > 0.0) {
nT_out.n += nT3.n;
nT_out.T += nT3.T;
divide++;
}
nT_out.n /= (real)divide;
nT_out.T /= (real)divide;
} else {
nT_out.n = THIRD*(nT1.n+nT2.n+nT3.n);
nT_out.T = THIRD*(nT1.T+nT2.T+nT3.T);
};
p_minor_nT_elec[tid] = nT_out;
// if frills have corners repeated, we end up with 1/3+2/3 --- should never matter.
// If special vertex, probably we set nT at special vertex to 0 so 1/3+1/3.
// nT should not be important at frills, as outermost points and innermost points
// do not need to know pressure.
}
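// Kernel_GetZCurrent:
// One thread per minor cell: DOMAIN_MINOR and OUTERMOST minors contribute
// Iz = q*(n_ion*v_ion.z - n_e*v_e.z)*area; we reduce within the block in shared memory
// and write one partial sum per block to p_summands, to be added up on the host.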
__global__ void Kernel_GetZCurrent(
CHAR4 * __restrict__ p_minor_info,
nT * __restrict__ p_minor_nT_ion,
nT * __restrict__ p_minor_nT_elec,
f64_vec3 * __restrict__ p_minor_v_ion,
f64_vec3 * __restrict__ p_minor_v_elec, // Not clear if this should be nv or {n,v} ? {n,v}
f64 * __restrict__ p_area_minor,
f64 * __restrict__ p_summands )
{
__shared__ f64 intrablock[threadsPerTileMinor];
long tid = threadIdx.x + blockIdx.x * blockDim.x;
CHAR4 minor_info = p_minor_info[tid];
// This is called for all minor cells.
if ((minor_info.flag == DOMAIN_MINOR) || (minor_info.flag == OUTERMOST)) {
// Let DOMAIN_MINOR == DOMAIN_TRIANGLE ...
// And if you are DOMAIN_MINOR then n,v should be meaningful.
// Other possible values:
// OUTERMOST_CENTRAL == OUTERMOST, OUTER_FRILL, INNERMOST_CENTRAL, INNER_FRILL, INNER_TRIANGLE,
// CROSSING_INS, INNER_CENTRAL --
f64 n_ion = p_minor_nT_ion[tid].n;
f64 n_e = p_minor_nT_elec[tid].n;
f64_vec3 v_ion = p_minor_v_ion[tid];
f64_vec3 v_e = p_minor_v_elec[tid];
f64 Iz = q*(n_ion*v_ion.z - n_e*v_e.z)*p_area_minor[tid];
// Lots of bus loads, hopefully all contig.
intrablock[threadIdx.x] = Iz;
// HERE ASSUMED that area is calculated as DOMAIN INTERSECTION AREA
// if we start including nv in insulator-crossing tris.
} else {
intrablock[threadIdx.x] = 0.0;
};
__syncthreads();
// Now it's the aggregation:
int s = blockDim.x;
int k = s/2;
while (s != 1) {
if (threadIdx.x < k)
{
intrablock[threadIdx.x] += intrablock[threadIdx.x + k];
};
__syncthreads();
// If s is odd, fold the last (unpaired) element into the reduction:
if ((s % 2 == 1) && (threadIdx.x == k-1)){
intrablock[threadIdx.x] += intrablock[s-1];
};
// In case s == 81 (so k == 40), this adds [80] into [39];
// otherwise the pairwise step only covers indices up to 2k-1 = 79.
s = k;
k = s/2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_summands[blockIdx.x] = intrablock[0];
};
} // Doesn't matter much if function is slow, I think it is only called for debug purposes anyway.
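// Host-side completion sketch for the Iz reduction above (illustrative only:
// 'numTilesMinor' and 'h_summands' are assumed names, not defined in this file):
// f64 *h_summands = new f64[numTilesMinor];
// cudaMemcpy(h_summands, p_summands, numTilesMinor*sizeof(f64), cudaMemcpyDeviceToHost);
// f64 Iz = 0.0;
// for (long iTile = 0; iTile < numTilesMinor; iTile++) Iz += h_summands[iTile];
// delete[] h_summands;
// Kernel_Create_v_overall_and_newpos:
// One thread per vertex: for DOMAIN_VERTEX, v_overall is the mass-weighted mean of the
// species velocities (xy part) and the position is advanced by h*v_overall; elsewhere
// v_overall = 0 and the position is unchanged.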
__global__ void Kernel_Create_v_overall_and_newpos(
structural * __restrict__ p_info,
f64 const h,
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
f64_vec3 * __restrict__ p_v_neut,
f64_vec3 * __restrict__ p_v_ion,
f64_vec3 * __restrict__ p_v_elec,
// Output:
structural * __restrict__ p_info_out,
f64_vec2 * __restrict__ p_v_overall
)
{
long tid = threadIdx.x + blockIdx.x * blockDim.x;
//if (tid < Nverts)
structural info = p_info[tid];
f64_vec2 v_save;
if (info.flag == DOMAIN_VERTEX)
{
nT nT_neut, nT_ion, nT_elec;
f64_vec3 v_n, v_i, v_e;
nT_neut = p_nT_neut[tid];
nT_ion = p_nT_ion[tid];
nT_elec = p_nT_elec[tid];
v_n = p_v_neut[tid];
v_i = p_v_ion[tid];
v_e = p_v_elec[tid]; // expensive loads; can we avoid function by putting it in with smth else?
f64_vec3 v_overall = (m_n*nT_neut.n*v_n + m_ion*nT_ion.n*v_i + m_e*nT_elec.n*v_e)/
(m_n*nT_neut.n + m_ion*nT_ion.n + m_e*nT_elec.n);
v_save.x = v_overall.x;
v_save.y = v_overall.y;
info.pos += h*v_save;
} else {
v_save.x = 0.0; v_save.y = 0.0;
}
p_v_overall[tid] = v_save;
p_info_out[tid] = info; // safer to do unnecessary write of whole object to get contiguity.
// can we do anything else with the data?
// We could transfer it to shared and do something with it. But there isn't anything.
}
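// Kernel_Average_v_overall_to_tris:
// One thread per triangle: v_overall on the tri minor is the 1/3-average of the corner
// values (rotated where periodic) for DOMAIN_TRIANGLE and CROSSING_INS; for CROSSING_INS
// the radial component is then removed so the tri centroid stays on the insulator; all
// other flags get v_overall = 0.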
__global__ void Kernel_Average_v_overall_to_tris (
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
f64_vec2 * __restrict__ p_v_overall,
f64_vec2 * __restrict__ p_tri_centroid,
// Output:
f64_vec2 * __restrict__ p_minor_v_overall
)
{
__shared__ f64_vec2 shared_v[SIZE_OF_MAJOR_PER_TRI_TILE];
// Averaging as 1/3 to tris.
// Even corner positions do not tell us intersection. We'd have to know the neighbouring
// centroid also.
// Load to shared:
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_v[threadIdx.x] = p_v_overall[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
// Let's hope it works with that sort of index. If it doesn't we're in a tough situation.
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long tid = threadIdx.x + blockIdx.x * blockDim.x;
__syncthreads();
f64_vec2 v0, v1, v2, v_out;
LONG3 corner_index;
CHAR4 perinfo;
//if (tid < Ntris) { // redundant check
corner_index = p_tri_corner_index[tid];
perinfo = p_tri_perinfo[tid];
if ((perinfo.flag == DOMAIN_TRIANGLE) ||
(perinfo.flag == CROSSING_INS))
{
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
v0 = shared_v[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
v0 = p_v_overall[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
v1 = shared_v[corner_index.i2-StartMajor];
} else {
v1 = p_v_overall[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
v2 = shared_v[corner_index.i3-StartMajor];
} else {
v2 = p_v_overall[corner_index.i3];
}
if (perinfo.per0+perinfo.per1+perinfo.per2 == 0) {
} else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI)
v0 = Anticlock_rotate2(v0);
if (perinfo.per0 == NEEDS_CLOCK)
v0 = Clockwise_rotate2(v0);
if (perinfo.per1 == NEEDS_ANTI)
v1 = Anticlock_rotate2(v1);
if (perinfo.per1 == NEEDS_CLOCK)
v1 = Clockwise_rotate2(v1);
if (perinfo.per2 == NEEDS_ANTI)
v2 = Anticlock_rotate2(v2);
if (perinfo.per2 == NEEDS_CLOCK)
v2 = Clockwise_rotate2(v2);
};
v_out = THIRD*(v0+v1+v2);
// For insulator triangle,
// we should take v_overall_r = 0
// because this tri centroid will remain on the insulator.
// It is OK to average with places that should have v_overall = 0.
if (perinfo.flag == CROSSING_INS)
{
f64_vec2 r = p_tri_centroid[tid]; // random accesses??
//f64_vec2 rhat = r/r.modulus();
// v_out = v_out - rhat*v_out.dot(rhat);
v_out = v_out - r*v_out.dot(r)/(r.x*r.x+r.y*r.y);
// Well this is kinda wrong.
}
} else {
v_out.x = 0.0; v_out.y = 0.0;
}
p_minor_v_overall[tid] = v_out;
}
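// Kernel_Average_nnionrec_to_tris:
// One thread per triangle: average the vertex ionisation/recombination amounts onto the
// tri minor, 1/3 each for DOMAIN_TRIANGLE and 1/2 each for CROSSING_INS (assuming the
// out-of-domain corner contributes 0); zero for all other flags.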
__global__ void Kernel_Average_nnionrec_to_tris
(
CHAR4 * __restrict__ p_tri_perinfo,
LONG3 * __restrict__ p_tri_corner_index,
nn * __restrict__ p_nn_ionrec,
nn * __restrict__ p_nn_ionrec_minor
)
{
__shared__ nn shared_nn[SIZE_OF_MAJOR_PER_TRI_TILE];
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
shared_nn[threadIdx.x] = p_nn_ionrec[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
}
// Let's hope it works with that sort of index. If it doesn't we're in a tough situation.
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long tid = threadIdx.x + blockIdx.x * blockDim.x;
__syncthreads();
nn nn0, nn1, nn2;
LONG3 corner_index;
nn nn_out;
//if (tid < Ntris) { // redundant check - ?
corner_index = p_tri_corner_index[tid];
CHAR4 perinfo = p_tri_perinfo[tid];
if ((perinfo.flag == DOMAIN_TRIANGLE) ||
(perinfo.flag == CROSSING_INS))
{
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nn0 = shared_nn[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nn0 = p_nn_ionrec[corner_index.i1];
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nn1 = shared_nn[corner_index.i2-StartMajor];
} else {
nn1 = p_nn_ionrec[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nn2 = shared_nn[corner_index.i3-StartMajor];
} else {
nn2 = p_nn_ionrec[corner_index.i3];
}
nn_out.n_ionise = THIRD*(nn0.n_ionise+nn1.n_ionise+nn2.n_ionise);
nn_out.n_recombine = THIRD*(nn0.n_recombine+nn1.n_recombine+nn2.n_recombine);
if (perinfo.flag == CROSSING_INS)
{
// Ensure that we are not using silly data...
// Assume n_ionise = 0 outside domain.
nn_out.n_ionise = 0.5*(nn0.n_ionise+nn1.n_ionise+nn2.n_ionise);
nn_out.n_recombine = 0.5*(nn0.n_recombine+nn1.n_recombine+nn2.n_recombine);
}
} else {
nn_out.n_ionise = 0.0;
nn_out.n_recombine = 0.0;
}
p_nn_ionrec_minor[tid] = nn_out;
}
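// Kernel_RelAdvect_nT:
// One thread per vertex (major cell); the three species are handled in turn, reusing
// the shared tile of tri centroids and the IndexTri list. For each edge of the major
// cell, running between consecutive tri centroids, we accumulate
// mass -= 0.5*h*( n1*(v1 - v_overall1) + n2*(v2 - v_overall2) ).dot(edge_normal)
// and the analogous nvT flow for heat; then add the old cell contents n*area_old and
// n*T*area_old, divide by area_new, and apply the compressive heating factor.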
__global__ void Kernel_RelAdvect_nT(
real h,
structural * __restrict__ p_info, // Advection for domain vertices only
long * __restrict__ pIndexTri,
// char * __restrict__ pPBCTri,
// do we want this - or should we just use has_periodic flag ?
// Debatable: has_periodic flag is non-maintainable if things cross the PB.
// However that is probably all right, we should only like doing PB manip on CPU.
f64_vec2 * __restrict__ p_minor_centroid, // work out tri centroids b4hand
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
nT * __restrict__ p_minor_nT_neut,
nT * __restrict__ p_minor_nT_ion,
nT * __restrict__ p_minor_nT_elec,
f64_vec3 * __restrict__ p_minor_v_neut,
f64_vec3 * __restrict__ p_minor_v_ion,
f64_vec3 * __restrict__ p_minor_v_elec,
f64_vec2 * __restrict__ p_minor_v_overall,
f64 * __restrict__ p_area_old,
f64 * __restrict__ p_area_new,
// dest:
nT * __restrict__ p_nT_neut_out,
nT * __restrict__ p_nT_ion_out,
nT * __restrict__ p_nT_elec_out
)
{
// Idea is, we don't need to look at other nT
// when we do this one -- though we can afterwards
// push through from registry into shared, take grad nT,
// if we want.
// It is reasonable to overwrite and do one species after another.
__shared__ f64_vec2 p_tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 2*2
__shared__ f64_vec2 p_nv_shared[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 2*2
// Note: trimmed to vxy since we do not advect v here.
__shared__ f64 p_T_shared[SIZE_OF_TRI_TILE_FOR_MAJOR]; // +1*2
__shared__ long Indextri[MAXNEIGH_d*threadsPerTileMajor]; // +6 doublesworth
// We could increase the occupancy by changing Indextri to load 1 index at a time
// but if we use 63 registers then I think we get only about 512 per SM anyway.
// FIRM PLAN:
// Put triangles outside the outermost row of vertices. Padding that will make us have
// 2x the number of triangles as vertices.
// These triangles will have v = 0 in our setup.
// In general though they serve no purpose?
// We still need to load a periodic flag for each triangle so not loading a general flag didn't achieve much...
// Alternatively, load a "has periodic" flag for this vertex as part of structural:
// instead of two shorts, have short char char = neigh_len,has_periodic,general_flag
// That seems logical - we don't need a general flag to be a short rather than a char.
// Occupancy calculator says try having 192 instead of 128 in a major tile.
// Just have to see empirically when programming is all done.
// 2 ways round: with tri centroids loading in:
// shared 2 * (2 + 2 + 1) + 6 from indextri = 16 doubles equiv!!
// with vertex pos loading in:
// shared 2 + 2*(2 + 1) + 6 + 6 = 20 doubles equiv!!
// This is simply a huge amount of data to have to deal with.
// Was advection a full up routine before?
// Yes - it was so bad we could not fit in IndexNeigh into shared.
// One way is to CONTIGUOUSLY load Indextri on the go:
// put arrays of member0, member1, etc.
// Would it even recognise as a contiguous load? We could force it to.
// That is an interesting alternative but not sure about it.
// On the plus side, bus activity is reduced by doing the way we ARE doing it.
// This way, we reuse Indextri for 3 species instead of loading x3.
// Don't have a sense of really how long bus trips take compared to everything else.
// That will be something to learn this time - with nvprof, nSight.
// How much shared now? About 10 doubles per thread.
// 80*256 vs 48*1024 = 192*256. 2 blocks of 256 at a time.
// We are ending up with too few threads running.
// Solution: Store both nv, and T, thus to compose nvT when needed.
// *****
// It would be more sensible to run thread for each triangle but with same stored data as in this block --- surely?
// *****
long StartMinor = blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR;
long EndMinor = (blockIdx.x+1)*SIZE_OF_TRI_TILE_FOR_MAJOR;
long index = blockIdx.x*blockDim.x + threadIdx.x;
f64_vec3 v_3;
f64_vec2 v1, v2, v_overall, v_overall2; // can drop 1 of these...
f64 area_old, area_new;
structural info = p_info[index];
//if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
// TRY WITH AND WITHOUT THIS.
// LOADING IN UNNECESSARY DATA FOR OUT-OF-DOMAIN .VS.
// IF IT DOES NOT TRIGGER CONTIG ACCESS PROPERLY WITHIN BRANCH.
// ... Probably it _IS_ necessary to load unnecessary data.
// ##################################################################
// The easy and correct thing, if we are only treating those
// that are DOMAIN/OUTERMOST, should be to only call for those blocks.
// ##################################################################
// Behaviour we want:
// Valid edges for traffic: domain-domain
// domain-outermost
// outermost-outermost [optional]
// Not valid: traffic into insulator --- but should get v_tri_r == 0
// traffic into frills --- careful to avoid!
// anything not involving domain/outermost
{
{
nT nT_temp = p_minor_nT_neut[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_neut[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_overall = p_minor_v_overall[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall.x;
v2.y = v_3.y-v_overall.y;
p_nv_shared[threadIdx.x] = nT_temp.n*v2;
// **********************************************************************
// CONSIDER: We promised that we would use the J that appears in the
// A-dot advance formula, for flowing charge. Is that what we are doing?
// We averaged n to the minor tile and multiplied by minor velocity rel to mesh.
// I guess that is okay...
// **********************************************************************
p_T_shared[threadIdx.x] = nT_temp.T;
nT_temp = p_minor_nT_neut[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_neut[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_overall2 = p_minor_v_overall[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall2.x;
v2.y = v_3.y-v_overall2.y;
p_nv_shared[blockDim.x + threadIdx.x] = nT_temp.n*v2;
p_T_shared[blockDim.x + threadIdx.x] = nT_temp.T;
}
p_tri_centroid[threadIdx.x] = p_minor_centroid[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
p_tri_centroid[blockDim.x + threadIdx.x] = p_minor_centroid[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
}
__syncthreads(); // Avoid putting within branch.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
// h*n*v.dot(edgenormal) is amount of traffic between major cells
// Next job is to compute edge_normal
// And decide whether this is a legit edge for traffic or not.
nT nTsrc = p_nT_neut[index]; // Not needed up here...
area_old = p_area_old[index];
area_new = p_area_new[index];
// hope that by putting here we get contiguous access.
memcpy(Indextri + MAXNEIGH_d*threadIdx.x,
pIndexTri + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long)); // MAXNEIGH should be chosen to be 12, for 1 full bus.
// memcpy(PBCtri + MAXNEIGH_d*threadIdx.x,
// pPBCTri + MAXNEIGH_d*index,
// MAXNEIGH_d*sizeof(char)); // MAXNEIGH should be chosen to be 12, for 1 full bus.
// By running threads per tri, we'd dispense with Indextri in shared and store solution (towards colour
// array) for vertex tile instead.
// Then we just incur a reload to aggregate the colour array.
// Easiest way:
// Edge involves 2 centres and 2 values of nv etc
f64_vec2 nv1, nvT1, nv2, nvT2, pos1, pos2; // lots of registers ... here 12
short iNeigh1 = info.neigh_len-1; // NOTE it is possible vertex has
// different number of neighs and triangles. What happens?
if (info.flag == OUTERMOST) iNeigh1++; // get to end of array.
short iNeigh2 = 0;
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv1 = p_nv_shared[indextri-StartMinor];
nvT1 = p_T_shared[indextri-StartMinor]*nv1;
pos1 = p_tri_centroid[indextri-StartMinor];
} else {
nT nT1 = p_minor_nT_neut[indextri];
v_3 = p_minor_v_neut[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri];
// (local copy: v_overall must keep this thread's own value for the next species' shared load)
v1.x = v_3.x-v_overall_.x;
v1.y = v_3.y-v_overall_.y;
nv1 = nT1.n*v1;
nvT1 = nT1.T*nv1;
pos1 = p_minor_centroid[indextri];
// Bad news: 3 separate bus journeys.
// We probably spend AT LEAST half our time here. Tile of 12 x 12 -> 144 within, 48 edge.
// We could be sending a more full bus by putting nTv.
// That would reduce costs here by 33%.
// The increased cost would be, that when we create n,T by averaging, we have to write access only
// part of an nvT struct.
// However, it is possible that a lot of these bus journeys take place at the same time that
// other threads are NOT needing a bus journey. Consider that.
// Stick with separate nT,v for now. We may never know, how much faster nvT would have been.
};
//char PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if (info.has_periodic) {
if ((pos1.x > pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Anticlock_rotate2(nv1); // ANTI is to mean that the tri is on the right, vertex on left.
nvT1 = Anticlock_rotate2(nvT1); // ANTI means apply anticlockwise.
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Clockwise_rotate2(nv1);
nvT1 = Clockwise_rotate2(nvT1);
pos1 = Clockwise_rotate2(pos1);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64 mass, heat;
mass = 0.0; heat = 0.0;
#pragma unroll 12
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh2];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv2 = p_nv_shared[indextri-StartMinor];
nvT2 = p_T_shared[indextri-StartMinor]*nv2;
pos2 = p_tri_centroid[indextri-StartMinor];
} else {
nT nT2 = p_minor_nT_neut[indextri];
v_3 = p_minor_v_neut[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri];
v2.x = v_3.x-v_overall_.x;
v2.y = v_3.y-v_overall_.y;
nv2 = nT2.n*v2;
nvT2 = nT2.T*nv2;
pos2 = p_minor_centroid[indextri];
};
// Two ways to store periodic: either 3 longs in registers, or,
// as an array of chars in shared memory.
// Alternative: each tri knows if it is periodic and we somehow
// load this alongside tri centroid, as a CHAR4.
if (info.has_periodic) {
if ((pos2.x > pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Anticlock_rotate2(nv2); // ANTI is to mean that the tri is on the right, vertex on left.
nvT2 = Anticlock_rotate2(nvT2); // ANTI means apply anticlockwise.
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Clockwise_rotate2(nv2);
nvT2 = Clockwise_rotate2(nvT2);
pos2 = Clockwise_rotate2(pos2);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64_vec2 edgenormal;
edgenormal.x = pos2.y-pos1.y; // 2 is the more anticlockwise one
edgenormal.y = pos1.x-pos2.x;
// At edge of memory, whether we have extra outer tris involved or not,
// counting all edges means we create an edge looking out of the domain.
// It's our choice whether current can flow out of the domain or not.
// Probably best if not.
// So either we need to find a way to import a flag here, OR, set vr to zero (v=0?)
// either in extra-outer tris or in tris just inside.
// The extra-outer tris are sounding more appealing all the time. Let's go for them.
if (1) { // if legitimate edge
f64 flow = 0.5*h*((nv1+nv2).dot(edgenormal));
mass -= flow; // correct? -- compare
flow = 0.5*h*((nvT1+nvT2).dot(edgenormal));
heat -= flow;
};
nvT1 = nvT2;
nv1 = nv2;
pos1 = pos2;
}; // next neigh
// If we did the above with triangle threads that update a solution in shared memory,
// we could switch to half the block doing the following:
mass += nTsrc.n*area_old;
heat += nTsrc.n*nTsrc.T*area_old;
nT nT_out;
nT_out.n = mass/area_new;
nT_out.T = heat/mass;
// Compressive heating:
// We need here new area and old area:
nT_out.T *= (1.0-0.666666666666667*(nT_out.n-nTsrc.n)/nTsrc.n
-0.111111111111111*(nT_out.n-nTsrc.n)*(nT_out.n-nTsrc.n)/(nTsrc.n*nTsrc.n));
// Note: 2 divisions vs 1 call to pow
p_nT_neut_out[index] = nT_out;
} // whether DOMAIN VERTEX --- try with and without.
__syncthreads(); // avoid syncthreads within branch.
// + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
// Ready for next species:
//if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{ // The point here is to reuse both the tri indices and the centroids.
// We should realise that putting a DOMAIN condition on this will make it go wrong:
// we are not loading all ins triangles this way, but we will assume we can
// use them, as far as I can see.
// If we only drop whole blocks that are INNER_VERTEX -- which we should --
// then we should be all right here -- if it's not part of this block then it's loaded
// separately.
nT nT_temp = p_minor_nT_ion[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_ion[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall.x;
v2.y = v_3.y-v_overall.y;
p_nv_shared[threadIdx.x] = nT_temp.n*v2;
p_T_shared[threadIdx.x] = nT_temp.T;
nT_temp = p_minor_nT_ion[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_ion[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall2.x;
v2.y = v_3.y-v_overall2.y;
p_nv_shared[blockDim.x + threadIdx.x] = nT_temp.n*v2;
p_T_shared[blockDim.x + threadIdx.x] = nT_temp.T;
}
__syncthreads(); // Avoid putting within branch.
f64_vec2 nv1, nvT1, pos1, nv2, nvT2, pos2;
nT nT1, nT2;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
// h*n*v.dot(edgenormal) is amount of traffic between major cells
nT nTsrc = p_nT_ion[index];
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
if (info.flag == OUTERMOST) iNeigh1++; // get to end of array.
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv1 = p_nv_shared[indextri-StartMinor];
nvT1 = p_T_shared[indextri-StartMinor]*nv1; // extra access to shared - nvm
pos1 = p_tri_centroid[indextri-StartMinor];
} else {
nT1 = p_minor_nT_ion[indextri];
v_3 = p_minor_v_ion[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri];
// (local copy: v_overall must keep this thread's own value for the next species' shared load)
v1.x = v_3.x-v_overall_.x;
v1.y = v_3.y-v_overall_.y;
nv1 = nT1.n*v1;
nvT1 = nT1.T*nv1;
pos1 = p_minor_centroid[indextri];
};
//char PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if (info.has_periodic) {
if ((pos1.x > pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Anticlock_rotate2(nv1); // ANTI is to mean that the tri is on the right, vertex on left.
nvT1 = Anticlock_rotate2(nvT1); // ANTI means apply anticlockwise.
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Clockwise_rotate2(nv1);
nvT1 = Clockwise_rotate2(nvT1);
pos1 = Clockwise_rotate2(pos1);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64 mass, heat;
mass = 0.0; heat = 0.0;
#pragma unroll 12
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
// NB - if we have an outer point
// then the number of neighs is not the number of tris
// SO EXPLOIT THIS
// Make sure that the omitted edge is the one that would go between the frill tris.
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh2];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv2 = p_nv_shared[indextri-StartMinor];
nvT2 = p_T_shared[indextri-StartMinor]*nv2;
pos2 = p_tri_centroid[indextri-StartMinor];
} else {
nT2 = p_minor_nT_ion[indextri];
v_3 = p_minor_v_ion[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri];
v2.x = v_3.x-v_overall_.x;
v2.y = v_3.y-v_overall_.y;
nv2 = nT2.n*v2; // assign the outer nv2, not a shadowing local
nvT2 = nT2.T*nv2;
pos2 = p_minor_centroid[indextri];
};
if (info.has_periodic) {
if ((pos2.x > pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Anticlock_rotate2(nv2); // ANTI is to mean that the tri is on the right, vertex on left.
nvT2 = Anticlock_rotate2(nvT2); // ANTI means apply anticlockwise.
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Clockwise_rotate2(nv2);
nvT2 = Clockwise_rotate2(nvT2);
pos2 = Clockwise_rotate2(pos2);
};
};
f64_vec2 edgenormal;
edgenormal.x = pos2.y-pos1.y; // 2 is the more anticlockwise one
edgenormal.y = pos1.x-pos2.x;
if (1) { // if legitimate edge -- remember we should treat edge the same way from both sides.
f64 flow = 0.5*h*((nv1+nv2).dot(edgenormal));
mass -= flow; // correct? -- compare
flow = 0.5*h*((nvT1+nvT2).dot(edgenormal));
heat -= flow;
};
nvT1 = nvT2;
nv1 = nv2;
pos1 = pos2;
}; // next neigh
mass += nTsrc.n*area_old;
heat += nTsrc.n*nTsrc.T*area_old;
nT nT_out;
nT_out.n = mass/area_new;
nT_out.T = heat/mass;
// Compressive heating:
// We need here new area and old area:
nT_out.T *= (1.0-0.666666666666667*(nT_out.n-nTsrc.n)/nTsrc.n
-0.111111111111111*(nT_out.n-nTsrc.n)*(nT_out.n-nTsrc.n)/(nTsrc.n*nTsrc.n));
p_nT_ion_out[index] = nT_out;
};
// The point here is to reuse both the tri indices and the centroids.
// Ready for next species:
//if (info.flag == DOMAIN_VERTEX) {
// TRY WITH AND WITHOUT THIS.
{
nT nT_temp = p_minor_nT_elec[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_elec[
blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall.x;
v2.y = v_3.y-v_overall.y;
p_nv_shared[threadIdx.x] = nT_temp.n*v2;
p_T_shared[threadIdx.x] = nT_temp.T;
nT_temp = p_minor_nT_elec[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v_3 = p_minor_v_elec[
blockDim.x + blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
v2.x = v_3.x-v_overall2.x;
v2.y = v_3.y-v_overall2.y;
p_nv_shared[blockDim.x + threadIdx.x] = nT_temp.n*v2;
p_T_shared[blockDim.x + threadIdx.x] = nT_temp.T;
}
//}
__syncthreads(); // Avoid putting within branch.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
nT nTsrc = p_nT_elec[index];
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
if (info.flag == OUTERMOST) iNeigh1++; // get to end of array.
long indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv1 = p_nv_shared[indextri-StartMinor];
nvT1 = p_T_shared[indextri-StartMinor]*nv1; // extra access to shared - nvm
pos1 = p_tri_centroid[indextri-StartMinor];
} else {
nT1 = p_minor_nT_elec[indextri];
v_3 = p_minor_v_elec[indextri];
v_overall = p_minor_v_overall[indextri];
v1.x = v_3.x-v_overall.x;
v1.y = v_3.y-v_overall.y;
nv1 = nT1.n*v1;
nvT1 = nT1.T*nv1;
pos1 = p_minor_centroid[indextri];
};
//char PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh1];
if (info.has_periodic) {
if ((pos1.x > pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Anticlock_rotate2(nv1); // ANTI is to mean that the tri is on the right, vertex on left.
nvT1 = Anticlock_rotate2(nvT1); // ANTI means apply anticlockwise.
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -pos1.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv1 = Clockwise_rotate2(nv1);
nvT1 = Clockwise_rotate2(nvT1);
pos1 = Clockwise_rotate2(pos1);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64 mass, heat;
mass = 0.0; heat = 0.0;
#pragma unroll 12
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
indextri = Indextri[MAXNEIGH_d*threadIdx.x + iNeigh2];
if ((indextri >= StartMinor) && (indextri < EndMinor))
{
nv2 = p_nv_shared[indextri-StartMinor];
nvT2 = p_T_shared[indextri-StartMinor]*nv2;
pos2 = p_tri_centroid[indextri-StartMinor];
} else {
nT2 = p_minor_nT_elec[indextri];
v_3 = p_minor_v_elec[indextri];
f64_vec2 v_overall_ = p_minor_v_overall[indextri];
v2.x = v_3.x-v_overall_.x;
v2.y = v_3.y-v_overall_.y;
nv2 = nT2.n*v2;
nvT2 = nT2.T*nv2;
pos2 = p_minor_centroid[indextri];
};
if (info.has_periodic) {
if ((pos2.x > pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x < -info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Anticlock_rotate2(nv2); // ANTI is to mean that the tri is on the right, vertex on left.
nvT2 = Anticlock_rotate2(nvT2); // ANTI means apply anticlockwise.
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -pos2.y*GRADIENT_X_PER_Y*0.5) &&
(info.pos.x > info.pos.y*GRADIENT_X_PER_Y*0.5))
{
nv2 = Clockwise_rotate2(nv2);
nvT2 = Clockwise_rotate2(nvT2);
pos2 = Clockwise_rotate2(pos2);
};
// Assume we always find periodic neigh to right/left of 1/4-way line, and same
// for the point itself.
};
f64_vec2 edgenormal;
edgenormal.x = pos2.y-pos1.y; // 2 is the more anticlockwise one
edgenormal.y = pos1.x-pos2.x;
if (1) { // if legitimate edge --- how to know if we are looking into outermost??
// We are not loading info about neighbours. Yet it is only the neigh that knows
// it is OUTERMOST.
f64 flow = 0.5*h*((nv1+nv2).dot(edgenormal));
mass -= flow; // correct? -- compare
flow = 0.5*h*((nvT1+nvT2).dot(edgenormal));
heat -= flow;
// Meanwhile what if we are looking through insulator.
// We should find there that we have insisted on v_r=0 and so v.dot(edgenormal) roughly = 0.
// But we need to consider about outermost what to do.
// We don't really want to be arbitrarily losing or gaining charge.
// The answer is to include the OUTERMOST flag, but exclude the outermost edge of an OUTERMOST vertex.
// This can happen automatically by a CAREFUL NUMBERING of outermost tris and neighs.
// Does it disagree with the numbering we previously considered canonical? Probably yes --> edit through :-/
};
nvT1 = nvT2;
nv1 = nv2;
pos1 = pos2;
}; // next neigh
mass += nTsrc.n*area_old;
heat += nTsrc.n*nTsrc.T*area_old;
nT nT_out;
nT_out.n = mass/area_new;
nT_out.T = heat/mass;
// Compressive heating:
// We need here new area and old area:
nT_out.T *= (1.0-0.666666666666667*(nT_out.n-nTsrc.n)/nTsrc.n
-0.111111111111111*(nT_out.n-nTsrc.n)*(nT_out.n-nTsrc.n)/(nTsrc.n*nTsrc.n));
p_nT_elec_out[index] = nT_out;
};
}
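// Kernel_Populate_A_frill:
// Fill A on frill triangles from their single real neighbour: for OUTER_FRILL, Axy is
// scaled by r_neigh/r_frill (so Axy falls off like 1/r going outward); for INNER_FRILL,
// by r_frill/r_neigh (so Axy shrinks like r going inward); Az is copied unchanged.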
__global__ void Kernel_Populate_A_frill(
CHAR4 * __restrict__ p_tri_info,
f64_vec3 * __restrict__ p_A, // update own, read others
f64_vec2 * __restrict__ p_tri_centroid,
//LONG3 * __restrict__ p_corner_index
LONG3 * __restrict__ p_tri_neigh_index)
{
//long index = (blockIdx.x + BLOCK_START_OF_FRILL_SEARCH_d)*blockDim.x + threadIdx.x;
long index = blockIdx.x*blockDim.x + threadIdx.x;
// load the two corner indices
CHAR4 perinfo = p_tri_info[index];
if (perinfo.flag == OUTER_FRILL) {
//LONG3 cornerindex = p_corner_index[index];
//A0 = p_A[BEGINNING_OF_CENTRAL + cornerindex.i1];
//A1 = p_A[BEGINNING_OF_CENTRAL + cornerindex.i2];
//if (perinfo.per0 == NEEDS_CLOCK) A0 = Clockwise_rotate2(A0);
//if (perinfo.per1 == NEEDS_CLOCK) A1 = Clockwise_rotate2(A1);
//if (perinfo.per0 == NEEDS_ANTI) A0 = Anticlock_rotate2(A0);
//if (perinfo.per1 == NEEDS_ANTI) A1 = Anticlock_rotate2(A1);
//p_A[index] = 0.5*(A0 + A1);
// Just do this instead:
LONG3 neighindex = p_tri_neigh_index[index];
f64_vec2 cent = p_tri_centroid[index];
f64_vec2 centneigh = p_tri_centroid[neighindex.i1];
f64_vec3 A = p_A[neighindex.i1];
// Axy decrease radially:
f64 factor = sqrt((centneigh.x*centneigh.x+centneigh.y*centneigh.y)/
(cent.x*cent.x+cent.y*cent.y));
A.x *= factor;
A.y *= factor;
p_A[index] = A;
};
if (perinfo.flag == INNER_FRILL) {
//LONG3 cornerindex = p_corner_index[index];
//A0 = p_A[BEGINNING_OF_CENTRAL + cornerindex.i1];
//A1 = p_A[BEGINNING_OF_CENTRAL + cornerindex.i2];
//if (perinfo.per0 == NEEDS_CLOCK) A0 = Clockwise_rotate2(A0);
//if (perinfo.per1 == NEEDS_CLOCK) A1 = Clockwise_rotate2(A1);
//if (perinfo.per0 == NEEDS_ANTI) A0 = Anticlock_rotate2(A0);
//if (perinfo.per1 == NEEDS_ANTI) A1 = Anticlock_rotate2(A1);
//p_A[index] = 0.5*(A0 + A1);
// Just do this instead:
LONG3 neighindex = p_tri_neigh_index[index];
f64_vec2 cent = p_tri_centroid[index];
f64_vec2 centneigh = p_tri_centroid[neighindex.i1];
f64_vec3 A = p_A[neighindex.i1];
// Axy decrease radially:
f64 factor = sqrt((cent.x*cent.x+cent.y*cent.y)/
(centneigh.x*centneigh.x+centneigh.y*centneigh.y));
A.x *= factor;
A.y *= factor;
p_A[index] = A;
};
}
// The same sort of routine as the following will be needed to anti-advect A,Adot,phi,phidot.
// Bad news: no way to avoid it though... could we interpolate to new values? Is that really much different?
// Crude estimate of grad is okay.
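// Kernel_Compute_Grad_A_minor_antiadvect:
// Computes grad Ax, grad Ay, grad Az on minor cells by integrating A*edge_normal around
// each cell (corner A-values weighted 1/12, neighbouring cell-centre values 5/12, with
// periodic images rotated as needed), together with the cell area; per the argument
// list the gradient is then used to anti-advect A by h*v_overall, with an optional
// contribution from p_Addition_Rate when bAdd is set (those steps follow further down).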
__global__ void Kernel_Compute_Grad_A_minor_antiadvect(
f64_vec3 * __restrict__ p_A_tri, // for creating grad
f64_vec3 * __restrict__ p_A_vert, //
f64 h,
f64_vec2 * __restrict__ p_v_overall, // hv = amt to anti-advect
structural * __restrict__ p_info, //
f64_vec2 * __restrict__ p_tri_centroid, //
CHAR4 * __restrict__ p_tri_perinfo, //
CHAR4 * __restrict__ p_tri_per_neigh,
LONG3 * __restrict__ p_corner_index, //
LONG3 * __restrict__ p_neigh_tri_index, //
long * __restrict__ p_IndexTri, // we said carry on using this for now.
bool bAdd,
f64_vec3 * __restrict__ p_Addition_Rate,
// output:
f64_vec3 * __restrict__ p_A_out // fill in for both tri and vert...
)
{
__shared__ f64_vec3 A_tri[threadsPerTileMinor];
__shared__ f64_vec2 tri_centroid[threadsPerTileMinor]; // 5
__shared__ f64_vec3 A_vert[SIZE_OF_MAJOR_PER_TRI_TILE]; // +1.5
__shared__ f64_vec2 vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE];
// If we want 512 threads/SM, 12 doubles in shared per thread is limit.
// We can accommodate 12 .. so 6 per major in addition to this but not when we have shared_per.
// Well we could limit it to 10 tris but it's asking for trouble.
// 6 longs = 3 doublesworth per thread
__shared__ long IndexTri[SIZE_OF_MAJOR_PER_TRI_TILE*MAXNEIGH_d]; // +3
// Do first with 3 dimensions Axyz at once - may be slower but we'll see.
long index = blockIdx.x*blockDim.x + threadIdx.x;
long StartTri = blockIdx.x*blockDim.x; // can replace this one.
long StartMajor = SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x; // can replace this one.
// could replace with #define here.
A_tri[threadIdx.x] = p_A_tri[index];
tri_centroid[threadIdx.x] = p_tri_centroid[index];
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
A_vert[threadIdx.x] = p_A_vert[SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x + threadIdx.x];
structural info = p_info[SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x + threadIdx.x];
vertex_pos[threadIdx.x] = info.pos;
}
// shared_per[threadIdx.x] = perinfo.per0+perinfo.per1+perinfo.per2; // if periodic tri then neigh will need to be able to know.
// note that we have to make sure CHAR4 takes up 32 bits not 4 x 32.
// Is that the root of our problems with footprint?
// If so, what should we do? Bitwise operations on a char?
__syncthreads();
f64_vec2 gradAx(0.0,0.0);
f64_vec2 gradAy(0.0,0.0);
f64_vec2 gradAz(0.0,0.0);
f64_vec2 v_overall = p_v_overall[index];
CHAR4 perinfo = p_tri_perinfo[index];
{
// Allow it to run through and produce nonsense for frills....
CHAR4 tri_rotate = p_tri_per_neigh[index];
LONG3 corner_index = p_corner_index[index];
LONG3 neightri = p_neigh_tri_index[index];
// Note that A, as well as position, has to be ROTATED to make a contiguous image.
// This tri minor has 3 edges with triangles and 3 edges with centrals.
f64 area = 0.0;
f64_vec2 pos0(9.0,19.0), pos1 (1.0,2.0), pos2(4.0,2.0);
// f64_vec3 Avert0,Avert1,Avert2;
// We need 4 values at a time in order to do a side.
// We don't need to have all 7 values (3+ 3 + itself)
// So we'd be better just to load one quadrilateral's doings at a time, given the paucity of register and L1 space.
// Either we store all 7x3 A-values at once + 7 positions. Or we use 4 and 4 positions at once.
// Bear in mind a partial saving might yield big benefits.
// HAZARD: we don't know the ordering of the points.
// Halfway house: for simplicity store all the positions already loaded.
// A does not load from the same place anyway.
// Then go round the quadrilaterals.
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos0 = vertex_pos[corner_index.i1-StartMajor];
} else {
structural info = p_info[corner_index.i1];
pos0 = info.pos;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos1 = vertex_pos[corner_index.i2-StartMajor];
} else {
structural info = p_info[corner_index.i2];
pos1 = info.pos;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos2 = vertex_pos[corner_index.i3-StartMajor];
} else {
structural info = p_info[corner_index.i3];
pos2 = info.pos;
}
if (perinfo.per0+perinfo.per1+perinfo.per2 == 0) {
} else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI) {
pos0 = Anticlock_rotate2(pos0);
}
if (perinfo.per0 == NEEDS_CLOCK) { // this means the corner is off the clockwise side. Therefore anticlockwise rotated.
pos0 = Clockwise_rotate2(pos0);
}
if (perinfo.per1 == NEEDS_ANTI) {
pos1 = Anticlock_rotate2(pos1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
pos1 = Clockwise_rotate2(pos1);
}
if (perinfo.per2 == NEEDS_ANTI) {
pos2 = Anticlock_rotate2(pos2);
}
if (perinfo.per2 == NEEDS_CLOCK) {
pos2 = Clockwise_rotate2(pos2);
}
};
// It worked with none of the calcs in. Now we bring back the above. Still works
f64_vec2 u0(1.0,2.0),
u1(0.0,2.0),
u2(3.0,1.0);
// to be the positions of neighbouring centroids
// CHAR4 tri_rotate; // 4 chars but really using 3
// tri_rotate.per0 = 0; tri_rotate.per1 = 0; tri_rotate.per2 = 0;
char periodic = perinfo.per0+perinfo.per1+perinfo.per2;
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri+blockDim.x))
{
u0 = tri_centroid[neightri.i1-StartTri];
} else {
u0 = p_tri_centroid[neightri.i1];
};
if (tri_rotate.per0 == NEEDS_CLOCK)
u0 = Clockwise_rotate2(u0);
if (tri_rotate.per0 == NEEDS_ANTI)
u0 = Anticlock_rotate2(u0);
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri+blockDim.x))
{
u1 = tri_centroid[neightri.i2-StartTri];
} else {
u1 = p_tri_centroid[neightri.i2];
}
if (tri_rotate.per1 == NEEDS_CLOCK)
u1 = Clockwise_rotate2(u1);
if (tri_rotate.per1 == NEEDS_ANTI)
u1 = Anticlock_rotate2(u1);
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri+blockDim.x))
{
u2 = tri_centroid[neightri.i3-StartTri];
} else {
u2 = p_tri_centroid[neightri.i3];
}
if (tri_rotate.per2 == NEEDS_CLOCK)
u2 = Clockwise_rotate2(u2);
if (tri_rotate.per2 == NEEDS_ANTI)
u2 = Anticlock_rotate2(u2);
// still works
// ............................................................................................
// . I think working round with 4 has a disadvantage: if we get back around to one that is off-tile,
// we have to load it all over again. Still that is only 1 out of 7 that gets duplicated.
// Here is the best thing I can come up with: store 7 positions. That is already
// 28 longs' worth... each A-value uses 6 of the 7 positions to have an effect.
// Load each A-value at a time and recalc shoelace for 3 quadrilaterals. ??
// Too complicated.
// If we store all positions, can we finish with each A as we handle it? Yes but let's not.
//f64_vec2 ourpos = tri_centroid[threadIdx.x]; // can try with and without this assignment to variable
//f64_vec3 A0 = A_tri[threadIdx.x]; // can try with and without this assignment to variable
f64_vec3 A_1(0.0,0.0,0.0),
A_out(0.0,0.0,0.0),
A_2(0.0,0.0,0.0);
// Our A: A_tri[threadIdx.x]
// Now fill in the A values:
// ____________________________
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_1 = A_vert[corner_index.i2-StartMajor];
} else {
A_1 = p_A_vert[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i3-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i3];
}
if (periodic == 0) {
} else {
if (perinfo.per1 == NEEDS_ANTI) {
A_1 = Anticlock_rotate3(A_1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
A_1 = Clockwise_rotate3(A_1);
}
if (perinfo.per2 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
};
if (perinfo.per2 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
};
}
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + blockDim.x))
{
A_out = A_tri[neightri.i1-StartTri];
} else {
A_out = p_A_tri[neightri.i1];
}
if (tri_rotate.per0 != 0) {
if (tri_rotate.per0 == NEEDS_CLOCK) {
A_out = Clockwise_rotate3(A_out);
} else {
A_out = Anticlock_rotate3(A_out);
};
};
// ======================================================
// shoelace = (ourpos.x-u0.x)*(pos1.y-pos2.y)
// + (pos1.x-pos2.x)*(u0.y-ourpos.y); // if u0 is opposite point 0
// clock.x-anti.x
// We are now going to put the corners of the minor cell at
// e.g. 1/3(pos1 + u0 + ourpos)
// rather than at
// e.g. 2/3 pos1 + 1/3 pos2
//corner1 = 0.3333333*(pos1+u0+ourpos)
//corner2 = 0.3333333*(pos2+u0+ourpos)
//edgenormal.x = corner1.y-corner2.y = 0.333333(pos1.y-pos2.y) -- so no change here
f64_vec2 edgenormal;
edgenormal.x = (pos1.y-pos2.y)*0.333333333333333;
edgenormal.y = (pos2.x-pos1.x)*0.333333333333333; // cut off 1/3 of the edge
if (edgenormal.dot(pos0-pos1) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
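// (After the sign flip the normal points outward, away from corner 0 and towards neighbour tri 0,
// and its magnitude is the length of this minor-cell edge, |pos1-pos2|/3.)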
// Think about averaging at typical edge.
// Using 5/12:
// corners are say equidistant from 3 points, so on that it would be 1/6
// but allocate the middle half of the bar to 50/50 A_tri[threadIdx.x]+Aout.
// tried without A_tri[threadIdx.x].z+ ...
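// i.e. the edge value is A_edge = TWELTH*(A_1+A_2) + FIVETWELTHS*(A_tri[threadIdx.x]+A_out),
// weights 1/12, 1/12, 5/12, 5/12 summing to 1, and each line below adds A_edge*edgenormal:
// one edge's term of the contour integral grad A ~= (1/area) * sum over edges of A_edge * n * dl.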
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
// Now that we put minor corners at (1/3)(2 centroids+vertex), this makes even more sense.
area += 0.333333333333333*
(0.5*(pos1.x+pos2.x)+tri_centroid[threadIdx.x].x+u0.x)*edgenormal.x;
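// Area by the divergence theorem: area = sum over edges of (mean x of the edge) * edgenormal.x,
// the edge endpoints each being (1/3)(vertex + 2 centroids), which is what the line above encodes.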
// INCONSISTENT BEHAVIOUR:
// RUNNING ONLY UP TO HERE WAS ENOUGH TO FAIL.
// ASSUMING ALL VALUES VALID (consider edge of memory a different case):
// From here on is where it gets thorny as we no longer map A_1 to vertex 1.
// %%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%%
A_1 = A_out; // now A_1 points at tri neigh 0
A_out = A_2; // now looking at vertex 2
// A_2 is now to point at tri neigh 1
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + blockDim.x))
{
A_2 = A_tri[neightri.i2-StartTri];
} else {
A_2 = p_A_tri[neightri.i2];
}
if (tri_rotate.per1 != 0) {
if (tri_rotate.per1 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
// shoelace = (ourpos.x-pos2.x)*(u0.y-u1.y)
// + (u0.x-u1.x)*(pos2.y-ourpos.y);
//x1 = (2/3)pos2+(1/3)pos0;
//x2 = (2/3)pos2+(1/3)pos1;
//edgenormal.x = (x1.y-x2.y);
//edgenormal.y = (x2.x-x1.x); // cut off 1/3 of the edge
edgenormal.x = 0.333333333333333*(pos0.y-pos1.y);
edgenormal.y = 0.333333333333333*(pos1.x-pos0.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos2-pos1) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(u0.x+u1.x)
+tri_centroid[threadIdx.x].x+pos2.x)*edgenormal.x;
A_1 = A_out; // now A_1 points at corner 2
A_out = A_2; // now points at tri 1
// A_2 to point at corner 0
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i1-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i1];
}
if (perinfo.per0 != 0) {
if (perinfo.per0 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
}
if (perinfo.per0 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
}
}
//shoelace = (ourpos.x-u1.x)*(pos2.y-pos0.y) // clock.y-anti.y
// + (pos2.x-pos0.x)*(u1.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos0.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos0.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos1-pos0) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(pos2.x+pos0.x)+tri_centroid[threadIdx.x].x+u1.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A_2 is now to point at tri neigh 2
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + blockDim.x))
{
A_2 = A_tri[neightri.i3-StartTri];
} else {
A_2 = p_A_tri[neightri.i3];
}
if (tri_rotate.per2 != 0) {
if (tri_rotate.per2 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
// f64 shoelace = (ourpos.x-pos0.x)*(u1.y-u2.y) // clock.y-anti.y
// + (u1.x-u2.x)*(pos0.y-ourpos.y);
// Where is it used?
edgenormal.x = 0.333333333333333*(pos1.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos1.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos0-pos1) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(u2.x+u1.x)+tri_centroid[threadIdx.x].x+pos0.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A2 to be for corner 1
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i2-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i2];
}
if (perinfo.per1 != 0) {
if (perinfo.per1 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
}
if (perinfo.per1 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
}
}
//shoelace = (ourpos.x-u2.x)*(pos0.y-pos1.y) // clock.y-anti.y
//+ (pos0.x-pos1.x)*(u2.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos1.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos1.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos2-pos1) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(pos0.x+pos1.x)+tri_centroid[threadIdx.x].x+u2.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A2 to be for tri 0
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + blockDim.x))
{
A_2 = A_tri[neightri.i1-StartTri];
} else {
A_2 = p_A_tri[neightri.i1];
}
if (tri_rotate.per0 != 0) {
if (tri_rotate.per0 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
//shoelace = (ourpos.x-pos1.x)*(u2.y-u0.y) // clock.y-anti.y
// + (u2.x-u0.x)*(pos1.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos2.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos2.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos1-pos2) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
gradAx += (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A_tri[threadIdx.x].x+A_out.x))*edgenormal;
gradAy += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A_tri[threadIdx.x].y+A_out.y))*edgenormal;
gradAz += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A_tri[threadIdx.x].z+A_out.z))*edgenormal;
area += 0.333333333333333*(0.5*(u0.x+u2.x)+tri_centroid[threadIdx.x].x+pos1.x)*edgenormal.x;
// CHECKED ALL THAT
gradAx /= area;
gradAy /= area;
gradAz /= area;
}
// Now we have to do something about anti-advecting:
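// The minor cell moves with the mesh velocity v_overall over the step, so the A value it carries
// picks up approximately h * (v_overall . grad A) per component -- that is the correction applied below.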
if ((perinfo.flag == DOMAIN_TRIANGLE) || (perinfo.flag == CROSSING_INS)) // otherwise the centroid can be assumed not moving??
{
f64_vec3 anti_Advect;
anti_Advect.x = h*v_overall.dot(gradAx);
anti_Advect.y = h*v_overall.dot(gradAy);
anti_Advect.z = h*v_overall.dot(gradAz);
p_A_out[index] += anti_Advect;
}
// Similar routine will be needed to create grad A ... or Adot ... what a waste of calcs.
// Is there a more sensible way: only do a mesh move every 10 steps -- ??
// Then what do we do on the intermediate steps -- that's a problem -- flowing Eulerian fluid
// will give the right change in pressure, but then mesh has to catch up. Still that might be a thought.
// Next consideration: Lap A on central.
// Idea for doing at same time: (don't do it -- too much atomicAdd, I do not trust)
// ___ only certain major cells "belong" to this tri tile.
// ___ write to a given output from our total effect coming from this tile's tris.
// ____ when we hit a central cell outside this tile, send it atomicAdd to an array
// that collects up all the extra contribs to it.
// __ then we just reload, sum 2 things and divide
// However, native fp64 atomicAdd only exists from Compute Capability 6.0 :-(
// Workaround taken from http://stackoverflow.com/questions/16077464/atomicadd-for-double-on-gpu
// Eventually decided not to use but to carry on with half the threads to target centrals in this routine.
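// For reference, the usual CAS-based fp64 atomicAdd workaround for devices below CC 6.0 looks like
// this (name illustrative); kept here only as a note since we decided above not to take that route:
// __device__ double atomicAddDouble(double *address, double val) {
//     unsigned long long int *address_as_ull = (unsigned long long int *)address;
//     unsigned long long int old = *address_as_ull, assumed;
//     do {
//         assumed = old;
//         old = atomicCAS(address_as_ull, assumed,
//                         __double_as_longlong(val + __longlong_as_double(assumed)));
//     } while (assumed != old);
//     return __longlong_as_double(old);
// }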
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
index = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x;
structural info = p_info[index];
if (info.flag == DOMAIN_VERTEX) {
// Does branching disrupt contiguity???
f64_vec2 v_overall = p_v_overall[BEGINNING_OF_CENTRAL + index];
memcpy(IndexTri+threadIdx.x*MAXNEIGH_d, p_IndexTri+index*MAXNEIGH_d,
sizeof(long)*MAXNEIGH_d);
f64_vec3 A0 = A_vert[threadIdx.x]; // can ditch
f64_vec2 u0 = vertex_pos[threadIdx.x];
f64_vec3 A1(0.0,0.0,0.0),A2(0.0,0.0,0.0),A3(0.0,0.0,0.0);
f64_vec2 u1(0.0,0.0),u2(1.1,1.1),u3(1.0,2.0);
f64 //shoelace,
area = 0.0;
f64_vec2 edgenormal;
// As before we need 4 A values and positions at a time. Now 3 all come from tris.
gradAx.x = 0.0; gradAx.y = 0.0;
gradAy.x = 0.0; gradAy.y = 0.0;
gradAz.x = 0.0; gradAz.y = 0.0;
// CAUTION: we found that unrolling can be slower if it uses up too many registers (!)
// Initial situation: inext = 1, i = 0, iprev = -1
long iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+info.neigh_len-1];
// BEWARE OF FRILLED VERTCELLS: neigh_len < tri_len ??
if ((iindextri >= StartTri) && (iindextri < StartTri + blockDim.x)) // matching code above to see what happens
// (alternative bound: StartTri + threadsPerTileMinor)
{
// DOES NOT WORK WITH 2 LINES HERE.
A3 = A_tri[iindextri-StartTri]; // this breaks it
u3 = tri_centroid[iindextri-StartTri]; // this breaks it
} else {
A3 = p_A_tri[iindextri];
u3 = p_tri_centroid[iindextri];
};
// The peculiar thing is that a very similar read happens earlier on.
// INCONSISTENT BEHAVIOUR: now it does not work even with all of the above reads commented out.
// FAILS IF THE COMMENTED-OUT REGION STARTS HERE
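// Periodic wedge test: if the triangle centroid just loaded sits across a periodic boundary
// (x beyond +/- 0.5*GRADIENT_X_PER_Y*y) from this vertex, rotate its A and position into our frame.
// The commented-out Lap A routine further down performs the equivalent test against u0.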
if (info.has_periodic != 0) {
if ((u3.x > 0.5*GRADIENT_X_PER_Y*u3.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A3 = Anticlock_rotate3(A3);
u3 = Anticlock_rotate2(u3);
};
if ((u3.x < -0.5*GRADIENT_X_PER_Y*u3.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A3 = Clockwise_rotate3(A3);
u3 = Clockwise_rotate2(u3);
};
}
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d]; // + 0
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A2 = A_tri[iindextri-StartTri];
u2 = tri_centroid[iindextri-StartTri];
} else {
A2 = p_A_tri[iindextri];
u2 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u2.x > 0.5*GRADIENT_X_PER_Y*u2.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A2 = Anticlock_rotate3(A2);
u2 = Anticlock_rotate2(u2);
};
if ((u2.x < -0.5*GRADIENT_X_PER_Y*u2.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A2 = Clockwise_rotate3(A2);
u2 = Clockwise_rotate2(u2);
};
}
int inext = 0; // will be ++ straight away.
#pragma unroll MAXNEIGH_d
for (int i = 0; i < info.neigh_len; i++) // note: the #pragma unroll hint above uses MAXNEIGH_d, but the loop itself only runs to neigh_len
{
inext++;
if (inext == info.neigh_len) inext = 0;
// Bear in mind, this would not work for OUTERMOST.
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+inext];
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A1 = A_tri[iindextri-StartTri];
u1 = tri_centroid[iindextri-StartTri];
} else {
A1 = p_A_tri[iindextri];
u1 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u1.x > 0.5*GRADIENT_X_PER_Y*u1.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A1 = Anticlock_rotate3(A1);
u1 = Anticlock_rotate2(u1);
};
if ((u1.x < -0.5*GRADIENT_X_PER_Y*u1.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A1 = Clockwise_rotate3(A1);
u1 = Clockwise_rotate2(u1);
};
}
// So how are we going to get the corners of central cell?
// Do we change the plan and make them the average of 2 tri centroids and the vertex?
// That is one way, not sure I'm keen on it, not having thought about it.
// YES, that is what we have to do.
// ==============
// edge_cnr1 = (u1+u2+u0)*0.333333333333333;
// edge_cnr2 = (u3+u2+u0)*0.333333333333333;
edgenormal.x = 0.333333333333333*(u1.y-u3.y);
edgenormal.y = 0.333333333333333*(u3.x-u1.x);
// edgenormal to point at u2:
if ((u2-u1).dot(edgenormal) < 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
}
//shoelace = (u0.x-u2.x)*(u1.y-u3.y) +
// (u1.x-u3.x)*(u2.y-u0.y);
gradAx += (TWELTH*(A1.x+A3.x)+FIVETWELTHS*(A0.x+A2.x))*edgenormal;
gradAy += (TWELTH*(A1.y+A3.y)+FIVETWELTHS*(A0.y+A2.y))*edgenormal;
gradAz += (TWELTH*(A1.z+A3.z)+FIVETWELTHS*(A0.z+A2.z))*edgenormal;
area += (0.3333333333333333*(0.5*(u1.x+u3.x)+u2.x+u0.x))*edgenormal.x;
// ( grad x )_x
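// Same construction as for the tri minors: the central-cell edge facing tri u2 runs from
// (1/3)(u1+u2+u0) to (1/3)(u3+u2+u0), A on it is averaged as (5/12)(A0+A2) + (1/12)(A1+A3),
// and grad A and area accumulate as contour integrals to be divided by area after the loop.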
// move round A values and positions:
// ----------------------------------
A3 = A2;
u3 = u2;
A2 = A1;
u2 = u1;
}
// DEBUG: THE COMMENTED-OUT REGION HAD TO END HERE FOR IT TO WORK
gradAx /= area;
gradAy /= area;
gradAz /= area;
f64_vec3 anti_Advect;
anti_Advect.x = h*v_overall.dot(gradAx);
anti_Advect.y = h*v_overall.dot(gradAy);
anti_Advect.z = h*v_overall.dot(gradAz);
// Save off:
if (bAdd) {
anti_Advect += h*p_Addition_Rate[BEGINNING_OF_CENTRAL + index];
}
p_A_out[BEGINNING_OF_CENTRAL + index] += anti_Advect; // best way may be: if we know start of central stuff, can send
}; // ONLY FOR DOMAIN VERTEX
}; // IS THREAD IN THE FIRST HALF OF THE BLOCK
// =============================================================================
// Understand the following important fact:
// If you use 63 registers (and this routine surely will -
// we have positions 7 x 2 x 2 = 28 registers, A 7 x 3 x 2 = 42 registers
// -- though we could try taking it 1 dimension at a time)
// then the max thread throughput per SM is 512, which means that we will get
// no penalty from using up to 12 doubles in shared memory per thread.
// =============================================================================
// That does mean L1 has room for only 4 doubles per thread. It is not big compared to the register file itself.
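// (Arithmetic, assuming the 48 KB shared / 16 KB L1 configuration: 512 threads x 12 doubles x 8 bytes
// = 48 KB of shared memory, and 16 KB / 512 threads = 32 bytes = 4 doubles of L1 per thread.)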
}
__global__ void Kernel_Compute_Lap_A_and_Grad_A_to_get_B_on_all_minor(
f64_vec3 * __restrict__ p_A_tri,
f64_vec3 * __restrict__ p_A_vert,
structural * __restrict__ p_info,
f64_vec2 * __restrict__ p_tri_centroid,
CHAR4 * __restrict__ p_tri_perinfo,
CHAR4 * __restrict__ p_tri_per_neigh,
LONG3 * __restrict__ p_corner_index,
LONG3 * __restrict__ p_neigh_tri_index,
long * __restrict__ p_IndexTri,
// output:
f64_vec3 * __restrict__ p_Lap_A,
f64_vec3 * __restrict__ p_Lap_A_central,
f64_vec3 * __restrict__ p_B,
f64_vec3 * __restrict__ p_B_central // could just infer
)
{
// The logic here. Lap A requires A on quadrilateral over each honey-edge.
// Therefore we need both tri and vertex values of A at once.
// The same applies for Lap_A_central as for Lap_A_tri.
// Therefore we carry on to do Lap_A_central using the same data ; in fact we can
// avoid loading Indextri because we work on the result in shared memory as we are doing tris.
__shared__ f64_vec3 A_tri[threadsPerTileMinor];
__shared__ f64_vec2 tri_centroid[threadsPerTileMinor]; // 5
__shared__ f64_vec3 A_vert[SIZE_OF_MAJOR_PER_TRI_TILE]; // +1.5
__shared__ f64_vec2 vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE];// altogether 7.5 doubles per thread so far here.
// __shared__ short shared_per[threadsPerTileMinor]; // short easier to access than char maybe.
// If we want 512 threads/SM, 12 doubles in shared per thread is limit.
// We can accommodate 12 .. so 6 per major in addition to this but not when we have shared_per.
// Well we could limit it to 10 tris but it's asking for trouble.
// 6 longs = 3 doublesworth per thread
__shared__ long IndexTri[SIZE_OF_MAJOR_PER_TRI_TILE*MAXNEIGH_d];
// __shared__ char PBCtri[SIZE_OF_MAJOR_PER_TRI_TILE*MAXNEIGH_d];
// Total 3+2+1.5+1+3 = 10.5 -- it ought to work -- plus shared_per. Even if that counts
// as a long, we still just about get it.
// It OUGHT still to run 2 blocks per SM.
// Only half the threads will continue to the 2nd part. But on the other hand,
// if each major thread has 6 (ind) + 2*5 + 5 = 21+ doubles, only 256 of those can run.
// Anything else needed? Yes - the list of chars -- which is 6 bytes per thread here
// and thus makes this all too chancy.
// Go with the unintelligent way -- two separate routines ??
// Note that if a triangle is not periodic itself then it's impossible that its data
// needs to be rotated for central, since central is a corner of the triangle.
// Therefore we can consult shared_per list instead. Okay but what if a tri is not in the list?
//
// __shared__ f64_vec3 Lap_A_central[SIZE_OF_MAJOR_PER_TRI_TILE]; // +1.5
// Let's hope atomicAdd to shared isn't as bad as we expect.
// https://devtalk.nvidia.com/default/topic/514085/cuda-programming-and-performance/atomicadd-in-shared-memory-is-measured-slower-than-in-global-memory-timing-shared-memory-atomic-o/
// says that it's AWFUL.
// Factors against doing:
// _ Must take account of periodic in applying effect from this side
// _ Must do atomic add to an extra array to avoid conflicting with other blocks' contribs
// _ Must do atomic add within shared memory to avoid conflicting with other threads' contribs
// We got so far 7.5 to 8 doubles per go.
// Do we want to add 3 more, put the routine following this. Yes.
// do we also want tri centroid? probably yes really
// Do we need shared flags?
// Note that we do not need to do 3D at all to do LAP - this is something where we can use 1 dimension at a time.
// Problem with that, we'd have to load A all over again.
// We could try it both ways.
// Stripping back the solution to 1D at a time, is probably just tinkering at the edges.
// The only thing worth comparing is if we do both that AND reload values 3x to do Ax,Ay,Az separately.
// Now bear in mind: if 10 doubles is a lot for shared that is 48K, 5 doubles is already a lot for L1.
// Do first with 3 dimensions Axyz at once - may be slower but we'll see.
long index = blockIdx.x*blockDim.x + threadIdx.x;
long StartTri = blockIdx.x*blockDim.x; // can replace this one.
long StartMajor = SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x; // can replace this one.
// could replace with #define here.
A_tri[threadIdx.x] = p_A_tri[index];
tri_centroid[threadIdx.x] = p_tri_centroid[index];
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
A_vert[threadIdx.x] = p_A_vert[SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x + threadIdx.x];
structural info = p_info[SIZE_OF_MAJOR_PER_TRI_TILE*blockIdx.x + threadIdx.x];
vertex_pos[threadIdx.x] = info.pos;
// f64_vec3 zero(0.0,0.0,0.0);
// Lap_A_central[threadIdx.x] = zero;
// To save Lap A central solution we'd need to send it to the array per this tile's colour
// and then aggregate the results, divide by shoelace?
}
// shared_per[threadIdx.x] = perinfo.per0+perinfo.per1+perinfo.per2; // if periodic tri then neigh will need to be able to know.
__syncthreads();
// perinfo is still in scope later on but we'd rather get rid of it.
// The construction here is so we can get it before syncthreads, which is awkward.
f64_vec3 LapA(0.0,0.0,0.0);
f64_vec3 B(0.0,0.0,0.0);
CHAR4 perinfo = p_tri_perinfo[index];
if ((perinfo.flag != OUTER_FRILL) && (perinfo.flag != INNER_FRILL))
{
// We may need to find a way to AVOID doing branch around memory accesses.
// For frills, we would get a division by zero I'd expect.
// We probably should be splitting out tri vs central.
f64_vec2 edgenormal; // moving this inside unfortunately did not make any gains at all.
LONG3 corner_index = p_corner_index[index];
LONG3 neightri = p_neigh_tri_index[index];
CHAR4 tri_rotate = p_tri_per_neigh[index];
// Note that A, as well as position, has to be ROTATED to make a contiguous image.
// This tri minor has 3 edges with triangles and 3 edges with centrals.
// To accumulate Lap_A_central at the same time:
// * We should colour the blocks so that no two colours are shared by 1 major. That is possible.
// * The block outputs to its own colour array of centrals affected.
// * Then we aggregate the colour arrays.
// @@@@@@@@@@@@@@@@
// Now consider another one: what if we launched 3 threads per triangle. Same shared data for block as here.
// Does that really help anything? Think no.
// We need to divide by area when we've done something.
f64 area = 0.0;
f64_vec2 pos0(0.0,0.0), pos1(1.0,0.0), pos2(0.0,1.0);
// DEBUG: WITH EVERYTHING COMMENTED OUT FROM HERE ONWARDS, IT WORKED.
// f64_vec3 Avert0,Avert1,Avert2;
// We need 4 values at a time in order to do a side.
// We don't need to have all 7 values (3+ 3 + itself)
// So we'd be better just to load one quadrilateral's doings at a time, given the paucity of register and L1 space.
// Either we store all 7x3 A-values at once + 7 positions. Or we use 4 and 4 positions at once.
// Bear in mind a partial saving might yield big benefits.
// HAZARD: we don't know the ordering of the points.
// Halfway house: for simplicity store all the positions already loaded.
// A does not load from the same place anyway.
// Then go round the quadrilaterals.
// THIS BIT IS ENOUGH TO CRASH IT:
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
//pos0 = vertex_pos[corner_index.i1-StartMajor]; // this line okay
} else {
// This bit breaks it:
//structural info = p_info[corner_index.i1];
if ((corner_index.i1 >= 0) && (corner_index.i1 < 36864))
{
// Debug: Rule out that it's a bad index
structural info = p_info[corner_index.i1];
pos0 = info.pos;
} else {
printf("%d %d %d %d \n##################################################\n",
index,corner_index.i1,corner_index.i2, corner_index.i3);
// comes out with a big negative value
// the same thing happens when we call with pX1->tri_corner_index
// yet when we output tri_corner_index it is all valid.
};
}
/* if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos1 = vertex_pos[corner_index.i2-StartMajor];
} else {
structural info = p_info[corner_index.i2];
pos1 = info.pos;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos2 = vertex_pos[corner_index.i3-StartMajor];
} else {
structural info = p_info[corner_index.i3];
pos2 = info.pos;
}*/
char periodic = perinfo.per0 + perinfo.per1 + perinfo.per2;
if (periodic == 0) {
} else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI) {
pos0 = Anticlock_rotate2(pos0);
// Avert0 = Anticlock_rotate2(Avert0);
}
if (perinfo.per0 == NEEDS_CLOCK) {
pos0 = Clockwise_rotate2(pos0);
// Avert0 = Clockwise_rotate2(Avert0);
}
if (perinfo.per1 == NEEDS_ANTI) {
pos1 = Anticlock_rotate2(pos1);
// Avert1 = Anticlock_rotate2(Avert1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
pos1 = Clockwise_rotate2(pos1);
// Avert1 = Clockwise_rotate2(Avert1);
}
if (perinfo.per2 == NEEDS_ANTI) {
pos2 = Anticlock_rotate2(pos2);
// Avert2 = Anticlock_rotate2(Avert2);
}
if (perinfo.per2 == NEEDS_CLOCK) {
pos2 = Clockwise_rotate2(pos2);
// Avert2 = Clockwise_rotate2(Avert2);
}
};
f64_vec2 u0(0.0,0.0),u1(1.0,1.0),u2(1.0,3.0); // to be the positions of neighbouring centroids
/*
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
u0 = tri_centroid[neightri.i1-StartTri];
} else {
u0 = p_tri_centroid[neightri.i1];
}
if (tri_rotate.per0 == NEEDS_CLOCK) {
u0 = Clockwise_rotate2(u0);
}
if (tri_rotate.per0 == NEEDS_ANTI) {
u0 = Anticlock_rotate2(u0);
}
// Am I correct that this is to avoid tri_neigh_per information being recorded...
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + threadsPerTileMinor))
{
u1 = tri_centroid[neightri.i2-StartTri];
} else {
u1 = p_tri_centroid[neightri.i2];
}
if (tri_rotate.per1 == NEEDS_CLOCK) {
u1 = Clockwise_rotate2(u1);
}
if (tri_rotate.per1 == NEEDS_ANTI) {
u1 = Anticlock_rotate2(u1);
}
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + threadsPerTileMinor))
{
u2 = tri_centroid[neightri.i3-StartTri];
} else {
u2 = p_tri_centroid[neightri.i3];
}
if (tri_rotate.per2 == NEEDS_CLOCK) {
u2 = Clockwise_rotate2(u2);
}
if (tri_rotate.per2 == NEEDS_ANTI) {
u2 = Anticlock_rotate2(u2);
}*/
// ............................................................................................
// . I think working round with 4 has a disadvantage: if we get back around to one that is off-tile,
// we have to load it all over again. Still that is only 1 out of 7 that gets duplicated.
// Here is the best thing I can come up with: store 7 positions. That is already
// 28 longs' worth... each A-value uses 6 of the 7 positions to have an effect.
// Load each A-value at a time and recalc shoelace for 3 quadrilaterals. ??
// Too complicated.
// If we store all positions, can we finish with each A as we handle it? Yes but let's not.
f64_vec2 ourpos = tri_centroid[threadIdx.x]; // can try with and without this assignment to variable
f64_vec3 A0 = A_tri[threadIdx.x]; // can try with and without this assignment to variable
f64_vec3 A_1(0.0,0.0,0.0),
A_out(1.0,2.0,3.0),
A_2(4.0,5.0,6.0);
// Our A: A_tri[threadIdx.x]
/*
// Now fill in the A values:
// ____________________________
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_1 = A_vert[corner_index.i2-StartMajor];
} else {
A_1 = p_A_vert[corner_index.i2];
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i3-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i3];
}
if (perinfo.per1 == NEEDS_ANTI) {
A_1 = Anticlock_rotate3(A_1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
A_1 = Clockwise_rotate3(A_1);
}
if (perinfo.per2 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
};
if (perinfo.per2 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
};
/*
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
A_out = A_tri[neightri.i1-StartTri];
} else {
A_out = p_A_tri[neightri.i1];
}
if (tri_rotate.per0 != 0) {
if (tri_rotate.per0 == NEEDS_CLOCK) {
A_out = Clockwise_rotate3(A_out);
} else {
A_out = Anticlock_rotate3(A_out);
};
};*/
// ======================================================
f64 shoelace = (ourpos.x-u0.x)*(pos1.y-pos2.y)
+ (pos1.x-pos2.x)*(u0.y-ourpos.y); // if u0 is opposite point 0
// clock.x-anti.x
// We are now going to put the corners of the minor cell at
// e.g. 1/3(pos1 + u0 + ourpos)
// rather than at
// e.g. 2/3 pos1 + 1/3 pos2
//corner1 = 0.3333333*(pos1+u0+ourpos)
//corner2 = 0.3333333*(pos2+u0+ourpos)
//edgenormal.x = corner1.y-corner2.y = 0.333333(pos1.y-pos2.y) -- so no change here
edgenormal.x = (pos1.y-pos2.y)*0.333333333333333;
edgenormal.y = (pos2.x-pos1.x)*0.333333333333333; // cut off 1/3 of the edge
if (edgenormal.dot(pos0-pos1) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// note: same coeff to A0->grad_x as to x0 in shoelace:
f64 coeff = ((pos1.y-pos2.y)*edgenormal.x +
(pos2.x-pos1.x)*edgenormal.y)/shoelace;
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
coeff = ((u0.y-ourpos.y)*edgenormal.x +
(ourpos.x-u0.x)*edgenormal.y)/shoelace; // from top line same
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
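// i.e. grad A over the quadrilateral (ourpos, pos1, u0, pos2) comes from the shoelace formula,
// grad_x A = [ (A0-A_out)*(pos1.y-pos2.y) + (A_1-A_2)*(u0.y-ourpos.y) ] / shoelace  (and similarly grad_y),
// and the two coeff terms above together add (grad A).dot(edgenormal): this edge's share of the
// contour integral of dA/dn, which divided by the cell area approximates Lap A.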
// Think about averaging at typical edge.
// Using 5/12:
// corners are say equidistant from 3 points, so on that it would be 1/6
// but allocate the middle half of the bar to 50/50 A0+Aout.
// Bx = dAz/dy
//B.x += Az_edge*edgenormal.y;
// By = -dAz/dx
//B.y += -Az_edge*edgenormal.x;
// Bz = dAy/dx-dAx/dy
//B.z += Ay_edge*edgenormal.x-Ax_edge*edgenormal.y;
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
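// So B = curl A is built edge by edge from the same (5/12, 1/12) edge average of A,
// and like LapA it still has to be divided by the cell area at the end.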
// Now that we put minor corners at (1/3)(2 centroids+vertex), this makes even more sense.
area += 0.333333333333333*(0.5*(pos1.x+pos2.x)+ourpos.x+u0.x)*edgenormal.x;
/*
// ASSUMING ALL VALUES VALID (consider edge of memory a different case):
// From here on is where it gets thorny as we no longer map A_1 to vertex 1.
// %%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%$$%%%
A_1 = A_out; // now A_1 points at tri neigh 0
A_out = A_2; // now looking at vertex 2
// A_2 is now to point at tri neigh 1
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + threadsPerTileMinor))
{
A_2 = A_tri[neightri.i2-StartTri];
} else {
A_2 = p_A_tri[neightri.i2];
}
if (tri_rotate.per1 != 0) {
if (tri_rotate.per1 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
shoelace = (ourpos.x-pos2.x)*(u0.y-u1.y)
+ (u0.x-u1.x)*(pos2.y-ourpos.y); // can insert formula instead of creating var.
//x1 = (2/3)pos2+(1/3)pos0;
//x2 = (2/3)pos2+(1/3)pos1;
//edgenormal.x = (x1.y-x2.y);
//edgenormal.y = (x2.x-x1.x); // cut off 1/3 of the edge
edgenormal.x = 0.333333333333333*(pos0.y-pos1.y);
edgenormal.y = 0.333333333333333*(pos1.x-pos0.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos2-pos1) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((u0.y-u1.y)*edgenormal.x +
(u1.x-u0.x)*edgenormal.y)/shoelace; // This is correct - see coeff in shoelace on ourpos.y
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
//// Now do contribution to Lap A central for vertex 2:
//if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
//{
// f64_vec3 addition = coeff*(A0-A_out);
// atomicAdd((double *)(Lap_A_solution+neightri.i2-StartTri), addition.x);
// atomicAdd((double *)(Lap_A_solution+neightri.i2-StartTri)+1, addition.y);
// atomicAdd((double *)(Lap_A_solution+neightri.i2-StartTri)+2, addition.z);
// // Will this simultaneously be affected by other threads? YES
// // So have to use atomicAdd on shared memory.
//
// // I guess we learned our lesson: it really is more of a headache to do this way
// // than just to write a whole separate routine for central cells.
// // !
// // the workaround atomicAdd will make it slow because of converting to long-long ?
// // So this is probably slower than recreating the whole routine and calculating again.
// // :-(
//} else {
// f64_vec3 addition = coeff*(A0-A_out);
// atomicAdd((double *)(Lap_A_extra_array+neightri), addition.x);
// // We forgot something ELSE:
// // we have to take into account periodic orientation as well!
// // Okay let's scrap this attempt to create central at same time.
// // Unfortunately I do not see a way to overwrite part of shared memory with indices
// // either.
//}
// A_1 ~ u0, A_2 ~ u1
coeff = ((pos2.y-ourpos.y)*edgenormal.x +
(ourpos.x-pos2.x)*edgenormal.y)/shoelace;
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += 0.333333333333333*(0.5*(u0.x+u1.x)+ourpos.x+pos2.x)*edgenormal.x;
A_1 = A_out; // now A_1 points at corner 2
A_out = A_2; // now points at tri 1
// A_2 to point at corner 0
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i1-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i1];
}
if (perinfo.per0 != 0) {
if (perinfo.per0 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
}
if (perinfo.per0 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
}
}
shoelace = (ourpos.x-u1.x)*(pos2.y-pos0.y) // clock.y-anti.y
+ (pos2.x-pos0.x)*(u1.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos0.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos0.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos1-pos0) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((pos2.y-pos0.y)*edgenormal.x +
(pos0.x-pos2.x)*edgenormal.y)/shoelace; // see coeffs on ourpos in shoelace
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
// A_1~pos2 A_2~pos0
coeff = ((u1.y-ourpos.y)*edgenormal.x +
(ourpos.x-u1.x)*edgenormal.y)/shoelace; // something suspicious: that we had to change smth here.
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += 0.333333333333333*(0.5*(pos2.x+pos0.x)+ourpos.x+u1.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A_2 is now to point at tri neigh 2
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + threadsPerTileMinor))
{
A_2 = A_tri[neightri.i3-StartTri];
} else {
A_2 = p_A_tri[neightri.i3];
}
if (tri_rotate.per2 != 0) {
if (tri_rotate.per2 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
shoelace = (ourpos.x-pos0.x)*(u1.y-u2.y) // clock.y-anti.y
+ (u1.x-u2.x)*(pos0.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos1.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos1.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos0-pos1) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((u1.y-u2.y)*edgenormal.x +
(u2.x-u1.x)*edgenormal.y)/shoelace; // see coeffs on ourpos in shoelace
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
// A_1~u1 A_2~u2
coeff = ((pos0.y-ourpos.y)*edgenormal.x +
(ourpos.x-pos0.x)*edgenormal.y)/shoelace; // something suspicious: that we had to change smth here.
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += THIRD*(0.5*(u2.x+u1.x)+ourpos.x+pos0.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A2 to be for corner 1
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
A_2 = A_vert[corner_index.i2-StartMajor];
} else {
A_2 = p_A_vert[corner_index.i2];
}
if (perinfo.per1 != 0) {
if (perinfo.per1 == NEEDS_ANTI) {
A_2 = Anticlock_rotate3(A_2);
}
if (perinfo.per1 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} // CAREFUL WITH FLAGS N MEANINGS
}
shoelace = (ourpos.x-u2.x)*(pos0.y-pos1.y) // clock.y-anti.y
+ (pos0.x-pos1.x)*(u2.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos1.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos1.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos2-pos1) > 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((pos0.y-pos1.y)*edgenormal.x +
(pos1.x-pos0.x)*edgenormal.y)/shoelace; // see coeffs on ourpos in shoelace
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
// A_1~pos0 A_2~pos1
coeff = ((u2.y-ourpos.y)*edgenormal.x +
(ourpos.x-u2.x)*edgenormal.y)/shoelace; // something suspicious: that we had to change smth here.
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += 0.333333333333333*(0.5*(pos0.x+pos1.x)+ourpos.x+u2.x)*edgenormal.x;
A_1 = A_out;
A_out = A_2;
// A2 to be for tri 0
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
A_2 = A_tri[neightri.i1-StartTri];
} else {
A_2 = p_A_tri[neightri.i1];
}
if (tri_rotate.per0 != 0) {
if (tri_rotate.per0 == NEEDS_CLOCK) {
A_2 = Clockwise_rotate3(A_2);
} else {
A_2 = Anticlock_rotate3(A_2);
};
};
shoelace = (ourpos.x-pos1.x)*(u2.y-u0.y) // clock.y-anti.y
+ (u2.x-u0.x)*(pos1.y-ourpos.y);
edgenormal.x = 0.333333333333333*(pos2.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos2.x); // cut off 1/3 of the edge
if (edgenormal.dot(pos1-pos2) < 0.0) {
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
coeff = ((u2.y-u0.y)*edgenormal.x +
(u0.x-u2.x)*edgenormal.y)/shoelace; // see coeffs on ourpos in shoelace
LapA.x += coeff*(A0.x-A_out.x);
LapA.y += coeff*(A0.y-A_out.y);
LapA.z += coeff*(A0.z-A_out.z);
// A_1~pos0 A_2~pos1
coeff = ((pos1.y-ourpos.y)*edgenormal.x +
(ourpos.x-pos1.x)*edgenormal.y)/shoelace; // something suspicious: that we had to change smth here.
LapA.x += coeff*(A_1.x-A_2.x);
LapA.y += coeff*(A_1.y-A_2.y);
LapA.z += coeff*(A_1.z-A_2.z);
B.x += (TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.y;
B.y += -(TWELTH*(A_1.z+A_2.z)+FIVETWELTHS*(A0.z+A_out.z))*edgenormal.x;
B.z += (TWELTH*(A_1.y+A_2.y)+FIVETWELTHS*(A0.y+A_out.y))*edgenormal.x
- (TWELTH*(A_1.x+A_2.x)+FIVETWELTHS*(A0.x+A_out.x))*edgenormal.y;
area += 0.333333333333333*(0.5*(u0.x+u2.x)+ourpos.x+pos1.x)*edgenormal.x;
*/
// CHECKED ALL THAT
// Heavy calcs are actually here: six divisions!
LapA /= (area + 1000.0);
B /= (area + 1000.0); // DEBUG
} else {
// frill - leave Lap A = B = 0
}
p_Lap_A[index] = LapA;
p_B[index] = B;
// Similar routine will be needed to create grad A ... or Adot ... what a waste of calcs.
// Is there a more sensible way: only do a mesh move every 10 steps -- ??
// Then what do we do on the intermediate steps -- that's a problem -- flowing Eulerian fluid
// will give the right change in pressure, but then mesh has to catch up. Still that might be a thought.
// Next consideration: Lap A on central.
// Idea for doing at same time: (don't do it -- too much atomicAdd, I do not trust)
// ___ only certain major cells "belong" to this tri tile.
// ___ write to a given output from our total effect coming from this tile's tris.
// ____ when we hit a central cell outside this tile, send it atomicAdd to an array
// that collects up all the extra contribs to it.
// __ then we just reload, sum 2 things and divide
// However, native fp64 atomicAdd only exists from Compute Capability 6.0 :-(
// Workaround taken from http://stackoverflow.com/questions/16077464/atomicadd-for-double-on-gpu
// Eventually decided not to use but to carry on with half the threads to target centrals in this routine.
/*
// COMMENTED FOR DEBUG:
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
// Create Lap A for centrals.
// Outermost has to supply good boundary conditions for the outer edge.
index = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x;
structural info = p_info[index];
memcpy(IndexTri+threadIdx.x*MAXNEIGH_d, p_IndexTri+index*MAXNEIGH_d,
sizeof(long)*MAXNEIGH_d);
f64_vec3 A0 = A_vert[threadIdx.x]; // can ditch
f64_vec2 u0 = vertex_pos[threadIdx.x];
f64_vec3 A1,A2,A3;
f64_vec2 u1,u2,u3;
f64 shoelace, area = 0.0;
f64_vec2 edgenormal;
// As before we need 4 A values and positions at a time. Now 3 all come from tris.
LapA.x = 0.0; LapA.y = 0.0; LapA.z = 0.0;
B.x = 0.0; B.y = 0.0; B.z = 0.0;
// CAUTION: I found that unrolling can be slower if it uses up too many registers (!)
long iindextri;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// In this case there are extra triangles, for frills.
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+info.neigh_len];
} else {
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+info.neigh_len-1];
}
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A3 = A_tri[iindextri-StartTri];
u3 = tri_centroid[iindextri-StartTri];
} else {
A3 = p_A_tri[iindextri];
u3 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u3.x > 0.5*GRADIENT_X_PER_Y*u3.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A3 = Anticlock_rotate3(A3);
u3 = Anticlock_rotate2(u3);
};
if ((u3.x < -0.5*GRADIENT_X_PER_Y*u3.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A3 = Clockwise_rotate3(A3);
u3 = Clockwise_rotate2(u3);
};
}
// Initial situation: inext = 1, i = 0, iprev = -1
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d]; // + 0
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A2 = A_tri[iindextri-StartTri];
u2 = tri_centroid[iindextri-StartTri];
} else {
A2 = p_A_tri[iindextri];
u2 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u2.x > 0.5*GRADIENT_X_PER_Y*u2.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A2 = Anticlock_rotate3(A2);
u2 = Anticlock_rotate2(u2);
};
if ((u2.x < -0.5*GRADIENT_X_PER_Y*u2.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A2 = Clockwise_rotate3(A2);
u2 = Clockwise_rotate2(u2);
};
}
short limit = info.neigh_len;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
limit++;
// Ordinarily, number of tri pairs = number of tris = number of neighs
// For outermost, number of neighs = 4 but the number of tri pairs to use = 5.
// Now we attempt to go all the way round: A and u from frills are valid and we can
// form a quadrilateral
int inext = 0; // will be ++ straight away.
#pragma unroll MAXNEIGH_d
for (short i = 0; i < limit; i++)
{
inext++;
if (inext == limit) inext = 0;
iindextri = IndexTri[threadIdx.x*MAXNEIGH_d+inext];
if ((iindextri >= StartTri) && (iindextri < StartTri + threadsPerTileMinor))
{
A1 = A_tri[iindextri-StartTri];
u1 = tri_centroid[iindextri-StartTri];
} else {
A1 = p_A_tri[iindextri];
u1 = p_tri_centroid[iindextri];
};
if (info.has_periodic != 0) {
if ((u1.x > 0.5*GRADIENT_X_PER_Y*u1.y) && (u0.x < -0.5*GRADIENT_X_PER_Y*u0.y))
{
A1 = Anticlock_rotate3(A1);
u1 = Anticlock_rotate2(u1);
};
if ((u1.x < -0.5*GRADIENT_X_PER_Y*u1.y) && (u0.x > 0.5*GRADIENT_X_PER_Y*u0.y))
{
A1 = Clockwise_rotate3(A1);
u1 = Clockwise_rotate2(u1);
};
}
// Affect LapA,B:
// ==============
// edge_cnr1 = (u1+u2+u0)*0.333333333333333;
edgenormal.x = 0.333333333333333*(u1.y-u3.y);
edgenormal.y = 0.333333333333333*(u3.x-u1.x);
// edgenormal to point at u2:
if ((u2-u1).dot(edgenormal) < 0.0)
{
edgenormal.x=-edgenormal.x; edgenormal.y = -edgenormal.y;
}
shoelace = (u0.x-u2.x)*(u1.y-u3.y) +
(u1.x-u3.x)*(u2.y-u0.y);
//coeff = ((u1.y-u3.y)*edgenormal.x + (u3.x-u1.x)*edgenormal.y)/shoelace;
//LapA += coeff*(A0-A2);
LapA += (A0-A2)*(((u1.y-u3.y)*edgenormal.x + (u3.x-u1.x)*edgenormal.y)/shoelace);
//coeff = ((u2.y-u0.y)*edgenormal.x + (u0.x-u2.x)*edgenormal.y)/shoelace;
LapA += (A1-A3)*(((u2.y-u0.y)*edgenormal.x + (u0.x-u2.x)*edgenormal.y)/shoelace);
B.x += (TWELTH*(A1.z+A3.z)+FIVETWELTHS*(A0.z+A2.z))*edgenormal.y;
B.y += -(TWELTH*(A1.z+A3.z)+FIVETWELTHS*(A0.z+A2.z))*edgenormal.x;
B.z += (TWELTH*(A1.y+A3.y)+FIVETWELTHS*(A0.y+A2.y))*edgenormal.x
-(TWELTH*(A1.x+A3.x)+FIVETWELTHS*(A0.x+A2.x))*edgenormal.y;
area += (0.3333333333333333*(0.5*(u1.x+u3.x)+u2.x+u0.x))*edgenormal.x;
// ( grad x )_x
// move round A values and positions:
// ----------------------------------
A3 = A2;
u3 = u2;
A2 = A1;
u2 = u1;
}
// if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
// Add on the outer edge: dA/dr times length of edge.
// dAx/dr = -(Ax r)/r^2 = -Ax/r
// We find a way just to go all the way round instead.
LapA /= area;
B /= area;
// Save off:
p_Lap_A_central[index] = LapA; // best way may be: if we know start of central stuff, can send
p_B_central[index] = B; // into the 1 array where it belongs.
}
*/
// =============================================================================
// Understand the following important fact:
// If you use 63 registers (and this routine surely will -
// we have positions 7 x 2 x 2 = 28 registers, A 7 x 3 x 2 = 42 registers
// -- though we could try taking it 1 dimension at a time)
// then the max thread throughput per SM is 512, which means that we will get
// no penalty from using up to 12 doubles in shared memory per thread.
// =============================================================================
// That does mean L1 has room for only 4 doubles per thread. It is not big compared to the register file itself.
}
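// Illustrative launch of the kernel above (numTilesMinor is a placeholder for the number of minor
// tiles; the shared arrays assume blockDim.x == threadsPerTileMinor):
// Kernel_Compute_Lap_A_and_Grad_A_to_get_B_on_all_minor<<<numTilesMinor, threadsPerTileMinor>>>(
//     p_A_tri, p_A_vert, p_info, p_tri_centroid, p_tri_perinfo, p_tri_per_neigh,
//     p_corner_index, p_neigh_tri_index, p_IndexTri,
//     p_Lap_A, p_Lap_A_central, p_B, p_B_central);
// cudaDeviceSynchronize(); // or at least check cudaGetLastError() before using the outputs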
__global__ void Kernel_Rel_advect_v_tris (
f64 h,
structural * __restrict__ p_info,
nT * __restrict__ p_nT_minor,
nT * __restrict__ p_nT_minor_new,
f64_vec2 * __restrict__ p_v_overall_minor,
f64_vec3 * __restrict__ p_v_minor,
f64_vec2 * __restrict__ p_tri_centroid,
LONG3 * __restrict__ p_tri_corner_index,
LONG3 * __restrict__ p_tri_neigh_index,
CHAR4 * __restrict__ p_tri_per_info,
CHAR4 * __restrict__ p_tri_per_neigh, // is neighbour re-oriented rel to this
f64 * __restrict__ p_area_old, // get from where?
f64 * __restrict__ p_area_new,
f64_vec3 * __restrict__ p_v_out
)
{
// Idea of momentum advection
// ==========================
// n_tri has been inferred from n_major
// Average nv to the edge between minors;
// find mom flow
// ARE WE CLEAR ABOUT USING nv AT ALL? NEED TO CHECK CORRESPONDENCE ---
// v_k+1 = (n_k area_k v_k + additional mom) / (n_k+1 area_k+1)
// Need rel to v_overall ...
// Let's assume this kernel is called for threads corresp to ##triangles##.
// This info needed to do the "more proper" way with v_edge subtracted from each v that gets averaged.
/* __shared__ f64_vec2 tri_centroid[blockDim.x]; // + 2
__shared__ f64_vec2 vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE]; // + 1
__shared__ f64_vec3 p_v_tri[blockDim.x]; // + 3
__shared__ f64_vec3 p_v_central[SIZE_OF_MAJOR_PER_TRI_TILE]; // + 1.5
__shared__ f64 p_n_central[SIZE_OF_MAJOR_PER_TRI_TILE]; // + 0.5
__shared__ f64 p_n_tri[blockDim.x]; // + 1 = 9
__shared__ f64_vec2 p_v_overall[blockDim.x]; // +2
__shared__ f64_vec2 p_v_overall[SIZE_OF_MAJOR_PER_TRI_TILE]; // +1 needs to be limited to vertices --
*/
// 9+3 = 12 so that leaves no room for tri perflag - but that's OK.
__shared__ f64_vec2 tri_centroid[threadsPerTileMinor];
__shared__ f64_vec3 v_tri[threadsPerTileMinor];
__shared__ f64_vec2 n_vrel_tri[threadsPerTileMinor];
// For central cells, going to have to run all over again with the following
// replaced by __shared__ long IndexTri[MAXNEIGH_d*SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec2 n_vrel_central[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec3 v_central[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec2 vertex_pos[SIZE_OF_MAJOR_PER_TRI_TILE]; // 2 + 1 + 3+ 1.5 +2 +1 = 10.5
// It is more certain that something vile does not go wrong, if we do stick with loading
// tri index each central.
// But we don't have room for that here due to sharing v_central.
// So we basically have to write 2 routines, even doing it this way. :-[
// Consider to chop and change to the alternative: how can we try to ensure that we do
// get a contiguous access each time we do a load and go through? We can't because it may
// do extra bus loads for some threads in-between.
// So. Stick with inelegant ways.
// __shared__ char shared_per[blockDim.x]; // tri periodic info --- there may be other more sensible ways though
// I'm seeing now that there is sense in just loading a CHAR4 with the information in.
// ?
// Then it doesn't even need to do a load of tests - the CPU decides beforehand, by looking at the
// next triangle, where a periodic rotation is needed.
// Loading per_info for itself and putting into shared is reasonable mind you. It's not an extra load.
// But sometimes there IS an extra load then, because we have to ask edge triangles about their periodic data
// and that is not a contiguous fetch.
// Keep shared memory cleaner. Okay then. So we COULD then fit in all the other things separately if we wanted.
// But how would we ideally do the advect formula then?
// Overall v_edge comes from
// But actually there is no reason to average v_edge along the edge.
// Look instead at each end.
// There we have v_overall = average of 3. nv = average of 3.
// So then do I want to calc v_overall, push it back into each of them: sum n_i (v_i - [v_overall = avg])
// These seem like niceties.
// ______________________________________________________________________________
// can create on our own triangle but how shall we create for edge?
// Use vertices nearby edge instead??
// These are the ones moving and therefore moving the edge, not the tri centroid.
// IMPORTANT:
// Another alternative way is to infer the edge motion from the 4 relevant points, but use
// only the opposing 2, or use 5/12, to create v of species.
// To get actual conservation of momentum we have to run again and divide by,
// for each cell, NEW N_k+1 that comes from n_k+1 avged, area_k+1.
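// In short: for each of the six edges of this tri minor the code below accumulates
// d(Nv) = -h * [edge average of n*(v - v_overall)] . edgenormal * [mean v of the two facing cells],
// the momentum carried across the (moving) edge; division by the new N_k+1 = n_k+1 * area_k+1
// to recover v is left to a later pass, as noted above.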
long StartTri = blockIdx.x*threadsPerTileMinor;
//long EndMinor = (blockIdx.x+1)*blockDim.x; // can ditch
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long index = blockIdx.x*blockDim.x + threadIdx.x;
// Valid traffic of momentum:
tri_centroid[threadIdx.x] = p_tri_centroid[index];
f64_vec3 v_own = p_v_minor[index];
v_tri[threadIdx.x] = v_own;
f64 n_own = p_nT_minor[index].n;
f64_vec2 v_overall = p_v_overall_minor[index];
f64_vec2 nvrel;
nvrel.x = n_own*(v_own.x - v_overall.x);
nvrel.y = n_own*(v_own.y - v_overall.y);
n_vrel_tri[threadIdx.x] = nvrel;
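// nvrel = n*(v - v_overall) is the number-density-weighted velocity relative to the moving mesh:
// the quantity whose flux through a (moving) edge carries momentum between minor cells.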
// What makes this way better?
// Isn't it better to put
// store n_s, store v_overall, store v_s.
CHAR4 perinfo = p_tri_per_info[index];
// CHAR4 perneighinfo = p_tri_per_neigh[index];
// 3 chars for neighs per0,1,2 to show rel rotation; 'periodic' is just padding.
// If we load tri_per_info for neighbours then ?
// If the neigh is periodic and we are not, we can tell from x-values.
// How was it done for Lap A??
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE) {
structural info = p_info[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
vertex_pos[threadIdx.x] = info.pos;
v_central[threadIdx.x] = p_v_minor[ BEGINNING_OF_CENTRAL + blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x ];
f64 n = p_nT_minor[ BEGINNING_OF_CENTRAL + blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x ].n;
v_overall = p_v_overall_minor[ BEGINNING_OF_CENTRAL + blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x ];
nvrel.x = n*(v_central[threadIdx.x].x - v_overall.x);
nvrel.y = n*(v_central[threadIdx.x].y - v_overall.y);
n_vrel_central[threadIdx.x] = nvrel;
// Saved data vertex_pos, v_central, n_vrel_central | for each vertex in tile.
}
__syncthreads();
nvrel = n_vrel_tri[threadIdx.x];
if (perinfo.flag == DOMAIN_TRIANGLE)
{
// The other cases:
// CROSSING_INS, we assume v = 0 for now
// OUTER_FRILL, v = 0
// INNER_TRIANGLE, v = 0
//nT nTsrc = p_nT_shared[threadIdx.x];
f64 area_old = p_area_old[index]; // where getting these from?
f64 area_new = p_area_new[index];
LONG3 corner_index = p_tri_corner_index[index];
LONG3 neightri = p_tri_neigh_index[index];
CHAR4 perneigh = p_tri_per_neigh[index];
// Of course, if we were smart we could roll these into 3 longs
// in both cases, because we only need 24 bits to describe index.
// Ultimately that would be better.
f64_vec2 pos0, pos1, pos2, edgenormal;
f64_vec2 u0,u1,u2, ownpos;
f64_vec3 Nv(0.0,0.0,0.0);
// Create pos0,1,2 and adjust for periodic:
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos0 = vertex_pos[corner_index.i1-StartMajor];
} else {
structural info = p_info[corner_index.i1];
pos0 = info.pos;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos1 = vertex_pos[corner_index.i2-StartMajor];
} else {
structural info = p_info[corner_index.i2];
pos1 = info.pos;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
pos2 = vertex_pos[corner_index.i3-StartMajor];
} else {
structural info = p_info[corner_index.i3];
pos2 = info.pos;
};
if (perinfo.per0 == NEEDS_ANTI) {
pos0 = Anticlock_rotate2(pos0);
}
if (perinfo.per0 == NEEDS_CLOCK) {
pos0 = Clockwise_rotate2(pos0);
}
if (perinfo.per1 == NEEDS_ANTI) {
pos1 = Anticlock_rotate2(pos1);
}
if (perinfo.per1 == NEEDS_CLOCK) {
pos1 = Clockwise_rotate2(pos1);
}
if (perinfo.per2 == NEEDS_ANTI) {
pos2 = Anticlock_rotate2(pos2);
}
if (perinfo.per2 == NEEDS_CLOCK) {
pos2 = Clockwise_rotate2(pos2);
}
// };
// Create u0,1,2 and adjust for periodic:
// CHAR4 tri_rotate(0,0,0,0); // 4 chars but really using 3
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
u0 = tri_centroid[neightri.i1-StartTri];
// perneigh = shared_per[neightri.i1-StartTri];
} else {
u0 = p_tri_centroid[neightri.i1];
// CHAR4 perinfoneigh = p_tri_per_info[neightri.i1];
// perneigh = perinfoneigh.periodic; // just load and use 1 char ?...
}
//if (perneigh != perinfo.periodic) {
// // Test to see if we need to rotate the neighbour centroid and A:
// if ((perneigh != 0) && (ownpos.x > 0.0)) {
// // Avoid loading per flags again: save this as a char
// tri_rotate.per0 = 1; // rotate it clockwise
// u0 = Clockwise_rotate2(u0);
// };
// if ((perinfo.periodic != 0) && (u0.x > 0.0)) {
// u0 = Anticlock_rotate2(u0);
// tri_rotate.per0 = -1;
// };
//};
// ^^ Did I decide this was bad for some reason? Better to load
// just a char4 for periodic relationship to neighs? COULD BE.
// When we load all of these for edge ones it's individual.
// 64 accesses vs 256/12. 256/8 = 32 so it's better this way round.
// HMM
if (perneigh.per0 == NEEDS_ANTI)
u0 = Anticlock_rotate2(u0);
if (perneigh.per0 == NEEDS_CLOCK)
u0 = Clockwise_rotate2(u0);
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + threadsPerTileMinor))
{
u1 = tri_centroid[neightri.i2 - StartTri];
} else {
u1 = p_tri_centroid[neightri.i2];
};
if (perneigh.per1 == NEEDS_ANTI)
u1 = Anticlock_rotate2(u1);
if (perneigh.per1 == NEEDS_CLOCK)
u1 = Clockwise_rotate2(u1);
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + threadsPerTileMinor))
{
u2 = tri_centroid[neightri.i3 - StartTri];
} else {
u2 = p_tri_centroid[neightri.i3];
};
if (perneigh.per2 == NEEDS_ANTI)
u2 = Anticlock_rotate2(u2);
if (perneigh.per2 == NEEDS_CLOCK)
u2 = Clockwise_rotate2(u2);
// Let's say that we only need to take the average v with the opposite cell.
// Edge facing tri 0:
edgenormal.x = 0.333333333333333*(pos1.y-pos2.y);
edgenormal.y = 0.333333333333333*(pos2.x-pos1.x);
if ((pos0-pos1).dot(edgenormal) > 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// The edge is moving with ?
// Corners at (1/3)(ownpos + u2 + pos0),(1/3)(ownpos+u2 + pos1)
// v_overall only really matters insofar that it has a dotproduct with edgenormal.
// Think about this clearly.
// v_overall was generated in major cells.
// Then it was averaged out to triangles.
// Here the edge endpoints are formed by taking the average of 2 centroids + 1 vertex.
// Therefore we do want to use v_overall from those 4 locations.
f64_vec2 nvrel_prev, nvrel_out, nvrel_next;
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
nvrel_prev = n_vrel_central[corner_index.i2-StartMajor];
} else {
f64_vec3 v = p_v_minor[ BEGINNING_OF_CENTRAL + corner_index.i2];
f64 n = p_nT_minor[ BEGINNING_OF_CENTRAL + corner_index.i2].n;
v_overall = p_v_overall_minor[ BEGINNING_OF_CENTRAL + corner_index.i2];
nvrel_prev.x = n*(v.x - v_overall.x);
nvrel_prev.y = n*(v.y - v_overall.y);
};
if (perinfo.per1 == NEEDS_ANTI)
nvrel_prev = Anticlock_rotate2(nvrel_prev);
if (perinfo.per1 == NEEDS_CLOCK)
nvrel_prev = Clockwise_rotate2(nvrel_prev);
// Every single one of these rotates will need to be checked.
f64_vec3 v_out, vnext;
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
nvrel_out = n_vrel_tri[neightri.i1-StartTri];
v_out = v_tri[neightri.i1-StartTri];
} else {
f64_vec3 v = p_v_minor [ neightri.i1];
f64 n = p_nT_minor[neightri.i1].n;
v_overall = p_v_overall_minor[neightri.i1];
// I do not say this is the best way. Only that it is a way.
nvrel_out.x = n*(v.x- v_overall.x);
nvrel_out.y = n*(v.y - v_overall.y);
v_out = v;
};
if (perneigh.per0 == NEEDS_ANTI)
{
nvrel_out = Anticlock_rotate2(nvrel_out);
v_out = Anticlock_rotate3(v_out);
};
if (perneigh.per0 == NEEDS_CLOCK)
{
nvrel_out = Clockwise_rotate2(nvrel_out);
v_out = Clockwise_rotate3(v_out);
};
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
nvrel_next = n_vrel_central[corner_index.i3-StartMajor];
vnext = v_central[corner_index.i3-StartMajor];
} else {
vnext = p_v_minor [BEGINNING_OF_CENTRAL + corner_index.i3];
f64 n = p_nT_minor[BEGINNING_OF_CENTRAL + corner_index.i3].n;
v_overall = p_v_overall_minor[BEGINNING_OF_CENTRAL + corner_index.i3];
nvrel_next.x = n*(vnext.x - v_overall.x);
nvrel_next.y = n*(vnext.y - v_overall.y);
// Need 'vnext' to avoid loading data twice.
};
// So we keep how many in memory? 3 out of 6. Then we move round.
if (perinfo.per2 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perinfo.per2 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
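	// nvrel is averaged along this edge: each endpoint is the mean of (own centroid, neighbouring
	// centroid, one shared vertex) -- the "2 centroids + 1 vertex" noted above -- so the two-endpoint
	// average amounts to weights (2,2,1,1)/6 on (own, out, prev, next); hence the SIXTH factor below.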
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// Edge facing point 2:
edgenormal.x = 0.333333333333333*(pos1.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos1.x);
if ((pos2-pos1).dot(edgenormal) < 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is tri 1
if ((neightri.i2 >= StartTri) && (neightri.i2 < StartTri + threadsPerTileMinor))
{
nvrel_next = n_vrel_tri[neightri.i2-StartTri];
vnext = v_tri[neightri.i2-StartTri];
} else {
f64_vec3 v = p_v_minor [ neightri.i2];
f64 n = p_nT_minor[neightri.i2].n;
v_overall = p_v_overall_minor[neightri.i2];
// I do not say this is the best way. Only that it is a way.
nvrel_next.x = n*(v.x- v_overall.x);
nvrel_next.y = n*(v.y - v_overall.y);
vnext = v;
};
if (perneigh.per1 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perneigh.per1 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// Edge facing tri 1:
edgenormal.x = 0.333333333333333*(pos2.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos2.x);
if ((pos1-pos0).dot(edgenormal) > 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is point 0
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
nvrel_next = n_vrel_central[corner_index.i1-StartMajor];
vnext = v_central[corner_index.i1-StartMajor];
} else {
vnext = p_v_minor [BEGINNING_OF_CENTRAL + corner_index.i1];
f64 n = p_nT_minor[BEGINNING_OF_CENTRAL + corner_index.i1].n;
v_overall = p_v_overall_minor[BEGINNING_OF_CENTRAL + corner_index.i1];
nvrel_next.x = n*(vnext.x - v_overall.x);
nvrel_next.y = n*(vnext.y - v_overall.y);
// Need 'vnext' to avoid loading data twice.
};
if (perinfo.per0 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perinfo.per0 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// Edge facing point 0:
edgenormal.x = 0.333333333333333*(pos2.y-pos1.y);
edgenormal.y = 0.333333333333333*(pos1.x-pos2.x);
if ((pos0-pos1).dot(edgenormal) < 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is tri 2 :
if ((neightri.i3 >= StartTri) && (neightri.i3 < StartTri + threadsPerTileMinor))
{
nvrel_next = n_vrel_tri[neightri.i3-StartTri];
vnext = v_tri[neightri.i3-StartTri];
} else {
f64_vec3 v = p_v_minor [ neightri.i3];
f64 n = p_nT_minor[neightri.i3].n;
v_overall = p_v_overall_minor[neightri.i3];
// I do not say this is the best way. Only that it is a way.
nvrel_next.x = n*(v.x- v_overall.x);
nvrel_next.y = n*(v.y - v_overall.y);
vnext = v;
};
if (perneigh.per2 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perneigh.per2 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// Edge facing tri 2:
edgenormal.x = 0.333333333333333*(pos0.y-pos1.y);
edgenormal.y = 0.333333333333333*(pos1.x-pos0.x);
if ((pos2-pos1).dot(edgenormal) > 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is point 1
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE))
{
nvrel_next = n_vrel_central[corner_index.i2-StartMajor];
vnext = v_central[corner_index.i2-StartMajor];
} else {
vnext = p_v_minor [BEGINNING_OF_CENTRAL + corner_index.i2];
f64 n = p_nT_minor[BEGINNING_OF_CENTRAL + corner_index.i2].n;
v_overall = p_v_overall_minor[BEGINNING_OF_CENTRAL + corner_index.i2];
nvrel_next.x = n*(vnext.x - v_overall.x);
nvrel_next.y = n*(vnext.y - v_overall.y);
// Need 'vnext' to avoid loading data twice.
};
if (perinfo.per1 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
vnext = Anticlock_rotate3(vnext);
};
if (perinfo.per1 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
vnext = Clockwise_rotate3(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// Edge facing point 1:
edgenormal.x = 0.333333333333333*(pos2.y-pos0.y);
edgenormal.y = 0.333333333333333*(pos0.x-pos2.x);
if ((pos1-pos0).dot(edgenormal) < 0.0)
{
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
};
// Churn them around:
// "next" is now "out"
nvrel_prev = nvrel_out;
v_out = vnext;
nvrel_out = nvrel_next;
// new 'next' is tri 0
if ((neightri.i1 >= StartTri) && (neightri.i1 < StartTri + threadsPerTileMinor))
{
nvrel_next = n_vrel_tri[neightri.i1-StartTri];
// vnext = v_tri[neightri.i1-StartTri];
} else {
f64_vec3 v = p_v_minor [ neightri.i1];
f64 n = p_nT_minor[neightri.i1].n;
v_overall = p_v_overall_minor[neightri.i1];
// I do not say this is the best way. Only that it is a way.
nvrel_next.x = n*(v.x - v_overall.x);
nvrel_next.y = n*(v.y - v_overall.y);
// vnext = v;
};
if (perneigh.per0 == NEEDS_ANTI)
{
nvrel_next = Anticlock_rotate2(nvrel_next);
// vnext = Anticlock_rotate2(vnext);
};
if (perneigh.per0 == NEEDS_CLOCK)
{
nvrel_next = Clockwise_rotate2(nvrel_next);
// vnext = Clockwise_rotate2(vnext);
};
// momflow = h*(nv.dot(edgenormal))*v;
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_out + nvrel_out + nvrel_prev + nvrel_next).dot
(edgenormal))*(0.5*(v_out + v_own));
// ....................................
// that's it - that was 6.
// -------------------------------------------------
	Nv += n_own*v_own*area_old; // n_own, v_own are the values saved for this cell;
	// the working 'n' was overwritten by neighbour loads above, which is why the saved copies are used here.
// save off:
f64 dest_n = p_nT_minor_new[index].n;
p_v_out[index] = (Nv / (dest_n*area_new));
////if (index == 43654) {
//// printf("43654: %1.8E %1.8E %1.8E | %1.8E %1.8E | %1.8E %1.8E | %1.8E %1.8E %1.8E \n",
//// Nv.x,Nv.y,Nv.z,dest_n,area_new, n_own, area_old,v_own.x,v_own.y,v_own.z);
//// // dest_n comes out 0 --- yet when we print out from host code it is not 0.
////}
////
} else {
// Set v = 0?
};
// Now move on to centrals with the same data in memory.
// Unfortunately we can't -- unless we figured out how to overwrite the central n data with
// indextri data
// Or, do what we should have done, and make indextri[0] a contiguous fetch so no array storage is needed.
}
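// Illustrative helper (a sketch only -- not called anywhere in this file): the per-edge momentum
// flux that the kernel above accumulates six times. It assumes the f64_vec2/f64_vec3 operators
// defined elsewhere in this file; the name and signature are hypothetical. The caller subtracts
// the returned quantity from Nv, exactly as the inline expressions above do.
__device__ inline f64_vec3 EdgeMomentumFluxSketch(
	f64 const h,
	f64_vec2 const & nvrel_own, f64_vec2 const & nvrel_out,
	f64_vec2 const & nvrel_prev, f64_vec2 const & nvrel_next,
	f64_vec2 const & edgenormal,
	f64_vec3 const & v_own, f64_vec3 const & v_out)
{
	// n(v - v_overall) averaged over the edge, dotted with the (length-weighted) edge normal:
	f64 nv_dot_edge = SIXTH*(nvrel_own + nvrel_own + nvrel_out + nvrel_out
		+ nvrel_prev + nvrel_next).dot(edgenormal);
	// momentum swept across the edge in time h = (mass flux) * (average v on the edge):
	return h*nv_dot_edge*(0.5*(v_own + v_out));
}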
__global__ void Kernel_Rel_advect_v_central(
f64 const h,
structural * __restrict__ p_info,
f64_vec2 * __restrict__ p_tri_centroid,
nT * __restrict__ p_nT,
nT * __restrict__ p_nT_minor,
nT * __restrict__ p_nT_new,
f64_vec3 * __restrict__ p_v,
f64_vec2 * __restrict__ p_v_overall_minor,
long * __restrict__ p_indextri,
char * __restrict__ pPBCtri,
f64 * __restrict__ p_area_old,
f64 * __restrict__ p_area_new,
f64_vec3 * __restrict__ p_v_out
// Not making a whole lot of sense: we need nT_minor, for tris?
)
{
// Maybe we SHOULD change it to put indextri packed the other way ---> be able
// to merge this into the tris routine.
// That is the good alternative. Using scatter-not-gather with atomicAdd and doing as part of tri code is not a good way for us.
// what other way is there?
// We do want to have thread run for each central.
// Or...
// stuck with atomic add between threads even if we could arrange
// it to be not between blocks by running certain colours at once.
// Gather not scatter.
// Need indextri ... would have been far better
// to have put contiguous storage for first, second, third index.
//
// OK - stick with incredibly inelegant way for now,
// know that we should eventually change it given time, then we can merge
// the central calcs routine into this tri calcs routine.
// nvm
// Alternative for central rel v:
__shared__ f64_vec2 tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 2
__shared__ f64_vec3 v_tri[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 3
__shared__ f64_vec2 n_vrel_tri[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 2
//__shared__ char shared_per[SIZE_OF_TRI_TILE_FOR_MAJOR]; // tri periodic info
// Perhaps better to load in PBCtri list instead.
// I think so? Saves interrogating tris outside the tile.
__shared__ long IndexTri[threadsPerTileMajor*MAXNEIGH_d];
__shared__ char PBCtri[threadsPerTileMajor*MAXNEIGH_d];
// per thread: 2*7 + 6 + 1.5 = 21.5 < 24
// We'd bring down to 14 if we chose to do contiguous index loads per neigh;
// however that feels like it has a high chance of not working, unless we did syncthreads.
long index = blockDim.x*blockIdx.x + threadIdx.x;
v_tri[threadIdx.x] = p_v[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
tri_centroid[threadIdx.x] = p_tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
f64 n = p_nT_minor[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x].n;
f64_vec2 v_overall = p_v_overall_minor[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
f64_vec2 nvrel;
nvrel.x = n*(v_tri[threadIdx.x].x - v_overall.x);
nvrel.y = n*(v_tri[threadIdx.x].y - v_overall.y);
n_vrel_tri[threadIdx.x] = nvrel;
long const StartTri = SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x;
v_tri[threadIdx.x + blockDim.x] = p_v[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + blockDim.x + threadIdx.x];
tri_centroid[threadIdx.x + blockDim.x] = p_tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + blockDim.x + threadIdx.x];
n = p_nT_minor[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + blockDim.x + threadIdx.x].n;
v_overall = p_v_overall_minor[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + blockDim.x + threadIdx.x];
nvrel.x = n*(v_tri[threadIdx.x + blockDim.x].x - v_overall.x);
nvrel.y = n*(v_tri[threadIdx.x + blockDim.x].y - v_overall.y);
n_vrel_tri[threadIdx.x + blockDim.x] = nvrel;
__syncthreads();
structural info = p_info[index];
//f64_vec2 ownpos = info.pos;
if (info.flag == DOMAIN_VERTEX) {
// otherwise???
memcpy(IndexTri + threadIdx.x*MAXNEIGH_d,p_indextri + index*MAXNEIGH_d, sizeof(long)*MAXNEIGH_d);
memcpy(PBCtri + threadIdx.x*MAXNEIGH_d, pPBCtri + index*MAXNEIGH_d, sizeof(char)*MAXNEIGH_d);
// For each triangle abutting this central, we want to know things like --
// where are the corners of the edge .. this requires the neighbouring centroids also.
		f64_vec2 edgenormal, ownpos,
			u_prev, u_out, u_next, nvrel_prev, nvrel_out, nvrel_next; // 8 x 2
		ownpos = info.pos; // previously never assigned, but it is used below to orient edgenormal
f64_vec3 Nv(0.0,0.0,0.0); // + 3
f64_vec3 v_out, v_next, v; // + 9 = 28
		v = p_v[BEGINNING_OF_CENTRAL + index];
		n = p_nT_minor[BEGINNING_OF_CENTRAL + index].n;
		f64 const n_own = n; // keep the central's n: the working 'n' is reused for out-of-tile loads below
v_overall = p_v_overall_minor[BEGINNING_OF_CENTRAL + index];
// ???????????????????????????????????????????????????????
nvrel.x = n*(v.x-v_overall.x);
nvrel.y = n*(v.y-v_overall.y);
// Assume we load in u_prev:
long indextri = IndexTri[threadIdx.x*MAXNEIGH_d + info.neigh_len-1]; // bad news, neigh_len is not tri_len
// ###############################################################################
// OOPS -- it's not true at the edge of memory, is it, so what will happen there?
// ###############################################################################
if ((indextri >= StartTri) && (indextri < StartTri + SIZE_OF_TRI_TILE_FOR_MAJOR))
{
u_prev = tri_centroid[indextri-StartTri];
nvrel_prev = n_vrel_tri[indextri-StartTri];
} else {
u_prev = p_tri_centroid[indextri];
f64_vec3 v_ = p_v[indextri];
n = p_nT_minor[indextri].n;
v_overall = p_v_overall_minor[indextri];
nvrel_prev.x = n*(v_.x - v_overall.x);
nvrel_prev.y = n*(v_.y - v_overall.y);
};
char PBC = PBCtri[threadIdx.x*MAXNEIGH_d + info.neigh_len-1];
if (PBC == NEEDS_CLOCK)
{
// Always check these rotate flags throughout.
u_prev = Clockwise_rotate2(u_prev);
nvrel_prev = Clockwise_rotate2(nvrel_prev);
};
if (PBC == NEEDS_ANTI)
{
u_prev = Anticlock_rotate2(u_prev);
nvrel_prev = Anticlock_rotate2(nvrel_prev);
};
indextri = IndexTri[0];
if ((indextri >= StartTri) && (indextri < StartTri + SIZE_OF_TRI_TILE_FOR_MAJOR))
{
u_out = tri_centroid[indextri-StartTri];
v_out = v_tri[indextri-StartTri];
nvrel_out = n_vrel_tri[indextri-StartTri];
} else {
u_out = p_tri_centroid[indextri];
v_out = p_v[indextri];
n = p_nT_minor[indextri].n;
v_overall = p_v_overall_minor[indextri];
nvrel_out.x = n*(v_out.x - v_overall.x);
nvrel_out.y = n*(v_out.y - v_overall.y);
};
PBC = PBCtri[0];
if (PBC == NEEDS_CLOCK)
{
u_out = Clockwise_rotate2(u_out);
nvrel_out = Clockwise_rotate2(nvrel_out);
v_out = Clockwise_rotate3(v_out);
};
if (PBC == NEEDS_ANTI)
{
u_out = Anticlock_rotate2(u_out);
nvrel_out = Anticlock_rotate2(nvrel_out);
v_out = Anticlock_rotate3(v_out);
};
int i,inext;
for (i = 0; i < info.neigh_len; i++)
{
inext = i+1; if (inext == info.neigh_len) inext = 0;
indextri = IndexTri[threadIdx.x*MAXNEIGH_d + inext];
if ((indextri >= StartTri) && (indextri < StartTri + SIZE_OF_TRI_TILE_FOR_MAJOR))
{
u_next = tri_centroid[indextri-StartTri];
v_next = v_tri[indextri-StartTri];
nvrel_next = n_vrel_tri[indextri-StartTri];
} else {
u_next = p_tri_centroid[indextri];
v_next = p_v[indextri];
n = p_nT_minor[indextri].n;
v_overall = p_v_overall_minor[indextri];
nvrel_next.x = n*(v_next.x - v_overall.x);
nvrel_next.y = n*(v_next.y - v_overall.y);
}
PBC = PBCtri[threadIdx.x*MAXNEIGH_d + inext];
if (PBC == NEEDS_CLOCK)
{
u_next = Clockwise_rotate2(u_next);
nvrel_next = Clockwise_rotate2(nvrel_next);
v_next = Clockwise_rotate3(v_next);
};
if (PBC == NEEDS_ANTI)
{
u_next = Anticlock_rotate2(u_next);
nvrel_next = Anticlock_rotate2(nvrel_next);
v_next = Anticlock_rotate3(v_next);
};
// edgenormal:
edgenormal.x = u_prev.y-u_next.y;
edgenormal.y = u_next.x-u_prev.x;
if ((ownpos-u_prev).dot(edgenormal) > 0.0) {
// NOT SURE ABOUT THAT TEST ?
edgenormal.x = -edgenormal.x;
edgenormal.y = -edgenormal.y;
}
Nv -= h*(SIXTH*(nvrel + nvrel + nvrel_prev + nvrel_next + nvrel_out + nvrel_out).dot(edgenormal))
*(0.5*(v_out + v));
u_prev = u_out;
u_out = u_next;
v_out = v_next;
nvrel_prev = nvrel_out;
nvrel_out = nvrel_next;
}
// Now how does it end?
f64 area_old = p_area_old[index];
f64 area_new = p_area_new[index];
		Nv += n_own*v*area_old; // use the n saved at the start: the working 'n' was overwritten
		// by the out-of-tile neighbour loads above; 'v' (the central's own velocity) was not touched.
f64 dest_n = p_nT_new[index].n;
p_v_out[index] = (Nv / (dest_n*area_new));
} else {
f64_vec3 zero(0.0,0.0,0.0);
p_v_out[index] = zero;
};
}
// Grad phi: first put on triangles from major
__global__ void Kernel_Compute_grad_phi_Te_centrals(
structural * __restrict__ p_info_sharing, // for vertex positions & get has_periodic flag
f64 * __restrict__ p_phi,
nT * __restrict__ p_nT_elec,
long * __restrict__ p_indexneigh,
// Output:
f64_vec2 * __restrict__ p_grad_phi,
f64_vec2 * __restrict__ p_grad_Te
)
{
// Bad approach? : scatter instead of gather.
// This thread works to create grad phi on tris because we otherwise,
// having to load it in from tris, also have to load in periodic flags
// regarding them.
// Easier to compute it here -- computing it multiple times for each tri
// but that probably is cheaper. Less shared mem here than when we
// load to aggregate from tris - we then need to load area, grad phi, PB flag for tri
// vs -- phi and position for major
// Then instead of doing tri minors separately, is more efficient to put in a scatter
// data here to affect tri minors: but
// That requires that we load IndexTri and do a random write access???
// Maybe we should keep the tris routine separate -- that's simplest for now.
__shared__ f64 p_phi_shared[threadsPerTileMajor];
__shared__ f64 p_Te_shared[threadsPerTileMajor];
__shared__ f64_vec2 p_vertex_pos_shared[threadsPerTileMajor];
__shared__ long indexneigh[MAXNEIGH_d*threadsPerTileMajor]; // 1 + 2 + 6 doublesworth
long index = blockDim.x*blockIdx.x + threadIdx.x;
p_phi_shared[threadIdx.x] = p_phi[blockIdx.x*blockDim.x + threadIdx.x];
structural info = p_info_sharing[blockIdx.x*blockDim.x + threadIdx.x];
p_vertex_pos_shared[threadIdx.x] = info.pos;
{
nT nTtemp = p_nT_elec[blockIdx.x*blockDim.x + threadIdx.x];
p_Te_shared[threadIdx.x] = nTtemp.T;
}
__syncthreads();
long StartMajor = blockIdx.x*blockDim.x;
long EndMajor = StartMajor + blockDim.x;
f64 phi1, phi2, Te1, Te2;
f64_vec2 pos1, pos2;
if (info.flag == DOMAIN_VERTEX) {
// Don't bother otherwise, right?
memcpy(indexneigh + threadIdx.x*MAXNEIGH_d, p_indexneigh + MAXNEIGH_d*index, sizeof(long)*MAXNEIGH_d);
f64_vec2 grad_phi_integrated(0.0,0.0);
f64_vec2 grad_Te_integrated(0.0,0.0);
f64 grad_x_integrated_x = 0.0;
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
// get phi,pos at edge -- & rotate if necessary
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh1];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
phi1 = p_phi_shared[indexNeigh-StartMajor];
pos1 = p_vertex_pos_shared[indexNeigh-StartMajor];
Te1 = p_Te_shared[indexNeigh-StartMajor];
} else {
phi1 = p_phi[indexNeigh];
structural infotemp = p_info_sharing[indexNeigh];
pos1 = infotemp.pos;
nT nTtemp = p_nT_elec[indexNeigh];
Te1 = nTtemp.T;
};
if (info.has_periodic) {
if ((pos1.x > 0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Clockwise_rotate2(pos1);
};
};
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh2];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
phi2 = p_phi_shared[indexNeigh-StartMajor];
pos2 = p_vertex_pos_shared[indexNeigh-StartMajor];
Te2 = p_Te_shared[indexNeigh-StartMajor];
} else {
phi2 = p_phi[indexNeigh];
structural infotemp = p_info_sharing[indexNeigh];
pos2 = infotemp.pos;
nT nTtemp = p_nT_elec[indexNeigh];
Te2 = nTtemp.T;
};
if (info.has_periodic) {
if ((pos2.x > 0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Clockwise_rotate2(pos2);
};
};
// Now we've got contiguous pos1, pos2, and own pos.
f64_vec2 edge_normal;
// edge_normal.x = pos1.y-info.pos.y;
// edge_normal.y = info.pos.x-pos1.x;
// if (edge_normal2.dot(pos2-info.pos) > 0.0)
// {
// edge_normal.x = -edge_normal.x;
// edge_normal.y = -edge_normal.y;
// }
// grad_phi_integrated += edge_normal*0.5*(phi0+phi1);
// grad_x_integrated_x += edge_normal.x*0.5*(info.pos.x+pos1.x);
edge_normal.x = pos1.y-pos2.y;
edge_normal.y = pos2.x-pos1.x;
if (edge_normal.dot(info.pos-pos1) > 0.0) {
edge_normal.x = -edge_normal.x;
edge_normal.y = -edge_normal.y;
}
grad_phi_integrated += edge_normal*0.5*(phi1+phi2);
grad_Te_integrated += edge_normal*0.5*(Te1+Te2);
grad_x_integrated_x += edge_normal.x*0.5*(pos1.x+pos2.x);
//edge_normal.x = info.pos.y-pos2.y;
//edge_normal.y = pos2.x-info.pos.x;
//if (edge_normal.dot(pos1-pos2) > 0.0) {
// edge_normal.x = -edge_normal.x;
// edge_normal.y = -edge_normal.y;
//}
//grad_phi_integrated += edge_normal*0.5*(phi0+phi2);
//grad_x_integrated_x += edge_normal.x*0.5*(info.pos.x+pos2.x);
// We want to sum to get the average of grad phi weighted by
// area of triangle:
// Not sure I can make sense of this now...
			phi1 = phi2;
			Te1 = Te2; // was missing: Te1 must also advance around the fan
			pos1 = pos2;
}
p_grad_phi[index] = grad_phi_integrated/grad_x_integrated_x;
p_grad_Te[index] = grad_Te_integrated/grad_x_integrated_x;
// Note that we accumulated edge_normal*(phi0+phi1) so that it
// cancelled out between every edge being counted each way.
// Therefore we only need the outward facing edges, the rest cancel to 0.
} else {
f64_vec2 zero(0.0,0.0);
p_grad_phi[index] = zero;
p_grad_Te[index] = zero;
}
}
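// A minimal sketch (not called anywhere) of the estimator used in the kernel above, assuming an
// anticlockwise list of neighbour positions and the scalar values held there; the function name and
// parameters are hypothetical. Integrating grad(f) over the polygon via the divergence theorem gives
// sum over edges of 0.5*(f1+f2)*edge_normal; applying the same construction to f(x,y) = x gives
// sum of 0.5*(x1+x2)*edge_normal.x, which is the polygon area. Dividing the first by the second is
// therefore an area-averaged gradient -- this is why the kernel divides by grad_x_integrated_x
// rather than by a separately computed area.
__device__ inline f64_vec2 PolygonAverageGradientSketch(
	f64_vec2 const * pos, f64 const * val, int n) // n >= 3 vertices, anticlockwise order
{
	f64_vec2 sum(0.0, 0.0);
	f64 area_estimate = 0.0;
	for (int i = 0; i < n; i++)
	{
		int inext = (i + 1 == n) ? 0 : i + 1;
		f64_vec2 edge_normal;
		edge_normal.x = pos[inext].y - pos[i].y;  // outward normal for anticlockwise ordering
		edge_normal.y = pos[i].x - pos[inext].x;
		sum += edge_normal*0.5*(val[i] + val[inext]);
		area_estimate += edge_normal.x*0.5*(pos[i].x + pos[inext].x);
	}
	return sum/area_estimate;
}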
__global__ void Kernel_GetThermalPressureCentrals(
structural * __restrict__ p_info_sharing, // for vertex positions & get has_periodic flag
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
long * __restrict__ p_indexneigh,
// Output:
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec
)
{
__shared__ f64 p_nT_shared[threadsPerTileMajor];
__shared__ f64_vec2 p_vertex_pos_shared[threadsPerTileMajor];
__shared__ long indexneigh[MAXNEIGH_d*threadsPerTileMajor]; // 1 + 2 + 6 doublesworth
long index = blockDim.x*blockIdx.x + threadIdx.x;
nT nT_temp = p_nT_neut[blockIdx.x*blockDim.x + threadIdx.x];
p_nT_shared[threadIdx.x] = nT_temp.n*nT_temp.T;
	structural info = p_info_sharing[index]; // was [threadIdx.x]: the block offset was missing
p_vertex_pos_shared[threadIdx.x] = info.pos;
__syncthreads();
long StartMajor = blockIdx.x*blockDim.x;
	long EndMajor = StartMajor + blockDim.x;
if (info.flag == DOMAIN_VERTEX) {
// Don't bother otherwise, right?
memcpy(indexneigh + threadIdx.x*MAXNEIGH_d, p_indexneigh + MAXNEIGH_d*index, sizeof(long)*MAXNEIGH_d);
f64_vec2 grad_nT_integrated(0.0,0.0);
f64 nT1, nT2;
f64_vec2 pos1, pos2;
// f64 areasum = 0.0;
// Now let's be careful ... we want to integrate grad nT over the central cell
// Probably our best bet is what? Divide by area out to neighs where it is found,
// multiply by central area that is known.
// * * ** * * ** * * ** * * ** * * ** * * ** * * ** * * ** * * ** * * ** * * **
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
// get phi,pos -- & rotate if necessary
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh1];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT1 = p_nT_shared[indexNeigh-StartMajor];
pos1 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_neut[indexNeigh];
nT1 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos1 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos1.x > 0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Clockwise_rotate2(pos1);
};
};
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh2];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT2 = p_nT_shared[indexNeigh-StartMajor];
pos2 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_neut[indexNeigh];
nT2 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos2 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos2.x > 0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Clockwise_rotate2(pos2);
};
};
// Now we've got contiguous pos1, pos2, and own pos.
// Correctly, pos2 is the anticlockwise one, therefore edge_normal.x should be
// pos2.y-pos1.y;
f64_vec2 edge_normal;
edge_normal.x = pos2.y-pos1.y;
edge_normal.y = pos1.x-pos2.x;
// Drop this:
// if (edge_normal.dot(info.pos-pos1) > 0.0) {
// edge_normal.x = -edge_normal.x;
// edge_normal.y = -edge_normal.y;
// }
grad_nT_integrated += edge_normal*0.5*(nT1+nT2);
//grad_x_integrated_x += edge_normal.x*0.5*(pos1.x+pos2.x);
nT1 = nT2;
pos1 = pos2;
}
// Now we took it integrated over the whole union of triangles, but,
// we want to diminish this to the size of the central.
// = 1/9 as much
f64_vec3 add(-grad_nT_integrated.x/(9.0*m_n),
-grad_nT_integrated.y/(9.0*m_n),
0.0);
p_MAR_neut[index] += add;
// Note that we accumulated edge_normal*(phi0+phi1) so that it
// cancelled out between every edge being counted each way.
// Therefore we only need the outward facing edges, the rest cancel to 0.
} else {
// Not domain vertex
// f64_vec2 zero(0.0,0.0);
// p_grad_phi[index] = zero;
// do nothing
}
__syncthreads();
// Now proceed, with shared positions already stored, to do ion. Correct?
nT_temp = p_nT_ion[blockIdx.x*blockDim.x + threadIdx.x];
p_nT_shared[threadIdx.x] = nT_temp.n*nT_temp.T;
__syncthreads();
if (info.flag == DOMAIN_VERTEX) {
// Don't bother otherwise, right?
f64_vec2 grad_nT_integrated(0.0,0.0);
f64 nT1, nT2;
f64_vec2 pos1, pos2;
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
// get phi,pos -- & rotate if necessary
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh1];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT1 = p_nT_shared[indexNeigh-StartMajor];
pos1 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_ion[indexNeigh];
nT1 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos1 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos1.x > 0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Clockwise_rotate2(pos1);
};
};
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh2];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT2 = p_nT_shared[indexNeigh-StartMajor];
pos2 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_ion[indexNeigh];
nT2 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos2 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos2.x > 0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Clockwise_rotate2(pos2);
};
};
// Now we've got contiguous pos1, pos2, and own pos.
// Correctly, pos2 is the anticlockwise one, therefore edge_normal.x should be
// pos2.y-pos1.y;
f64_vec2 edge_normal;
edge_normal.x = pos2.y-pos1.y;
edge_normal.y = pos1.x-pos2.x;
// Drop this:
// if (edge_normal.dot(info.pos-pos1) > 0.0) {
// edge_normal.x = -edge_normal.x;
// edge_normal.y = -edge_normal.y;
// }
grad_nT_integrated += edge_normal*0.5*(nT1+nT2);
//grad_x_integrated_x += edge_normal.x*0.5*(pos1.x+pos2.x);
nT1 = nT2;
pos1 = pos2;
}
// Now we took it integrated over the whole union of triangles, but,
// we want to diminish this to the size of the central.
// = 1/9 as much
f64_vec3 add(-grad_nT_integrated.x/(9.0*m_ion),
-grad_nT_integrated.y/(9.0*m_ion),
0.0);
p_MAR_ion[index] += add;
};
__syncthreads();
	// Now proceed, with shared positions already stored, to do electrons.
nT_temp = p_nT_elec[blockIdx.x*blockDim.x + threadIdx.x];
p_nT_shared[threadIdx.x] = nT_temp.n*nT_temp.T;
__syncthreads();
if (info.flag == DOMAIN_VERTEX) {
// Don't bother otherwise, right?
f64_vec2 grad_nT_integrated(0.0,0.0);
f64 nT1, nT2;
f64_vec2 pos1, pos2;
short iNeigh1 = info.neigh_len-1;
short iNeigh2 = 0;
// get phi,pos -- & rotate if necessary
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh1];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT1 = p_nT_shared[indexNeigh-StartMajor];
pos1 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_elec[indexNeigh];
nT1 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos1 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos1.x > 0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Anticlock_rotate2(pos1);
};
if ((pos1.x < -0.5*pos1.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos1 = Clockwise_rotate2(pos1);
};
};
for (iNeigh2 = 0; iNeigh2 < info.neigh_len; iNeigh2++)
{
long indexNeigh = indexneigh[threadIdx.x*MAXNEIGH_d + iNeigh2];
if ((indexNeigh >= StartMajor) && (indexNeigh < EndMajor))
{
nT2 = p_nT_shared[indexNeigh-StartMajor];
pos2 = p_vertex_pos_shared[indexNeigh-StartMajor];
} else {
nT nT_temp = p_nT_elec[indexNeigh];
nT2 = nT_temp.n*nT_temp.T;
structural infotemp = p_info_sharing[indexNeigh];
pos2 = infotemp.pos;
};
if (info.has_periodic) {
if ((pos2.x > 0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x < -0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Anticlock_rotate2(pos2);
};
if ((pos2.x < -0.5*pos2.y*GRADIENT_X_PER_Y) &&
(info.pos.x > 0.5*info.pos.y*GRADIENT_X_PER_Y))
{
pos2 = Clockwise_rotate2(pos2);
};
}
// Now we've got contiguous pos1, pos2, and own pos.
// Correctly, pos2 is the anticlockwise one, therefore edge_normal.x should be
// pos2.y-pos1.y;
f64_vec2 edge_normal;
edge_normal.x = pos2.y-pos1.y;
edge_normal.y = pos1.x-pos2.x;
grad_nT_integrated += edge_normal*0.5*(nT1+nT2);
nT1 = nT2;
pos1 = pos2;
}
// Now we took it integrated over the whole union of triangles, but,
// we want to diminish this to the size of the central.
// = 1/9 as much
f64_vec3 add(-grad_nT_integrated.x/(9.0*m_e),
-grad_nT_integrated.y/(9.0*m_e),
0.0);
p_MAR_elec[index] += add;
};
	// We divided by particle mass but did not divide by area: the factor of Area_central is left in.
}
__global__ void Kernel_Compute_grad_phi_Te_tris(
structural * __restrict__ p_info_sharing, // for vertex positions
f64 * __restrict__ p_phi,
nT * __restrict__ p_nT_elec,
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
// Output:
f64_vec2 * __restrict__ p_grad_phi,
f64_vec2 * __restrict__ p_GradTe
)
{
__shared__ f64 p_phi_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64 p_Te_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec2 p_vertex_pos_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long index = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
p_phi_shared[threadIdx.x] = p_phi[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
nT nTtemp = p_nT_elec[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_Te_shared[threadIdx.x] = nTtemp.T;
structural info = p_info_sharing[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_vertex_pos_shared[threadIdx.x] = info.pos;
}
__syncthreads();
CHAR4 perinfo = p_tri_perinfo[index];
// Take grad on triangle:
// first collect corner positions; if this is periodic triangle then we have to rotate em.
if (perinfo.flag == DOMAIN_TRIANGLE) { // ?
LONG3 corner_index = p_tri_corner_index[index];
// Do we ever require those and not the neighbours?
// Yes - this time for instance.
f64_vec2 pos0, pos1, pos2;
f64 phi0,phi1,phi2, Te0, Te1, Te2;
short iNeigh;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
pos0 = p_vertex_pos_shared[corner_index.i1-StartMajor];
phi0 = p_phi_shared[corner_index.i1-StartMajor];
Te0 = p_Te_shared[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i1];
pos0 = info.pos;
phi0 = p_phi[corner_index.i1];
nT nTtemp = p_nT_elec[corner_index.i1];
Te0 = nTtemp.T;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
pos1 = p_vertex_pos_shared[corner_index.i2-StartMajor];
phi1 = p_phi_shared[corner_index.i2-StartMajor];
Te1 = p_Te_shared[corner_index.i2-StartMajor];
} else {
structural info = p_info_sharing[corner_index.i2];
pos1 = info.pos;
phi1 = p_phi[corner_index.i2];
nT nTtemp = p_nT_elec[corner_index.i2];
Te1 = nTtemp.T;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
pos2 = p_vertex_pos_shared[corner_index.i3-StartMajor];
phi2 = p_phi_shared[corner_index.i3-StartMajor];
Te2 = p_Te_shared[corner_index.i3-StartMajor];
} else {
structural info = p_info_sharing[corner_index.i3];
pos2 = info.pos;
phi2 = p_phi[corner_index.i3];
nT nTtemp = p_nT_elec[corner_index.i3];
Te2 = nTtemp.T;
}
// if (perinfo.periodic == 0) {
// } else {
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI)
pos0 = Anticlock_rotate2(pos0);
if (perinfo.per0 == NEEDS_CLOCK)
pos0 = Clockwise_rotate2(pos0);
if (perinfo.per1 == NEEDS_ANTI)
pos1 = Anticlock_rotate2(pos1);
if (perinfo.per1 == NEEDS_CLOCK)
pos1 = Clockwise_rotate2(pos1);
if (perinfo.per2 == NEEDS_ANTI)
pos2 = Anticlock_rotate2(pos2);
if (perinfo.per2 == NEEDS_CLOCK)
pos2 = Clockwise_rotate2(pos2);
// };
// To get grad phi:
f64_vec2 grad_phi, edge_normal0, edge_normal1, edge_normal2, GradTe;
// Integral of grad... average phi on edge . edgenormal
// This should give the same result as the plane passing through
// the 3 corners -- a few simple examples suggest yes.
edge_normal0.x = pos2.y-pos1.y;
edge_normal0.y = pos1.x-pos2.x;
// Got to make sure it points out. How? Have to take
// dot product with vector to the opposing point
if (edge_normal0.dot(pos0-pos1) > 0.0) {
// points to opposing point - wrong way
edge_normal0.x = -edge_normal0.x;
edge_normal0.y = -edge_normal0.y;
}
edge_normal1.x = pos2.y-pos0.y;
edge_normal1.y = pos0.x-pos2.x;
if (edge_normal1.dot(pos1-pos0) > 0.0) {
edge_normal1.x = -edge_normal1.x;
edge_normal1.y = -edge_normal1.y;
}
edge_normal2.x = pos1.y-pos0.y;
edge_normal2.y = pos0.x-pos1.x;
if (edge_normal2.dot(pos2-pos0) > 0.0) {
edge_normal2.x = -edge_normal2.x;
edge_normal2.y = -edge_normal2.y;
};
grad_phi =
( 0.5*(phi1 + phi2)*edge_normal0 // opposite phi0
+ 0.5*(phi0 + phi2)*edge_normal1
+ 0.5*(phi1 + phi0)*edge_normal2 );
GradTe =
( 0.5*(Te1 + Te2)*edge_normal0 // opposite phi0
+ 0.5*(Te0 + Te2)*edge_normal1
+ 0.5*(Te1 + Te0)*edge_normal2 );
// Divide by area -- easier to recalculate here than to load it in.
f64 area = fabs(0.5*(
(pos1.x+pos0.x)*edge_normal2.x
+ (pos2.x+pos1.x)*edge_normal0.x
+ (pos0.x+pos2.x)*edge_normal1.x
));
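		// (That bracketed sum is the contour integral of x times the outward normal's x-component,
		//  i.e. the integral of dx/dx over the triangle by the divergence theorem -- which is just
		//  the triangle area. fabs() guards against orientation/sign.)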
grad_phi /= area;
GradTe /= area;
// Grad of phi on tri is grad for this minor within the tri:
p_grad_phi[index] = grad_phi;
p_GradTe[index] = GradTe;
} else {
f64_vec2 zero(0.0,0.0);
p_grad_phi[index] = zero;
p_GradTe[index] = zero;
}
}
__global__ void Get_Lap_phi_on_major(
f64 * __restrict__ p_phi,
structural * __restrict__ p_info_sharing,
// f64_vec2 * __restrict__ p_tri_centroid,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
// output:
f64 * __restrict__ p_Lap_phi
)
{
__shared__ f64 p_phi_shared[threadsPerTileMajor];
__shared__ f64_vec2 p_vertex_pos_shared[threadsPerTileMajor];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajor];
// So, per thread: 1 + 2 + 6 doubles = 9 doubles.
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajor]; // + 1.5
//__shared__ f64_vec2 tri_centroid[SIZE_OF_TRI_TILE_FOR_MAJOR]; // + 4
// This is not good: 1 + 2 + 6 + 1.5 + 4 = 14.5 --- we said max 12 for decent throughput.
// I think we can drop PBCneigh here and use info.has_periodic
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const index = threadIdx.x + blockIdx.x * blockDim.x;
f64 phi_clock, phi_anti, phi_out, phi;
f64_vec2 pos_clock, pos_anti, pos_out;
char PBC;
p_phi_shared[threadIdx.x] = p_phi[index];
structural info = p_info_sharing[index];
p_vertex_pos_shared[threadIdx.x] = info.pos;
// We are going to want tri centroids to know the edge of the major cell.
//tri_centroid[threadIdx.x] = p_tri_centroid[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + threadIdx.x];
//tri_centroid[threadIdx.x + blockDim.x] = p_tri_centroid[blockIdx.x*SIZE_OF_TRI_TILE_FOR_MAJOR + blockDim.x + threadIdx.x];
__syncthreads();
f64 Lapphi = 0.0, Area = 0.0;
if ( (info.flag != DOMAIN_VERTEX) && (info.flag != OUTERMOST) ) {
return;
}
// We might like to treat the OUTERMOST case.:
//short limit = info.neigh_len;
//if (info.flag == OUTERMOST) limit++; // Ensure it's tri 4 and 0 on edge towards neigh 0.
// Yeah that's no good ... this is neighs not tris. Pay attention.
long indexneigh;
switch(info.flag)
{
case DOMAIN_VERTEX:
// Now we've got to load up what we need for the edge of the major cell.
// Did we do this anywhere else?
phi = p_phi_shared[threadIdx.x];
memcpy(Indexneigh + MAXNEIGH_d*threadIdx.x,
pIndexNeigh + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d*threadIdx.x,
pPBCNeigh + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(char));
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len-1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = p_vertex_pos_shared[indexneigh-StartMajor];
phi_clock = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_clock = info.pos;
phi_clock = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len-1];
if (PBC == NEEDS_ANTI)
pos_clock = Anticlock_rotate2(pos_clock);
if (PBC == NEEDS_CLOCK)
pos_clock = Clockwise_rotate2(pos_clock);
// What about neighs and tris? Are they in the appropriate relationship?
// How about: load vertex positions --> work out centroids --
// we need phi from vertices anyway and we need their positions anyway. So.
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = p_vertex_pos_shared[indexneigh-StartMajor];
phi_out = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_out = info.pos;
phi_out = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI)
pos_out = Anticlock_rotate2(pos_out);
if (PBC == NEEDS_CLOCK)
pos_out = Clockwise_rotate2(pos_out);
short iNeigh;
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
int inext = iNeigh+1; if (inext == info.neigh_len) inext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + inext];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = p_vertex_pos_shared[indexneigh-StartMajor];
phi_anti = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_anti = info.pos;
phi_anti = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + inext];
if (PBC == NEEDS_ANTI)
pos_anti = Anticlock_rotate2(pos_anti);
if (PBC == NEEDS_CLOCK)
pos_anti = Clockwise_rotate2(pos_anti);
// Choice of using PBC list here. Alternative is what: just working from
// 'has_periodic' flag on our own thread, and deciding from that based on x/y.
// ___________________________________________________________________________
// Now what to do with it?
// Find the edge:
f64_vec2 edgenormal;
//vec2 = THIRD*(pos_clock + info.pos + pos_out); <--- assume this would be centroid...
edgenormal.x = THIRD*(pos_anti.y-pos_clock.y);
edgenormal.y = THIRD*(pos_clock.x-pos_anti.x);
// HERE DID NOT HAVE TO USE tri_centroid AFTER ALL.
// HOWEVER MAKE SURE WE DO THE RIGHT THING IN CASE THIS ABUTS THE INSULATOR.
// In this case, tri centroid is meant to be projected to insulator!!
// But Lap phi abutting the insulator should come out as what?
// Sides contribute azimuthally, ins side contributes 0.
// ie we should not be using Inner values to get gradient when looking left + right
// at ins.
		// COMMENTED FOR DEBUGGING WHY IT GIVES LAUNCH FAILURES
if (pos_out.x*pos_out.x+pos_out.y*pos_out.y < DEVICE_INSULATOR_OUTER_RADIUS*DEVICE_INSULATOR_OUTER_RADIUS)
{
// Zero contribution, looking into insulator
} else {
if (pos_anti.x*pos_anti.x+pos_anti.y*pos_anti.y < DEVICE_INSULATOR_OUTER_RADIUS*DEVICE_INSULATOR_OUTER_RADIUS)
{
// assume we just look at the phi_out? No,
// get grad phi from 3 points.
f64 shoelacedoubled = ( (pos_clock.x + info.pos.x)*(pos_clock.y-info.pos.y) // y_anti - y_clock --- pos_clock is the highest one.
+ (pos_clock.x + pos_out.x)*(pos_out.y-pos_clock.y)
+ (pos_out.x + info.pos.x)*(info.pos.y-pos_out.y));
f64_vec2 Gradphi;
Gradphi.x = ( (phi_clock + phi)*(pos_clock.y-info.pos.y) // y_anti - y_clock --- pos_clock is the highest one.
+ (phi_clock + phi_out)*(pos_out.y-pos_clock.y)
+ (phi_out + phi)*(info.pos.y-pos_out.y) )
/ shoelacedoubled;
Gradphi.y = ( (phi_clock + phi)*(info.pos.x-pos_clock.x) // y_anti - y_clock --- pos_clock is the highest one.
+ (phi_clock + phi_out)*(pos_clock.x-pos_out.x)
+ (phi_out + phi)*(pos_out.x-info.pos.x) )
/ shoelacedoubled;
Lapphi += Gradphi.dot(edgenormal);
				// We did not yet modify edgenormal, nota bene.
// And what then is the contribution for shoelace?
// Should be adding up
// integral of dx/dx
//edgenormal.x = THIRD*(pos_anti.y-pos_clock.y);
//edgenormal.y = THIRD*(pos_clock.x-pos_anti.x);
Area += 0.5*(pos_clock.x + pos_anti.x)*edgenormal.x;
// Of course for sides we are not doing this quite right, by not
// modifying the centroid.
} else {
if (pos_clock.x*pos_clock.x+pos_clock.y*pos_clock.y < DEVICE_INSULATOR_OUTER_RADIUS*DEVICE_INSULATOR_OUTER_RADIUS)
{
f64 shoelacedoubled = ( (pos_anti.x + info.pos.x)*(info.pos.y-pos_anti.y) // y_anti - y_clock --- pos_clock is the highest one.
+ (pos_anti.x + pos_out.x)*(pos_anti.y-pos_out.y)
+ (pos_out.x + info.pos.x)*(pos_out.y-info.pos.y));
f64_vec2 Gradphi;
Gradphi.x = ( (phi_anti + phi)*(info.pos.y-pos_anti.y) // y_anti - y_clock --- pos_clock is the highest one.
+ (phi_anti + phi_out)*(pos_anti.y-pos_out.y)
+ (phi_out + phi)*(pos_out.y-info.pos.y) )
/ shoelacedoubled;
Gradphi.y = ( (phi_anti + phi)*(pos_anti.x-info.pos.x) // y_anti - y_clock --- pos_clock is the highest one.
+ (phi_anti + phi_out)*(pos_out.x-pos_anti.x)
+ (phi_out + phi)*(info.pos.x-pos_out.x) )
/ shoelacedoubled;
Lapphi += Gradphi.dot(edgenormal);
Area += 0.5*(pos_clock.x + pos_anti.x)*edgenormal.x;
} else {
// Default case.
//shoelace = (info.pos.x - pos_out.x)*(pos_anti.y - pos_clock.y)
// + (pos_anti.x - pos_clock.x)*(pos_out.y - info.pos.y);
// same coeff to phi for grad_x integrated as on x_0 in shoelace:
// same coeff to phi_anti for grad_y as on y_anti in shoelace:
// succeed with this out:
Lapphi += ( (phi - phi_out) * ( (pos_anti.y-pos_clock.y)*edgenormal.x
+ (pos_clock.x-pos_anti.x)*edgenormal.y )
// still fails with this out too:
+ (phi_anti-phi_clock)*( (pos_out.y - info.pos.y)*edgenormal.x
+ (info.pos.x - pos_out.x)*edgenormal.y) )
// // divide by shoelace :
/ ( (info.pos.x - pos_out.x)*(pos_anti.y - pos_clock.y)
+ (pos_anti.x - pos_clock.x)*(pos_out.y - info.pos.y) );
// Think divide by zero is the reason it crashes. Nope. Still fails without division.
Area += 0.5*(pos_clock.x + pos_anti.x)*edgenormal.x;
};
};
};
// Get away with not repositioning edge_normal ends to insulator...
// Now go round:
pos_clock = pos_out;
pos_out = pos_anti;
phi_clock = phi_out;
phi_out = phi_anti;
};
break;
case OUTERMOST:
// In this case we have e.g. if there are 4 neighs 0,1,2,3, then just 0-1-2, 1-2-3
// We can happily drop the d/dtheta, it's not a big deal.
// Start with neigh 0, not neigh N-1. End with neigh N-2 for centre.
phi = p_phi_shared[threadIdx.x];
memcpy(Indexneigh + MAXNEIGH_d*threadIdx.x,
pIndexNeigh + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d*threadIdx.x,
pPBCNeigh + MAXNEIGH_d*index,
MAXNEIGH_d*sizeof(char));
long indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = p_vertex_pos_shared[indexneigh-StartMajor];
phi_clock = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_clock = info.pos;
phi_clock = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI)
pos_clock = Anticlock_rotate2(pos_clock);
if (PBC == NEEDS_CLOCK)
pos_clock = Clockwise_rotate2(pos_clock);
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = p_vertex_pos_shared[indexneigh-StartMajor];
phi_out = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_out = info.pos;
phi_out = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 1];
if (PBC == NEEDS_ANTI)
pos_out = Anticlock_rotate2(pos_out);
if (PBC == NEEDS_CLOCK)
pos_out = Clockwise_rotate2(pos_out);
#pragma unroll MAXNEIGH_d
for (iNeigh = 1; iNeigh < info.neigh_len-1; iNeigh++)
{
int inext = iNeigh+1;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + inext];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = p_vertex_pos_shared[indexneigh-StartMajor];
phi_anti = p_phi_shared[indexneigh-StartMajor];
} else {
info = p_info_sharing[indexneigh];
pos_anti = info.pos;
phi_anti = p_phi[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + inext];
if (PBC == NEEDS_ANTI)
pos_anti = Anticlock_rotate2(pos_anti);
if (PBC == NEEDS_CLOCK)
pos_anti = Clockwise_rotate2(pos_anti);
f64_vec2 edgenormal;
edgenormal.x = THIRD*(pos_anti.y-pos_clock.y);
edgenormal.y = THIRD*(pos_clock.x-pos_anti.x);
Lapphi += ( (phi - phi_out) * ( (pos_anti.y-pos_clock.y)*edgenormal.x
+ (pos_clock.x-pos_anti.x)*edgenormal.y )
+ (phi_anti-phi_clock)*( (pos_out.y - info.pos.y)*edgenormal.x
+ (info.pos.x - pos_out.x)*edgenormal.y) )
// was:
// + (pos_anti.y-pos_clock.y)*edgenormal.y ) )
// divide by shoelace :
/ ( (info.pos.x - pos_out.x)*(pos_anti.y - pos_clock.y)
+ (pos_anti.x - pos_clock.x)*(pos_out.y - info.pos.y) );
Area += 0.5*(pos_clock.x + pos_anti.x)*edgenormal.x;
// Now go round:
pos_clock = pos_out;
pos_out = pos_anti;
phi_clock = phi_out;
phi_out = phi_anti;
};
break;
};
// integral of div f = sum of [f dot edgenormal]
// ... so here we took integral of div grad f.
// Look at previous code. Need to collect area and divide by it.
p_Lap_phi[index] = Lapphi/Area;
}
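// Illustrative sketch (not called by the kernel above; the name and signature are hypothetical):
// the quadrilateral gradient behind the "default case" above. On the quad (own, clock, out, anti)
// the contour-integral estimate of grad(phi) reduces to two difference terms over the quad's
// shoelace factor; the kernel dots this with the cell-edge normal and sums the contributions to get
// the integral of div grad phi, then divides by the collected Area.
__device__ inline f64_vec2 GradPhiOnQuadSketch(
	f64 phi_own, f64 phi_clock, f64 phi_out, f64 phi_anti,
	f64_vec2 const & pos_own, f64_vec2 const & pos_clock,
	f64_vec2 const & pos_out, f64_vec2 const & pos_anti)
{
	f64 shoelace = (pos_own.x - pos_out.x)*(pos_anti.y - pos_clock.y)
		+ (pos_anti.x - pos_clock.x)*(pos_out.y - pos_own.y);
	f64_vec2 grad;
	grad.x = ((phi_own - phi_out)*(pos_anti.y - pos_clock.y)
		+ (phi_anti - phi_clock)*(pos_out.y - pos_own.y)) / shoelace;
	grad.y = ((phi_own - phi_out)*(pos_clock.x - pos_anti.x)
		+ (phi_anti - phi_clock)*(pos_own.x - pos_out.x)) / shoelace;
	return grad;
}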
__global__ void Kernel_GetThermalPressureTris(
structural * __restrict__ p_info_sharing, // for vertex positions
nT * __restrict__ p_nT_neut,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_perinfo,
// Output:
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec
)
{
// Attention: code p_MAR_neut[index] += add;
// implies that we zero those arrays before we come here.
__shared__ f64 p_nT_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
__shared__ f64_vec2 p_vertex_pos_shared[SIZE_OF_MAJOR_PER_TRI_TILE];
long StartMajor = blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE;
long EndMajor = StartMajor + SIZE_OF_MAJOR_PER_TRI_TILE;
long index = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
nT nTtemp = p_nT_neut[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_nT_shared[threadIdx.x] = nTtemp.n*nTtemp.T;
structural info = p_info_sharing[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_vertex_pos_shared[threadIdx.x] = info.pos;
}
__syncthreads();
// Take grad on triangle:
// first collect corner positions; if this is periodic triangle then we have to rotate em.
LONG3 corner_index;
f64_vec2 edge_normal0, edge_normal1, edge_normal2;
CHAR4 perinfo = p_tri_perinfo[index];
if (perinfo.flag == DOMAIN_TRIANGLE) { // ?
corner_index = p_tri_corner_index[index];
// Do we ever require those and not the neighbours?
// Yes - this time for instance.
f64 nT0, nT1, nT2;
f64_vec2 pos0, pos1, pos2;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
pos0 = p_vertex_pos_shared[corner_index.i1-StartMajor];
nT0 = p_nT_shared[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i1];
pos0 = info.pos;
nT nTtemp = p_nT_neut[corner_index.i1];
nT0 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
pos1 = p_vertex_pos_shared[corner_index.i2-StartMajor];
nT1 = p_nT_shared[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i2];
pos1 = info.pos;
nT nTtemp = p_nT_neut[corner_index.i2];
nT1 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
pos2 = p_vertex_pos_shared[corner_index.i3-StartMajor];
nT2 = p_nT_shared[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
structural info = p_info_sharing[corner_index.i3];
pos2 = info.pos;
nT nTtemp = p_nT_neut[corner_index.i3];
nT2 = nTtemp.n*nTtemp.T;
}
// In this case which ones are periodic?
// Should we just store per flags?
// How it should work:
// CHAR4 perinfo: periodic, per0, per1, per2;
if (perinfo.per0 == NEEDS_ANTI)
pos0 = Anticlock_rotate2(pos0);
if (perinfo.per0 == NEEDS_CLOCK)
pos0 = Clockwise_rotate2(pos0);
if (perinfo.per1 == NEEDS_ANTI)
pos1 = Anticlock_rotate2(pos1);
if (perinfo.per1 == NEEDS_CLOCK)
pos1 = Clockwise_rotate2(pos1);
if (perinfo.per2 == NEEDS_ANTI)
pos2 = Anticlock_rotate2(pos2);
if (perinfo.per2 == NEEDS_CLOCK)
pos2 = Clockwise_rotate2(pos2);
// Integral of grad... average phi on edge . edgenormal
// This should give the same result as the plane passing through
// the 3 corners -- a few simple examples suggest yes.
edge_normal0.x = pos2.y-pos1.y;
edge_normal0.y = pos1.x-pos2.x;
// Got to make sure it points out. How? Have to take
// dot product with vector to the opposing point
if (edge_normal0.dot(pos0-pos1) > 0.0) {
// points to opposing point - wrong way
edge_normal0.x = -edge_normal0.x;
edge_normal0.y = -edge_normal0.y;
}
edge_normal1.x = pos2.y-pos0.y;
edge_normal1.y = pos0.x-pos2.x;
if (edge_normal1.dot(pos1-pos0) > 0.0) {
edge_normal1.x = -edge_normal1.x;
edge_normal1.y = -edge_normal1.y;
}
edge_normal2.x = pos1.y-pos0.y;
edge_normal2.y = pos0.x-pos1.x;
if (edge_normal2.dot(pos2-pos0) > 0.0) {
edge_normal2.x = -edge_normal2.x;
edge_normal2.y = -edge_normal2.y;
};
f64_vec2 grad_nT_integrated =
( 0.5*(nT1 + nT2)*edge_normal0 // opposite phi0
+ 0.5*(nT0 + nT2)*edge_normal1
+ 0.5*(nT1 + nT0)*edge_normal2 );
// Grad of phi on tri is grad for this minor within the tri:
//p_grad_nT_neut_integrated[index] = grad_nT_integrated;
// NOTE WE DO NOW DIVIDE BY PARTICLE MASS
f64_vec3 add(-grad_nT_integrated.x/m_n,
-grad_nT_integrated.y/m_n,
0.0); // MINUS
p_MAR_neut[index] += add;
} else {
if (perinfo.flag == CROSSING_INS) {
// We don't know if it's got 1 point outside ins or 2.
// If 1 then not a lot we can do ??
// Contribute zero to MAR for now...
} else {
// leave MAR unaffected
};
}
__syncthreads();
// Now load in ion nT info:
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
nT nTtemp = p_nT_ion[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_nT_shared[threadIdx.x] = nTtemp.n*nTtemp.T;
}
__syncthreads();
if (perinfo.flag == DOMAIN_TRIANGLE) { // ?
f64 nT0, nT1, nT2;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT0 = p_nT_shared[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_ion[corner_index.i1];
nT0 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT1 = p_nT_shared[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_ion[corner_index.i2];
nT1 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT2 = p_nT_shared[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_ion[corner_index.i3];
nT2 = nTtemp.n*nTtemp.T;
}
// Integral of grad... average phi on edge . edgenormal
// This should give the same result as the plane passing through
// the 3 corners -- a few simple examples suggest yes.
f64_vec2 grad_nT_integrated =
( 0.5*(nT1 + nT2)*edge_normal0 // opposite phi0
+ 0.5*(nT0 + nT2)*edge_normal1
+ 0.5*(nT1 + nT0)*edge_normal2 );
// Grad of phi on tri is grad for this minor within the tri:
//p_grad_nT_ion_integrated[index] = grad_nT_integrated;
f64_vec3 add(-grad_nT_integrated.x/m_ion,
-grad_nT_integrated.y/m_ion,
0.0);
p_MAR_ion[index] += add;
} else {
		// f64_vec2 zero(0.0,0.0); // unused while the write below stays commented out
//p_grad_nT_ion_integrated[index] = zero;
}
__syncthreads();
if (threadIdx.x < SIZE_OF_MAJOR_PER_TRI_TILE)
{
nT nTtemp = p_nT_elec[blockIdx.x*SIZE_OF_MAJOR_PER_TRI_TILE + threadIdx.x];
p_nT_shared[threadIdx.x] = nTtemp.n*nTtemp.T;
}
__syncthreads();
if (perinfo.flag == DOMAIN_TRIANGLE) { // ?
f64 nT0, nT1, nT2;
if ((corner_index.i1 >= StartMajor) && (corner_index.i1 < EndMajor))
{
nT0 = p_nT_shared[corner_index.i1-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_elec[corner_index.i1];
nT0 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i2 >= StartMajor) && (corner_index.i2 < EndMajor))
{
nT1 = p_nT_shared[corner_index.i2-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_elec[corner_index.i2];
nT1 = nTtemp.n*nTtemp.T;
}
if ((corner_index.i3 >= StartMajor) && (corner_index.i3 < EndMajor))
{
nT2 = p_nT_shared[corner_index.i3-StartMajor];
} else {
// have to load in from global memory:
nT nTtemp = p_nT_elec[corner_index.i3];
nT2 = nTtemp.n*nTtemp.T;
}
// Integral of grad... average phi on edge . edgenormal
// This should give the same result as the plane passing through
// the 3 corners -- a few simple examples suggest yes.
f64_vec2 grad_nT_integrated =
( 0.5*(nT1 + nT2)*edge_normal0 // opposite phi0
+ 0.5*(nT0 + nT2)*edge_normal1
+ 0.5*(nT1 + nT0)*edge_normal2 );
// Grad of phi on tri is grad for this minor within the tri:
//p_grad_nT_elec_integrated[index] = grad_nT_integrated;
f64_vec3 add(-grad_nT_integrated.x/m_e,
-grad_nT_integrated.y/m_e,
0.0);
p_MAR_elec[index] += add;
} else {
}
}
__global__ void Kernel_Advance_Antiadvect_phidot(
f64 * __restrict__ p_phidot,
f64_vec2 * __restrict__ p_v_overall,
f64 h_use,
f64_vec2 * __restrict__ p_grad_phidot,
f64 * __restrict__ p_Lap_phi,
nT * __restrict__ p_nT_ion,
nT * __restrict__ p_nT_elec,
// out:
f64 * __restrict__ p_phidot_out
)
{
long index = blockDim.x*blockIdx.x + threadIdx.x;
f64_vec2 move = h_use*p_v_overall[index];
f64 Lap_phi = p_Lap_phi[index];
nT nT_ion = p_nT_ion[index];
nT nT_elec = p_nT_elec[index];
f64 phidot = p_phidot[index];
f64_vec2 grad_phidot = p_grad_phidot[index];
// What it has to do:
p_phidot_out[index] =
phidot + move.dot(grad_phidot)
+ h_use*csq*(Lap_phi + FOURPI_Q*(nT_ion.n-nT_elec.n));
// CHECK SIGNS
// We are giving the value at the moved point.
// The existence of this routine is a clear inefficiency.
// It's basically so that computing grad_phi can be separate and repeated.
// Could we combine it with Get_Lap_phi_on_major?
}
__global__ void Kernel_Advance_Antiadvect_phi
(
f64 * __restrict__ p_phi,
f64_vec2 * p_v_overall_major,
f64 h_use,
f64_vec2 * __restrict__ p_grad_phi_major,
f64 * __restrict__ p_phidot ,
f64 * __restrict__ p_phi_out
)
{
long index = blockDim.x*blockIdx.x + threadIdx.x;
f64_vec2 move = h_use*p_v_overall_major[index];
f64 phidot = p_phidot[index];
f64 phi = p_phi[index];
f64_vec2 grad_phi = p_grad_phi_major[index];
p_phi_out[index] =
phi + move.dot(grad_phi) + h_use*phidot;
}
__global__ void Kernel_Antiadvect_A_allminor
(
f64_vec3 * __restrict__ p_A,
f64_vec2 * __restrict__ p_v_overall_minor,
f64 h_use,
f64_vec2 * __restrict__ p_grad_Ax,
f64_vec2 * __restrict__ p_grad_Ay,
f64_vec2 * __restrict__ p_grad_Az,
f64_vec3 * __restrict__ p_A_out,
bool bAdd,
f64_vec3 * __restrict__ p_Adot
)
{
long index = blockDim.x*blockIdx.x + threadIdx.x;
f64_vec2 move = h_use*p_v_overall_minor[index];
f64_vec3 A_out;
f64_vec3 A_in = p_A[index];
A_out.x = A_in.x + move.dot(p_grad_Ax[index]);
A_out.y = A_in.y + move.dot(p_grad_Ay[index]);
A_out.z = A_in.z + move.dot(p_grad_Az[index]);
if (bAdd) {
f64_vec3 Adot = p_Adot[index];
A_out += h_use*Adot;
}
p_A_out[index] = A_out;
}
__global__ void Kernel_Ionisation(
f64 const h,
structural * __restrict__ p_info,
f64 * __restrict__ p_area,
nT * __restrict__ p_nT_neut_src,
nT * __restrict__ p_nT_ion_src,
nT * __restrict__ p_nT_elec_src,
nT * __restrict__ p_nT_neut_use,
nT * __restrict__ p_nT_ion_use,
nT * __restrict__ p_nT_elec_use,
nn * __restrict__ p_nn_ionise_recombine,
// Where are these used and how to avoid storing?
// There isn't a way: we have to spread this information out to minor cells.
bool b2ndpass
)
{
long index = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info[index]; // 3 doubles?
nT nT_elec_use, nT_ion_use, nT_neut_use;
nT nT_elec_src, nT_ion_src, nT_neut_src;
//f64 n_n_plus, n_ion_plus, n_e_plus;
nn nirec;
nT_elec_src = p_nT_elec_src[index];
nT_neut_src = p_nT_neut_src[index];
nT_ion_src = p_nT_ion_src[index];
if (b2ndpass) {
nT_elec_use = p_nT_elec_use[index];
nT_ion_use = p_nT_ion_use[index];
nT_neut_use = p_nT_neut_use[index];
} else {
nT_elec_use = nT_elec_src;
nT_ion_use = nT_ion_src;
nT_neut_use = nT_neut_src;
};
if (info.flag == DOMAIN_VERTEX)
{
// . Do ionisation --> know how much ionisation change in mom
// (or v) of each species and rate of adding to T.
{
// My clever way: anticipate some change only in some of the
// T values. But use estimated T 1/2 on the 2nd pass.
f64 S, R, sqrtTeeV;
if (b2ndpass == 0) {
// Use a deliberate underestimate that takes into account some
// expected change in Te from ionisation:
// For sqrt(T) we use sqrt((T_k+T_k+1)/2).
f64 T_eV_k = nT_elec_src.T/kB;
f64 buildingblock = 1.0e-5*exp(-13.6/T_eV_k)/
(13.6*(6.0*13.6+T_eV_k));
buildingblock = buildingblock*buildingblock;
f64 temp = 0.25*h*nT_neut_src.n*TWOTHIRDS*13.6*buildingblock;
S = - temp + sqrt( temp*temp + T_eV_k*buildingblock );
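// (Presumably S solves the quadratic obtained by evaluating the ionisation rate
// at an anticipated mid-step Te, lowered by ~(2/3)*13.6 eV per ionisation event;
// rearranging such a relation gives the -temp + sqrt(temp^2 + ...) form above.)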
// The 2nd pass, T will be a little less if ionisation
// is important, and we then go ahead and use that.
sqrtTeeV = sqrt(T_eV_k);
R = nT_elec_src.n*8.75e-27/
((T_eV_k*T_eV_k)*(T_eV_k*T_eV_k)*sqrtTeeV); // take n_i*n_e*R
// Nothing fancy for recombination rate.
// It should only be a problem in case that we are weakly ionised,
// and at least in that case the first go will be limited by the
// measure to avoid n < 0.
nirec.n_ionise = nT_neut_src.n*nT_elec_src.n*S*h;
nirec.n_recombine = nT_elec_src.n*nT_ion_src.n*R*h;
f64 netionise = nirec.n_ionise - nirec.n_recombine;
if ((nT_neut_src.n-netionise < 0.0) || (nT_ion_src.n+netionise < 0.0) || (nT_elec_src.n+netionise < 0.0))
{
// in denom goes n_ionise/n_n and n_recombine/n_lowest
if (nT_ion_src.n < nT_elec_src.n) {
f64 denom = (1.0 + h*nT_elec_src.n*S + h*nT_elec_src.n*R);
nirec.n_ionise /= denom;
nirec.n_recombine /= denom;
} else {
f64 denom = (1.0 + h*nT_elec_src.n*S + h*nT_ion_src.n*R);
nirec.n_ionise /= denom;
nirec.n_recombine /= denom;
};
netionise = nirec.n_ionise - nirec.n_recombine;
};
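// (The rescaling above acts as a positivity limiter: when the explicit amounts
// would push n_n, n_i or n_e below zero, dividing by the implicit-style factor
// 1 + h*n*S + h*n*R caps the net change at what the available density allows.)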
// n_ion_plus = nT_ion_src.n + netionise;
// n_n_plus = nT_neut_src.n - netionise;
// n_e_plus = nT_elec_src.n + netionise;
} else {
// Use Te_1/2 throughout:
f64 T_eV = nT_elec_use.T/kB; // large negative
sqrtTeeV = sqrt(T_eV);
S = 1.0e-5*sqrtTeeV*exp(-13.6/T_eV)/(13.6*(6.0*13.6+T_eV));
R = nT_elec_use.n*8.75e-27/((T_eV*T_eV)*(T_eV*T_eV)*sqrtTeeV);
nirec.n_ionise = nT_neut_use.n*nT_elec_use.n*S*h;
nirec.n_recombine = nT_elec_use.n*nT_ion_use.n*R*h;
f64 netionise = nirec.n_ionise - nirec.n_recombine;
// Am I right that they are getting wiped out? If so that makes a difference here,
// drastically reducing the amount of recombination because it recognises there are fewer particles left.
if ((nT_neut_src.n-netionise < 0.0) || (nT_ion_src.n+netionise < 0.0) || (nT_elec_src.n+netionise < 0.0))
{
f64 denom;
if (nT_ion_src.n < nT_elec_src.n) {
denom = (1.0 + h*nT_elec_src.n*S + h*nT_elec_src.n*R);
} else {
denom = (1.0 + h*nT_elec_src.n*S + h*nT_ion_src.n*R);
};
nirec.n_ionise = nT_neut_src.n*nT_elec_src.n*S*h/denom;
nirec.n_recombine = nT_elec_src.n*nT_ion_src.n*R*h/denom;
netionise = nirec.n_ionise - nirec.n_recombine;
};
// n_ion_plus = nT_ion_src.n + netionise;
// n_n_plus = nT_neut_src.n - netionise;
// n_e_plus = nT_elec_src.n + netionise;
};
} // end of "do ionisation"
// We now have: n_ion_plus, n_n_plus, n_e_plus, n_ionise, n_recombine.
} else {
// (info.flag != DOMAIN_VERTEX)
// n_e_plus = nT_elec_src.n;
// n_n_plus = nT_neut_src.n;
// n_ion_plus = nT_ion_src.n;
// Zero these so the unconditional store below does not write uninitialised values:
nirec.n_ionise = 0.0;
nirec.n_recombine = 0.0;
};
// Save output...
p_nn_ionise_recombine[index] = nirec;
// nT_elec_out ? We do not need it for midpoint v routine, because
// we load n_ionise_recombine to recreate.
//p_nT_elec_out[index].n = n_e_plus;
//p_nT_ion_out[index].n = n_ion_plus;
//p_nT_neut_out[index].n = n_n_plus;
// Therefore we should probably use the midpoint routine to do this save,
// because we will be doing a save-off of T anyway.
// NOPE - midpoint routine applies to minor not major.
}
// Note: unroll can increase register pressure!
__global__ void Kernel_Midpoint_v_and_Adot (
f64 const h,
CHAR4 * __restrict__ p_tri_perinfo,
nT * __restrict__ p_nT_neut_src,
nT * __restrict__ p_nT_ion_src,
nT * __restrict__ p_nT_elec_src,
// n_k appears because it is needed as part of the midpoint step.
// Am I serious about __restrict__ ? Yes if pass 0 as use on 1st pass
// On 2nd pass you do need different n for half time?
// n basically changes with ionisation
nT * __restrict__ p_nT_neut_use, // k+1/2 on 2nd pass; on 1st pass n is adjusted by ionisation - correct?
nT * __restrict__ p_nT_ion_use, // k or k+1/2
nT * __restrict__ p_nT_elec_use, // k or k+1/2 --- for forming nu etc...
nn * __restrict__ p_nn_ionise_recombine,
// Have to load 2 additional doubles due to doing ionisation outside.
f64_vec2 * __restrict__ p_tri_centroid,
// CAN WE MAKE THIS BE EXTENDED TO APPLY FOR CENTRAL VALUES ALSO?
// For now have to include separate set of positions:
structural * __restrict__ p_info,
// We use this when we assume we are adding to v's momentum and for doing the n_k+1 part of the midpt formula
f64_vec3 * __restrict__ p_B,
f64_vec3 * __restrict__ p_v_n_src,
f64_vec3 * __restrict__ p_v_ion_src,
f64_vec3 * __restrict__ p_v_e_src,
f64 * __restrict__ p_area, // It's assumed to be area_k+1 but I guess it's area_k+1/2 ... too bad?
f64_vec2 * __restrict__ p_grad_phi_half,
f64_vec3 * __restrict__ p_Lap_A_half,
f64_vec3 * __restrict__ p_Adot_k,
f64_vec3 * __restrict__ p_MomAdditionRate_neut,
f64_vec3 * __restrict__ p_MomAdditionRate_ion,
f64_vec3 * __restrict__ p_MomAdditionRate_elec,
// OKay let's check out if aTP was even correct.
f64_vec2 * __restrict__ p_grad_Te,
f64_vec3 * __restrict__ p_v_neut_out,
f64_vec3 * __restrict__ p_v_ion_out,
f64_vec3 * __restrict__ p_v_elec_out,
f64 * __restrict__ p_resistive_heat_neut, // additions to NT
f64 * __restrict__ p_resistive_heat_ion,
f64 * __restrict__ p_resistive_heat_elec,
f64_vec3 * p_dAdt_out,
bool b2ndPass,
f64 const EzTuning,
f64 * __restrict__ p_Iz,
f64 * __restrict__ p_sigma_zz
)
{
// Still going to need to know the Ez linear relationship
// from this function:
__shared__ f64 Iz[threadsPerTileMinor];
__shared__ f64 sigma_zz[threadsPerTileMinor];
// Only dimension here what will have def been used by the time we hit the
// largest memory footprint:
f64 nu_ne_MT_over_n, nu_ni_MT_over_n, nu_eiBar, nu_ieBar, nu_eHeart; // 5 double
Vector3 omega_ce, v_e_k, v_ion_k, v_n_k, v_n_0; // 15 double
Vector3 v_ion_plus, v_n_plus, v_e_plus; // 9 doubles
CHAR4 per_info;
f64 n_e_plus, n_ion_plus, n_n_plus;
nT nT_elec_use,nT_ion_use,nT_neut_use;
nn n_ionrec;
Vector3 Lap_A_half;
long index = threadIdx.x + blockIdx.x*blockDim.x;
omega_ce = p_B[index];
omega_ce *= eovermc; // Trying to avoid 3 separate accesses [which would not even be contig!!]
// Shane Cook p.176 We do not need to put the reads where they are needed, can put first.
// It is UNCLEAR where I read that we have to put reads outside of branches.
// I do not find corroborating sources.
// index < Nverts is more or less guaranteed but it would of course be nice not to
// do fetches for info.flag != DOMAIN_VERTEX.
v_e_k = p_v_e_src[index];
v_ion_k = p_v_ion_src[index];
v_n_k = p_v_n_src[index];
f64 area = p_area[index];
f64_vec2 centroid;
if (index < BEGINNING_OF_CENTRAL)
{
centroid = p_tri_centroid[index];
} else {
centroid = p_info[index-BEGINNING_OF_CENTRAL].pos;
};
per_info = p_tri_perinfo[index];
n_ionrec = p_nn_ionise_recombine[index];
//n_n_plus = p_n_n_plus[index].n;
//n_ion_plus = p_n_ion_plus[index].n;
//n_e_plus = p_n_e_plus[index].n;
if ((OUTPUT) && (index == REPORT)) {
printf("v_k %1.5E %1.5E %1.5E\n",v_n_k.z,v_ion_k.z,v_e_k.z);
printf("Bxy %1.5E %1.5E omega.z %1.5E \n",omega_ce.x/eovermc,omega_ce.y/eovermc,omega_ce.z);
};
if ((per_info.flag == DOMAIN_TRIANGLE)) // try loading inside, outside
{
// Now the v calcs:
{
nT nT_elec_src, nT_ion_src, nT_neut_src; // register pressure?
nT_elec_src = p_nT_elec_src[index];
nT_ion_src = p_nT_ion_src[index];
nT_neut_src = p_nT_neut_src[index];
// Question whether these should be inside brace.
n_n_plus = nT_neut_src.n + n_ionrec.n_recombine-n_ionrec.n_ionise;
n_ion_plus = nT_ion_src.n + n_ionrec.n_ionise-n_ionrec.n_recombine;
n_e_plus = nT_elec_src.n + n_ionrec.n_ionise-n_ionrec.n_recombine;
if (b2ndPass) {
nT_elec_use = p_nT_elec_use[index];
nT_ion_use = p_nT_ion_use[index];
nT_neut_use = p_nT_neut_use[index];
} else {
nT_elec_use = nT_elec_src;
nT_ion_use = nT_ion_src;
nT_neut_use = nT_neut_src;
};
// Try to make do with 3 tensors: 27 doubles.
{
f64 sqrt_Te,ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(nT_elec_use.T);
ionneut_thermal = sqrt(nT_ion_use.T/m_ion+nT_neut_use.T/m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te*over_sqrt_m_e;
lnLambda = Get_lnLambda_d(nT_ion_use.n,nT_elec_use.T);
s_in_MT = Estimate_Neutral_MT_Cross_section(nT_ion_use.T*one_over_kB);
s_en_MT = Estimate_Neutral_MT_Cross_section(nT_elec_use.T*one_over_kB);
s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(nT_elec_use.T*one_over_kB);
// Need nu_ne etc to be defined:
nu_ne_MT_over_n = s_en_MT*electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT_over_n = s_in_MT*ionneut_thermal;
nu_eiBar = nu_eiBarconst*kB_to_3halves*nT_ion_use.n*lnLambda/(nT_elec_use.T*sqrt_Te);
nu_ieBar = nT_elec_use.n*nu_eiBar/nT_ion_use.n;
nu_eHeart = 1.87*nu_eiBar +
//nu_en_visc; // Why used visc??
nT_neut_use.n*s_en_visc*electron_thermal;
}
// Can avoid 6 doubles on 1st pass if we put these defined above and do not use nT_use from here.
f64 Beta_ni, Beta_ne;
// Get v_n (v_e, v_i):
Beta_ne = h*0.5*(m_e/(m_e+m_n))*nu_ne_MT_over_n*nT_elec_use.n; // avoid division with a #define!
Beta_ni = h*0.5*(m_ion/(m_ion+m_n))*nu_ni_MT_over_n*nT_ion_use.n;
v_n_0 =
// ionisation addition to neutral momentum:
((nT_neut_src.n - n_ionrec.n_ionise)*v_n_k
+ n_ionrec.n_recombine*(m_i_over_m_n*v_ion_k+m_e_over_m_n*v_e_k))/n_n_plus
// NB: a stray ';' after /n_n_plus previously turned the two friction terms below
// into a statement with no effect; they belong to v_n_0, as the later
// normalisation by 1/(1 + Beta_ne + Beta_ni) assumes.
- Beta_ne*(v_n_k-v_e_k)
- Beta_ni*(v_n_k-v_ion_k);
{
//Vector2 grad_nT_neut = p_grad_nT_neut[index];
Vector3 MomAdditionRate = p_MomAdditionRate_neut[index];
// We can avoid a fetch if we just store the sum(diff) of these in 1 Vector3
// But be careful : how do we work out visc heating? Do that first.
// We stored [gradnTintegrated / m_s] = d/dt Nv
v_n_0 += h*( //-grad_nT_neut.x + ViscMomAdditionRate_neut.x)/(n_n_plus*m_n);
MomAdditionRate/(n_n_plus*area));
f64 over = 1.0/(1.0 + Beta_ne + Beta_ni);
v_n_0 *= over;
Beta_ni *= over;
Beta_ne *= over;
}
// Now get v_i (v_e):
f64 total =
(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x+omega_ce.y*omega_ce.y+omega_ce.z*omega_ce.z);
Vector3 vec_e, vec_i, dAdt_k;
f64 EzShape;
{
Vector2 grad_phi, GradTe;
Vector3 MomAdditionRate; // We could use it first as this, union with dAdt_k
grad_phi = p_grad_phi_half[index];
Lap_A_half = p_Lap_A_half[index];
dAdt_k = p_Adot_k[index];
MomAdditionRate = p_MomAdditionRate_ion[index];
// TRY putting this stuff outside the branch to see what happens.
// ***************************************************************
EzShape = GetEzShape(centroid.modulus());
// Set up most of vec_e, vec_i here:
vec_i = // Ionisation affected v_i_k:
((nT_ion_src.n-n_ionrec.n_recombine)*v_ion_k + n_ionrec.n_ionise*v_n_k)/n_ion_plus
- h*0.5*moverM*omega_ce.cross(v_ion_k);
vec_i +=
h*qoverM*( //- grad_phi [[below]]
- dAdt_k/c - h*c*0.5*Lap_A_half
- h*M_PI*e*(nT_ion_src.n*v_ion_k - nT_elec_src.n*v_e_k))
// nu_ni/n * n_n = nu_in
- h*0.5*(m_n/(m_ion+m_n))*nu_ni_MT_over_n*nT_neut_use.n*(v_ion_k-v_n_k-v_n_0)
- h*0.5*moverM*nu_ieBar*(v_ion_k-v_e_k);
vec_i.x -= h*qoverM*grad_phi.x;
vec_i.y -= h*qoverM*grad_phi.y;
vec_i.z += h*qoverM*EzShape*EzTuning;
if ((OUTPUT) && (index == REPORT)) printf("vec_i %1.5E %1.5E %1.5E\n",vec_i.x,vec_i.y,vec_i.z);
// -grad_nT_ion.x + ViscMomAdditionRate_ion.x
// Vector3 ViscMomAdditionRate_ion = p_visc_mom_addition_rate_ion[index];
// We can avoid a fetch if we just store the sum(diff) of these in 1 Vector3
// But be careful : how do we work out visc heating? It has to be fetched separately anyway.
vec_i += h*((MomAdditionRate)/(n_ion_plus*area));
if ((OUTPUT) && (index == REPORT)) printf("vec_i w/press %1.5E %1.5E %1.5E\n",vec_i.x,vec_i.y,vec_i.z);
MomAdditionRate = p_MomAdditionRate_elec[index];
// We almost certainly should take v += (ViscMomAddition/n_k+1)
// The same applies to grad_nT_ion : integrate this over [t_k,t_k+1]
// and we get the addition to momentum.
GradTe = p_grad_Te[index];
// Add thermal force on ions:
f64 fac = 1.5*h*(nu_ieBar/(m_ion*nu_eHeart*total));
vec_i.x += fac* (// (Upsilon.xx)*GradTe.x + Upsilon.xy*GradTe.y
(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart)*GradTe.x
+ (omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z)*GradTe.y);
// divide by (nu*nu+omega*omega) already in fac
vec_i.y += fac* (
(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z)*GradTe.x
+ (omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart)*GradTe.y);
vec_i.z += fac* (
(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y)*GradTe.x
+ (omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x)*GradTe.y);
// if (index == 15936) printf("vec_i %1.5E \n",vec_i.z);
// Add Upsilon part of collisional term:
fac = h*0.5*0.9*moverM*nu_eiBar*nu_ieBar/(nu_eHeart*total);
vec_i.x += fac*( (omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart)*(v_ion_k.x-v_e_k.x)
+ (omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z)*(v_ion_k.y-v_e_k.y)
+ (omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y)*(v_ion_k.z-v_e_k.z)
);
vec_i.y += fac*( (omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z)*(v_ion_k.x-v_e_k.x)
+ (omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart)*(v_ion_k.y-v_e_k.y)
+ (omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x)*(v_ion_k.z-v_e_k.z)
);
vec_i.z += fac*( (omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y)*(v_ion_k.x-v_e_k.x)
+ (omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x)*(v_ion_k.y-v_e_k.y)
+ (omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart)*(v_ion_k.z-v_e_k.z)
);
if ((OUTPUT) && (index == REPORT)) {
printf("%d vik %1.4E %1.4E %1.4E Vi %1.4E %1.4E %1.4E \n",
index,v_ion_k.x,v_ion_k.y,v_ion_k.z,vec_i.x,vec_i.y,vec_i.z);
};
vec_e = ((nT_elec_src.n-n_ionrec.n_recombine)*v_e_k + n_ionrec.n_ionise*v_n_k)/n_e_plus;
vec_e += h*0.5*omega_ce.cross(v_e_k)
- h*eoverm*(// -grad_phi // below
- dAdt_k/c - h*c*0.5*Lap_A_half
- h*M_PI*e*(nT_ion_src.n*v_ion_k - nT_elec_src.n*v_e_k))
- 0.5*h*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n*(v_e_k-v_n_k-v_n_0)
- 0.5*h*nu_eiBar*(v_e_k-v_ion_k);
vec_e.x += h*eoverm*grad_phi.x ;
vec_e.y += h*eoverm*grad_phi.y;
vec_e.z += -h*eoverm*EzShape*EzTuning;
if ((OUTPUT) && (index == REPORT))
printf("vec_e %1.6E %1.6E %1.6E\n",vec_e.x,vec_e.y,vec_e.z);
//vec_e.x += h*( (-grad_nT_e.x )/(n_e_plus*m_e));
vec_e += h*(MomAdditionRate/(n_e_plus*area)); // MAR = d/dt (Neve)
// Add thermal force to electrons:
fac = -(1.5*h*nu_eiBar/(m_e*nu_eHeart*total));
vec_e.x += fac*(
(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart)*GradTe.x
+ (omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z)*GradTe.y);
vec_e.y += fac*(
(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z)*GradTe.x
+ (omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart)*GradTe.y);
vec_e.z += fac*(
(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y)*GradTe.x
+ (omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x)*GradTe.y);
if ((OUTPUT) && (index == REPORT)) {
printf("vec_e intermed %1.6E %1.6E %1.6E\n",vec_e.x,vec_e.y,vec_e.z);
printf("h*eoverm*grad_phi %1.6E %1.6E \n",h*eoverm*grad_phi.x,h*eoverm*grad_phi.y);
printf("h*(MomAdditionRate/(n_e_plus*area)) %1.6E %1.6E \n",
h*(MomAdditionRate.x/(n_e_plus*area)),
h*(MomAdditionRate.y/(n_e_plus*area)));
printf("h*0.5*omega_ce.cross(v_e_k).z %1.10E \n",
h*0.5*(omega_ce.cross(v_e_k)).z);
printf("h*eoverm*dAdt_k/c %1.6E \n h*eoverm*h*c*0.5*Lap_A_half %1.6E\n"
" h*eoverm*h*M_PI*e*() %1.6E\n"
"0.5*h*...*(v_e_k-v_n_k-v_n_0) %1.6E\n"
"0.5*h*nu_eiBar*(v_e_k-v_ion_k) %1.6E\n",
h*eoverm*dAdt_k.z/c ,h*eoverm*h*c*0.5*Lap_A_half.z,
h*eoverm*h*M_PI*e*(nT_ion_src.n*v_ion_k.z - nT_elec_src.n*v_e_k.z),// comes out -1.033e6.
// = 8e-18*2e17*(viz-vez) = 1.6 (viz-vez) = 1.6(-vez) = 1e6.
// Where is the term that cancels its impact?
0.5*h*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n*(v_e_k-v_n_k-v_n_0).z,
0.5*h*nu_eiBar*(v_e_k-v_ion_k).z);
printf("-h*eoverm*EzShape*EzTuning %1.6E\n",
-h*eoverm*EzShape*EzTuning);
printf("thermal contrib %1.6E\n",
fac*(
(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y)*GradTe.x
+ (omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x)*GradTe.y));
};
// Add Upsilon part of collisional term:
fac = 0.5*h*0.9*nu_eiBar*nu_eiBar/(nu_eHeart*total);
vec_e.x += fac*(
(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart)*(v_e_k.x-v_ion_k.x)
+ (omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z)*(v_e_k.y-v_ion_k.y)
+ (omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y)*(v_e_k.z-v_ion_k.z));
vec_e.y += fac*(
(omega_ce.y*omega_ce.x + nu_eHeart*omega_ce.z)*(v_e_k.x-v_ion_k.x)
+ (omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart)*(v_e_k.y-v_ion_k.y)
+ (omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x)*(v_e_k.z-v_ion_k.z));
vec_e.z += fac*(
(omega_ce.z*omega_ce.x - nu_eHeart*omega_ce.y)*(v_e_k.x-v_ion_k.x)
+ (omega_ce.z*omega_ce.y + nu_eHeart*omega_ce.x)*(v_e_k.y-v_ion_k.y)
+ (omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart)*(v_e_k.z-v_ion_k.z));
if ((OUTPUT) && (index == REPORT))
{
printf("contrib_upsilon %1.6E\n",
fac*(
(omega_ce.z*omega_ce.x - nu_eHeart*omega_ce.y)*(v_e_k.x-v_ion_k.x)
+ (omega_ce.z*omega_ce.y + nu_eHeart*omega_ce.x)*(v_e_k.y-v_ion_k.y)
+ (omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart)*(v_e_k.z-v_ion_k.z)));
printf("%d vek %1.4E %1.4E %1.4E \nVe %1.4E %1.4E %1.4E \n",
index,v_e_k.x,v_e_k.y,v_e_k.z,vec_e.x,vec_e.y,vec_e.z);
};
}
Tensor3 Tens1, Tens2, Tens3;
// We are going to need Tens1, Tens2 again
// BUT have to reallocate BECAUSE ...
// we don't want them to be created prior to this.
// and we can't stand to put heating in this same scope
// which also has Tens3.
// Tens1 is going to be "G"
// Set Tens3 = Upsilon_eHeart:
//// nu = nu_eHeart, omega =
//{
// f64 total = nu_eHeart*nu_eHeart+
// omega_ce.x*omega_ce.x + omega_ce.y*omega_ce.y
// +omega_ce.x*omega_ce.z;
// Upsilon_eHeart.xx = (nu_eHeart*nu_eHeart +omega_ce.x*omega_ce.x)/total;
// Upsilon_eHeart.xy = (omega_ce.x*omega_ce.y-nu_eHeart*omega_ce.z)/total;
// Upsilon_eHeart.xz = (omega_ce.x*omega_ce.z+nu_eHeart*omega_ce.y)/total;
// Upsilon_eHeart.yx =
//}
// Upsilon is used 8 times.
// But it would keep getting wiped out.
// So it's a real problem. Storing it is another 9 doubles which is bad.
// Try to edit to at least copy-paste the code...
f64 fac = h*0.5*moverM*0.9*nu_eiBar*nu_ieBar/(nu_eHeart*total);
// We inserted 1/total into fac.
Tens1.xx = 1.0
// + no contrib from omega_ci x
+ (h*0.5*m_n/(m_ion+m_n))*nu_ni_MT_over_n*nT_neut_use.n
*(1.0-Beta_ni)
+ h*0.5*moverM*nu_ieBar
+ h*h*e*e*M_PI* n_ion_plus / m_ion
;
Tens1.yy = Tens1.xx;
Tens1.zz = Tens1.xx;
Tens1.xx -= fac*//Upsilon_eHeart.xx/total;
// division by "total = nu*nu+omega*omega" is in fac.
(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x);
Tens1.yy -= fac*(nu_eHeart*nu_eHeart + omega_ce.y*omega_ce.y);
Tens1.zz -= fac*(nu_eHeart*nu_eHeart + omega_ce.z*omega_ce.z);
Tens1.xy = -h*0.5*moverM*omega_ce.z
- fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens1.xz = h*0.5*moverM*omega_ce.y
- fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens1.yx = h*0.5*moverM*omega_ce.z
- fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens1.yz = -h*0.5*moverM*omega_ce.x
- fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens1.zx = -h*0.5*moverM*omega_ce.y
- fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens1.zy = h*0.5*moverM*omega_ce.x
- fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
// ... replace omega_ci = omega_ce*moverM ...
// Formula for Upsilon_eHeart comes from Krook model subsection in model document.
// We will prefer not to create omega_ci vector of course!!!
Tens1.Inverse(Tens2); // Tens2 now = G^-1
// Now create F:
fac = h*0.5*0.9*nu_eiBar*nu_eiBar/(nu_eHeart*total);
Tens3.xx = -h*0.5*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n*Beta_ni
-h*0.5*nu_eiBar
- (h*h*e*e*M_PI*over_m_e) * n_ion_plus;
Tens3.yy = Tens3.xx;
Tens3.zz = Tens3.xx;
Tens3.xx += fac*(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x);
Tens3.yy += fac*(nu_eHeart*nu_eHeart + omega_ce.y*omega_ce.y);
Tens3.zz += fac*(nu_eHeart*nu_eHeart + omega_ce.z*omega_ce.z);
Tens3.xy = fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens3.xz = fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens3.yx = fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens3.yz = fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens3.zx = fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens3.zy = fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
Tens1 = Tens3*Tens2;
// Contents now: { F G^-1, G^-1, F }
// Now create the vector for v_e:
// vec_e = d' - F G^-1 d
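// What follows is a Schur-complement solve of the coupled 2x2 block system
//   [ G  U ] [v_i+]   [ d  ]        with d  = vec_i,  d' = vec_e,
//   [ F  V ] [v_e+] = [ d' ]
// i.e.  v_e+ = (V - F G^-1 U)^-1 (d' - F G^-1 d)  and  v_i+ = G^-1 (d - U v_e+),
// which is what the Tens1/Tens2/Tens3 reuse below implements.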
vec_e -= Tens1*vec_i;
if ((OUTPUT) && (index == REPORT))
printf("modified vec_e \n %1.6E %1.6E %1.6E \n",
vec_e.x,vec_e.y,vec_e.z);
// Let's watch out:
// this means if we change EzExt then we change vec_e.z and vec_i.z
// directly, but we also change vec_e via vec_i.
// We need to store that from this point because we are about to wipe out Tens1.
Vector3 vec_e_effect_of_EzTuning;
vec_e_effect_of_EzTuning.x = -Tens1.xz*(h*qoverM*EzShape);
vec_e_effect_of_EzTuning.y = -Tens1.yz*(h*qoverM*EzShape);
vec_e_effect_of_EzTuning.z = -h*eoverm*EzShape-Tens1.zz*(h*qoverM*EzShape);
// Contents now: { F G^-1, G^-1, F }
// Populate Tens3 as U. Multiply to get Tens2 = FG^-1 U
Tens3.xx = -0.5*h*(m_n/(m_ion+m_n))*nu_ni_MT_over_n*nT_neut_use.n*Beta_ne
- 0.5*h*moverM*nu_ieBar
- h*h*e*qoverM* M_PI * n_e_plus;
Tens3.yy = Tens3.xx;
Tens3.zz = Tens3.xx;
fac = 0.5*h*moverM*0.9*nu_eiBar*nu_ieBar/(nu_eHeart*total);
Tens3.xx += fac*(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart);
Tens3.yy += fac*(omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart);
Tens3.zz += fac*(omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart);
Tens3.xy = fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens3.xz = fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens3.yx = fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens3.yz = fac*(omega_ce.z*omega_ce.y - nu_eHeart*omega_ce.x);
Tens3.zx = fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens3.zy = fac*(omega_ce.z*omega_ce.y + nu_eHeart*omega_ce.x);
// We really could do with storing Upsilon somehow.
Tens2 = Tens1*Tens3;
// Tens1 = V - F G^-1 U
// V:
Tens1.xx = 1.0 + h*0.5*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n
*(1.0-Beta_ne)
+ h*0.5*nu_eiBar + h*h*e*eoverm* M_PI* n_e_plus;
Tens1.yy = Tens1.xx;
Tens1.zz = Tens1.xx;
fac = -0.5*h*0.9*nu_eiBar*nu_eiBar/(nu_eHeart*total);
Tens1.xx += fac*(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart);
Tens1.yy += fac*(omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart);
Tens1.zz += fac*(omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart);
Tens1.xy = h*0.5*omega_ce.z
+ fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens1.xz = -h*0.5*omega_ce.y
+ fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens1.yx = -h*0.5*omega_ce.z
+ fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens1.yz = h*0.5*omega_ce.x
+ fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens1.zx = h*0.5*omega_ce.y
+ fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens1.zy = -h*0.5*omega_ce.x
+ fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
if ((OUTPUT) && (index == REPORT))
printf( "nu_eiBar %1.5E n_e_plus %1.5E nu_en_MT %1.5E\n"
"V \n %1.6E %1.6E %1.6E \n%1.6E %1.6E %1.6E \n%1.6E %1.6E %1.6E \n\n",
nu_eiBar,n_e_plus,nu_ne_MT_over_n*nT_neut_use.n,
Tens1.xx,Tens1.xy,Tens1.xz,
Tens1.yx,Tens1.yy,Tens1.yz,
Tens1.zx,Tens1.zy,Tens1.zz);
Tens1 -= Tens2;
if ((OUTPUT) && (index == REPORT))
printf("V-FG^-1U \n %1.6E %1.6E %1.6E \n%1.6E %1.6E %1.6E \n%1.6E %1.6E %1.6E \n\n",
Tens1.xx,Tens1.xy,Tens1.xz,
Tens1.yx,Tens1.yy,Tens1.yz,
Tens1.zx,Tens1.zy,Tens1.zz);
// Now calculate v_e:
// Two cases: on 1st pass we should
// -- insert the
Tens1.Inverse(Tens2);
v_e_plus = Tens2*vec_e;
// DEBUG:
// f64_vec3 vec_e0 = vec_e;
// vec_e0.z += h*eoverm*EzShape*EzTuning;
// f64_vec3 v_e_0 = Tens2*vec_e0;
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if ((OUTPUT) && (index == REPORT)) {
printf("(V-FG^-1U)^-1 \n %1.6E %1.6E %1.6E ) %1.6E = %1.6E\n%1.6E %1.6E %1.6E ) %1.6E = %1.6E \n%1.6E %1.6E %1.6E ) %1.6E = %1.6E \n\n",
Tens2.xx,Tens2.xy,Tens2.xz,vec_e.x,v_e_plus.x,
Tens2.yx,Tens2.yy,Tens2.yz,vec_e.y,v_e_plus.y,
Tens2.zx,Tens2.zy,Tens2.zz,vec_e.z,v_e_plus.z);
printf("\n");
// Test relationship:
printf("(1-hh) %1.4E (1-..)/(1+..) %1.4E\n",
(1.0-h*h*e*eoverm*M_PI*nT_elec_src.n),(1.0-h*h*e*eoverm*M_PI*nT_elec_src.n)/(1.0+h*h*e*eoverm*M_PI*n_e_plus));
printf("vekz %1.4E rat*vekz %1.4E vez %1.4E \n************\n",
v_e_k.z,v_e_k.z*(1.0-h*h*e*eoverm*M_PI*nT_elec_src.n)/(1.0+h*h*e*eoverm*M_PI*n_e_plus),v_e_plus.z);
};
// Effect of EzTuning:
// We have to record a couple of extra values here:
{
{
Vector3 ve_plus_of_EzTuning = Tens2*vec_e_effect_of_EzTuning;
real viz_plus_of_EzTuning;
{
Vector3 temp = Tens3*ve_plus_of_EzTuning;
viz_plus_of_EzTuning = Tens2.zz*h*qoverM*EzShape
- Tens2.zx*temp.x
- Tens2.zy*temp.y
- Tens2.zz*temp.z;// Where are we getting area?
}
sigma_zz[threadIdx.x] = q*area*(viz_plus_of_EzTuning*n_ion_plus - ve_plus_of_EzTuning.z*n_e_plus);
}
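// sigma_zz accumulated here is (to linear order) this cell's contribution to
// dI_z/d(EzTuning); presumably the host combines sum(Iz) and sum(sigma_zz) to
// adjust EzTuning so the total z-current hits its prescribed value.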
// Some changes resulted in lower stack frame, higher loads+stores.
// We should realise that way outside L1, this is a worsening. NVM.
// Can we preserve U = Tens3?
// Now recreate G which we overwrote:
Tens1.xx = 1.0
// + no contrib from omega_ci x
+ (h*0.5*m_n/(m_ion+m_n))*nu_ni_MT_over_n*nT_neut_use.n
*(1.0-Beta_ni)
+ h*0.5*moverM*nu_ieBar
+ h*h*e*e* M_PI* n_ion_plus / m_ion
;
Tens1.yy = Tens1.xx;
Tens1.zz = Tens1.xx;
Tens1.xx -= fac*//Upsilon_eHeart.xx/total;
// division by "total = nu*nu+omega*omega" is in fac.
(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x);
Tens1.yy -= fac*(nu_eHeart*nu_eHeart + omega_ce.y*omega_ce.y);
Tens1.zz -= fac*(nu_eHeart*nu_eHeart + omega_ce.z*omega_ce.z);
Tens1.xy = -h*0.5*moverM*omega_ce.z
- fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens1.xz = h*0.5*moverM*omega_ce.y
- fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens1.yx = h*0.5*moverM*omega_ce.z
- fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens1.yz = -h*0.5*moverM*omega_ce.x
- fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens1.zx = -h*0.5*moverM*omega_ce.y
- fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens1.zy = h*0.5*moverM*omega_ce.x
- fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
// ... replace omega_ci = omega_ce*moverM ...
// Formula for Upsilon_eHeart comes from Krook model subsection in model document.
// We will prefer not to create omega_ci vector of course!!!
Tens1.Inverse(Tens2); // Tens2 now = G^-1
v_ion_plus = Tens2*(vec_i - Tens3*v_e_plus);
Iz[threadIdx.x] = q*area*(v_ion_plus.z*n_ion_plus - v_e_plus.z*n_e_plus);
// Debug report: fires when Iz comes out NaN. (Earlier debug triggers kept for
// reference: ((OUTPUT) && (index == REPORT)) and (Iz[threadIdx.x] > 1.0e6).)
if ((Iz[threadIdx.x] != Iz[threadIdx.x]) && (index < BEGINNING_OF_CENTRAL)) {
printf("%d Iz %1.5E sig %1.4E ne %1.4E vez %1.4E\n",
index,
//q*area*(v_ion_plus.z*n_ion_plus - v_e_0.z*n_e_plus),
Iz[threadIdx.x],
sigma_zz[threadIdx.x],
n_e_plus, v_e_plus.z);
};
} // ve_plus_of_EzTuning goes out of scope
v_n_plus = v_n_0 + Beta_ne*v_e_plus + Beta_ni*v_ion_plus;
// v_e = (V-F G^-1 U) ^-1 ( vec_e_0 )
// + EzTuning (V-F G^-1 U) ^-1 ( vec_e_1 )
// v_i = G^-1 (d - U ve)
// Now:
if (b2ndPass) {
p_dAdt_out[index] = dAdt_k + h*c*c*(Lap_A_half + TWO_PI_OVER_C*
//(J_k+J_k+1)
q*(nT_ion_src.n*v_ion_k-nT_elec_src.n*v_e_k +
n_ion_plus*v_ion_plus-n_e_plus*v_e_plus)
);
// The Jk comes from what was implied earlier: our n_plus as figured here.
// . v_e_plus needs here to be the estimate from our "best guess" Ez_ext.
// . Both J's, k and k+1, need to correspond to the evolution of rho.
};
// Lap_A_half is the only variable that is only in scope in this bracket.
// We really should try putting writes outside braces.
}
if (b2ndPass == 0) {
// WE NO LONGER WANT TO DO THIS: No save-off of n,T on minor cells.
// nT_neut_use.n = (nT_neut_src.n+nT_neut_use.n)*0.5;
// nT_neut_use.T = (nT_neut_src.T+nT_neut_use.T)*0.5;
// nT_ion_use.n = (nT_ion_src.n+nT_ion_use.n)*0.5;
// nT_ion_use.T = (nT_ion_src.T+nT_ion_use.T)*0.5;
// nT_elec_use.n = (nT_elec_src.n+nT_elec_use.n)*0.5;
// nT_elec_use.T = (nT_elec_src.T+nT_elec_use.T)*0.5;
v_n_plus = 0.5*(v_n_plus+v_n_k);
v_ion_plus = 0.5*(v_ion_plus+v_ion_k);
v_e_plus = 0.5*(v_e_plus+v_e_k);
// Tween back to output half-time system
}
// p_nT_neut_out[index] = nT_neut_use;
// p_nT_ion_out[index] = nT_ion_use;
// p_nT_elec_out[index] = nT_elec_use;
// Save them off in the heating routine that takes place on majors, not here.
p_v_neut_out[index] = v_n_plus;
p_v_ion_out[index] = v_ion_plus;
p_v_elec_out[index] = v_e_plus;
// On 1st pass we use this v to calculate viscosity.
// Time to sort out heating contribution:
f64 NnTn_addition, NiTi_addition, NeTe_addition;
// Inelastic friction heating:
NiTi_addition = area* THIRD*m_ion*n_ionrec.n_ionise*((v_ion_k-v_n_k).dot(v_ion_k-v_n_k));
NnTn_addition = area* THIRD*m_ion*n_ionrec.n_recombine*((v_ion_k-v_n_k).dot(v_ion_k-v_n_k));
NeTe_addition = area* THIRD*m_e*(n_ionrec.n_ionise + n_ionrec.n_recombine)*((v_e_k-v_n_k).dot(v_e_k-v_n_k));
{
f64 total =
(nu_eHeart*nu_eHeart + omega_ce.x*omega_ce.x+omega_ce.y*omega_ce.y+omega_ce.z*omega_ce.z);
Tensor3 Tens1;
Tens1.xx = h*nu_eiBar ;
Tens1.yy = Tens1.xx;
Tens1.zz = Tens1.xx;
f64 fac = -h*0.9*nu_eiBar*nu_eiBar/(nu_eHeart*total);
Tens1.xx += fac*(omega_ce.x*omega_ce.x + nu_eHeart*nu_eHeart);
Tens1.yy += fac*(omega_ce.y*omega_ce.y + nu_eHeart*nu_eHeart);
Tens1.zz += fac*(omega_ce.z*omega_ce.z + nu_eHeart*nu_eHeart);
Tens1.xy = fac*(omega_ce.x*omega_ce.y - nu_eHeart*omega_ce.z);
Tens1.xz = fac*(omega_ce.x*omega_ce.z + nu_eHeart*omega_ce.y);
Tens1.yx = fac*(omega_ce.x*omega_ce.y + nu_eHeart*omega_ce.z);
Tens1.yz = fac*(omega_ce.y*omega_ce.z - nu_eHeart*omega_ce.x);
Tens1.zx = fac*(omega_ce.x*omega_ce.z - nu_eHeart*omega_ce.y);
Tens1.zy = fac*(omega_ce.y*omega_ce.z + nu_eHeart*omega_ce.x);
// This was e-i resistive heating:
NeTe_addition +=
area* SIXTH*n_e_plus*TWOTHIRDS*m_e*(
// rate of change of ve. dot(ve-vi), integrated:
(Tens1*(v_e_k-v_ion_k)).dot(v_e_k-v_ion_k)
+
(Tens1*(v_e_k-v_ion_k+v_e_plus-v_ion_plus)).dot
(v_e_k-v_ion_k+v_e_plus-v_ion_plus) // 0.25 cancels with 4
+ (Tens1*(v_e_plus-v_ion_plus)).dot(v_e_plus-v_ion_plus)
);
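// (The SIXTH*[ f(k) + f(k + plus) + f(plus) ] structure is Simpson's rule in time:
// for a homogeneous quadratic f, 4*f((a+b)/2) equals f(a+b), which is what the
// "0.25 cancels with 4" remark above refers to.)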
}
{
// Inelastic frictional heating:
// Maybe this is actually FRICTIONAL heating e-n, i-n ;
// I think that's what we're actually looking at here.
f64 M_in = m_n*m_ion/((m_n+m_ion)*(m_n+m_ion));
// f64 M_en = m_n*m_e/((m_n+m_e)*(m_n+m_e));
// f64 M_ie = m_ion*m_e/((m_ion+m_e)*(m_ion+m_e));
NeTe_addition += area * SIXTH*n_e_plus*TWOTHIRDS*m_e*(
h*(m_n/(m_e+m_n))*nu_ne_MT_over_n*nT_neut_use.n*(
(v_e_k-v_n_k).dot(v_e_k-v_n_k)
+ (v_e_k-v_n_k + v_e_plus - v_n_plus).dot(v_e_k-v_n_k + v_e_plus - v_n_plus)
+ (v_e_plus-v_n_plus).dot(v_e_plus-v_n_plus)
));
f64 v_ni_diff_sq = SIXTH*((v_n_k-v_ion_k).dot(v_n_k-v_ion_k)
+ (v_n_k-v_ion_k+v_n_plus-v_ion_plus).dot(v_n_k-v_ion_k+v_n_plus-v_ion_plus)
+ (v_n_plus-v_ion_plus).dot(v_n_plus-v_ion_plus));
NiTi_addition += area * n_ion_plus*TWOTHIRDS*m_n*
h*M_in*nu_ni_MT_over_n*nT_neut_use.n*v_ni_diff_sq;
NnTn_addition += area * n_n_plus*TWOTHIRDS*m_ion*
h*M_in*nu_ni_MT_over_n*nT_ion_use.n*v_ni_diff_sq;
// We can deduce T_k+1 afterwards from n_k+1 T_k+1.
// OR, we can rearrange conservative equations to be for T_k+1.
}
// NOTE HERE WE PUT " = "
// Rather than, adding -- which we might want to do if we put visc+cond+thermoelectric
// into same slots.
// This is the addition to NT.
p_resistive_heat_neut[index] = NnTn_addition;
p_resistive_heat_ion[index] = NiTi_addition;
p_resistive_heat_elec[index] = NeTe_addition;
} else { // (info.flag == DOMAIN_VERTEX) ...
p_resistive_heat_neut[index] = 0.0;
p_resistive_heat_ion[index] = 0.0;
p_resistive_heat_elec[index] = 0.0; // Or save some writes by doing cudaMemset beforehand.
if (per_info.flag == OUTERMOST) {
// p_nT_neut_out[index] = nT_neut_src;
// p_nT_ion_out[index] = nT_ion_src;
// p_nT_elec_out[index] = nT_elec_src;
p_v_neut_out[index] = v_n_k;
p_v_ion_out[index] = v_ion_k;
p_v_elec_out[index] = v_e_k;
// Populate with something to avoid mishaps.
}
Vector3 dAdt_k,four_pi_over_c_J;
Lap_A_half = p_Lap_A_half[index]; // must be loaded here: this branch skipped the earlier load, yet Lap_A_half is used for Adot_plus below
dAdt_k = p_Adot_k[index];
// ReverseJ calc:
four_pi_over_c_J.x = 0.0;
four_pi_over_c_J.y = 0.0;
four_pi_over_c_J.z = 0.0;
if ((index >= ReverseJzIndexStart) && (index < ReverseJzIndexEnd))
{
four_pi_over_c_J.z = four_pi_over_c_ReverseJz;
}
Vector3 Adot_plus = dAdt_k + h*c*c*(Lap_A_half + four_pi_over_c_J);
// if ((OUTPUT) && (index == REPORT))
// printf("Adot %1.5E Lap_A_half %1.5E 4pi/cJ %1.5E Adot+ %1.5E\n",
// dAdt_k.z, Lap_A_half.z, four_pi_over_c_J.z, Adot_plus.z);
p_dAdt_out[index] = Adot_plus;
sigma_zz[threadIdx.x] = 0.0;
Iz[threadIdx.x] = 0.0;
};
//} else { // index < Nverts
// sigma_zz[threadIdx.x] = 0.0;
// Iz[threadIdx.x] = 0.0;
//};
// Aggregate:
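// Pairwise tree reduction of Iz and sigma_zz over the block's shared arrays.
// The extra (s % 2 == 1) step folds in the odd leftover element each round, so
// blockDim.x is not required to be a power of two.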
__syncthreads();
int s = blockDim.x;
int k = s/2;
while (s != 1) {
if (threadIdx.x < k)
{
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + k];
Iz[threadIdx.x] += Iz[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k-1)){
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x+s-1];
Iz[threadIdx.x] += Iz[threadIdx.x+s-1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s/2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sigma_zz[blockIdx.x] = sigma_zz[0];
p_Iz[blockIdx.x] = Iz[0];
}
}
__global__ void Kernel_Heating_routine(
f64 const h,
structural * __restrict__ p_info,
long * __restrict__ p_IndexTri,
nT * __restrict__ p_nT_neut_src,
nT * __restrict__ p_nT_ion_src,
nT * __restrict__ p_nT_elec_src,
nn * __restrict__ p_nn_ionrec,
// If we want "use" then it comes in as the output variable.
f64_vec3 * __restrict__ p_B_major,
// f64 * __restrict__ p_visccond_heatrate_neut,
// f64 * __restrict__ p_visccond_heatrate_ion,
// f64 * __restrict__ p_visccond_heatrate_elec,
// We could get rid and use the central slots from the resistive heating.
// Defined on minor:
f64 * __restrict__ p_resistive_heat_neut,
f64 * __restrict__ p_resistive_heat_ion,
f64 * __restrict__ p_resistive_heat_elec, // to include inelastic frictional effects.
// What about ion-neutral frictional heating? Where was that included??
f64 * __restrict__ p_area_cell,
nT * __restrict__ p_nT_neut_out,
nT * __restrict__ p_nT_ion_out,
nT * __restrict__ p_nT_elec_out,
bool b2ndPass // on '2ndpass', load nT_neut_use.
)
{
// Temperature advance:
__shared__ f64 resistive_neut[SIZE_OF_TRI_TILE_FOR_MAJOR];
__shared__ f64 resistive_ion[SIZE_OF_TRI_TILE_FOR_MAJOR];
__shared__ f64 resistive_elec[SIZE_OF_TRI_TILE_FOR_MAJOR]; // 6 doubles equiv
__shared__ long indextri[MAXNEIGH_d*threadsPerTileMajor]; // 6 doubles equiv
resistive_neut[threadIdx.x]
= p_resistive_heat_neut[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
resistive_neut[threadIdx.x + threadsPerTileMajor]
= p_resistive_heat_neut[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadsPerTileMajor + threadIdx.x];
resistive_ion[threadIdx.x]
= p_resistive_heat_ion[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
resistive_ion[threadIdx.x + threadsPerTileMajor]
= p_resistive_heat_ion[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadsPerTileMajor + threadIdx.x];
resistive_elec[threadIdx.x]
= p_resistive_heat_elec[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadIdx.x];
resistive_elec[threadIdx.x + threadsPerTileMajor]
= p_resistive_heat_elec[SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x + threadsPerTileMajor + threadIdx.x];
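// Each major-cell thread loads two minor (triangle) entries, so the shared tiles
// span the full SIZE_OF_TRI_TILE_FOR_MAJOR range -- this assumes
// SIZE_OF_TRI_TILE_FOR_MAJOR == 2*threadsPerTileMajor, as the offsets above imply.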
__syncthreads();
f64 niTi, nnTn, neTe;
nT nT_ion_src, nT_elec_src, nT_neut_src,
nT_neut_use, nT_ion_use, nT_elec_use;
f64 n_e_plus, n_ion_plus, n_n_plus, area;
long index = blockIdx.x*blockDim.x + threadIdx.x;
long StartTri = SIZE_OF_TRI_TILE_FOR_MAJOR*blockIdx.x;
memcpy(indextri + MAXNEIGH_d*threadIdx.x,
p_IndexTri + index*MAXNEIGH_d, sizeof(long)*MAXNEIGH_d);
// Do we also want to gather v ? No, we can use from centrals.
// Remember to collect resistive heat from centrals as well.
nn n_ionrec = p_nn_ionrec[index];
structural info = p_info[index];
nT_neut_src = p_nT_neut_src[index];
nT_ion_src = p_nT_ion_src[index];
nT_elec_src = p_nT_elec_src[index];
area = p_area_cell[index];
n_n_plus = nT_neut_src.n + n_ionrec.n_recombine-n_ionrec.n_ionise;
n_ion_plus = nT_ion_src.n + n_ionrec.n_ionise-n_ionrec.n_recombine;
n_e_plus = nT_elec_src.n + n_ionrec.n_ionise-n_ionrec.n_recombine;
if (b2ndPass) {
nT_neut_use = p_nT_neut_out[index];
nT_ion_use = p_nT_ion_out[index];
nT_elec_use = p_nT_elec_out[index];
} else {
nT_neut_use = nT_neut_src;
nT_ion_use = nT_ion_src;
nT_elec_use = nT_elec_src;
}
niTi = (nT_ion_src.n-n_ionrec.n_recombine)*nT_ion_src.T
+ 0.5*n_ionrec.n_ionise*nT_neut_src.T;
nnTn = (nT_neut_src.n-n_ionrec.n_ionise)*nT_neut_src.T
+ n_ionrec.n_recombine*(nT_elec_src.T+nT_ion_src.T )
+ n_ionrec.n_recombine*TWOTHIRDS*13.6*kB;
neTe = (nT_elec_src.n-n_ionrec.n_recombine)*nT_elec_src.T
+ 0.5*n_ionrec.n_ionise*nT_neut_src.T
- n_ionrec.n_ionise*TWOTHIRDS*13.6*kB;
if ((OUTPUT) && (index == REPORT)){
printf(
"Tsrc %1.5E %1.5E %1.5E \n"
"nT ionise %1.5E %1.5E %1.5E \n",
nT_neut_src.T,nT_ion_src.T,nT_elec_src.T,
nnTn,niTi,neTe);
};
// This will serve as part of the right hand side for including heat transfers.
// Visc+cond heat addition:
// ------------------------
// DKE = 1/2 m n v.v
// We should associate a heating amount with each wall that will be positive.
// ( That works out nicely for offset! )
// That means we need to do a fetch. We can't work out visc htg without knowing
// neighbour v, which means we might as well store it - correct?
// If we are adding to v then we are increasing or decreasing DKE here -- but
// then we want net + heating appearing in this and the neighbour.
// So that leaves us having to do a fetch always.
// & Include heat conduction heat addition in same step.
// ------------------------
{
// CAREFUL ABOUT WHETHER THESE WERE CREATED DIVIDING BY AREA.
// Either we will reinstate these here, or,
// we will proceed by putting the necessary heat into
// what is now the "resistive" variable, major/central part.
// nnTn += h*p_visccond_heatrate_neut[index];
// niTi += h*p_visccond_heatrate_ion[index];
// neTe += h*p_visccond_heatrate_elec[index];
}
// Now drag in the resistive heat rates INCLUDING its own central.
{
f64 neut_resistive = 0.0, ion_resistive = 0.0, elec_resistive = 0.0; // accumulators; must start from zero
long iTri;
for (iTri = 0; iTri < info.neigh_len; iTri++)
{
// CAREFUL of cases where we are at the edge.
long index_tri = indextri[MAXNEIGH_d*threadIdx.x + iTri]; // one triangle per loop iteration (previously indexed by threadIdx.x only, ignoring iTri)
if ((index_tri >= StartTri) && (index_tri < StartTri + SIZE_OF_TRI_TILE_FOR_MAJOR))
{
neut_resistive += resistive_neut[index_tri-StartTri];
ion_resistive += resistive_ion[index_tri-StartTri];
elec_resistive += resistive_elec[index_tri-StartTri];
} else {
neut_resistive += p_resistive_heat_neut[index_tri];
ion_resistive += p_resistive_heat_ion[index_tri];
elec_resistive += p_resistive_heat_elec[index_tri];
};
}
neut_resistive *= THIRD;
ion_resistive *= THIRD;
elec_resistive *= THIRD;
// Try __syncthreads here...
// Add the values for central cell:
neut_resistive += p_resistive_heat_neut[BEGINNING_OF_CENTRAL + index];
ion_resistive += p_resistive_heat_ion[BEGINNING_OF_CENTRAL + index];
elec_resistive += p_resistive_heat_elec[BEGINNING_OF_CENTRAL + index];
nnTn += neut_resistive/area;
niTi += ion_resistive/area;
neTe += elec_resistive/area; // These were the additions to NT
}
// So we have now to collect things like:
// nu_eHeart, nu_eiBar :
f64 nu_ne_MT_over_n, nu_ni_MT_over_n, nu_eiBar, nu_ieBar, nu_eHeart; // 5 double
Vector3 omega_ce = eovermc*p_B_major[index];
{
f64 sqrt_Te = sqrt(nT_elec_use.T);
f64 s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(nT_elec_use.T*one_over_kB);
f64 electron_thermal = sqrt_Te*over_sqrt_m_e;
f64 ionneut_thermal = sqrt(nT_ion_use.T/m_ion+nT_neut_use.T/m_n); // hopefully not sqrt(0)
f64 lnLambda = Get_lnLambda_d(nT_ion_use.n,nT_elec_use.T);
f64 s_in_MT = Estimate_Neutral_MT_Cross_section(nT_ion_use.T*one_over_kB);
f64 s_en_MT = Estimate_Neutral_MT_Cross_section(nT_elec_use.T*one_over_kB);
nu_ne_MT_over_n = s_en_MT*electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT_over_n = s_in_MT*ionneut_thermal;
nu_eiBar = nu_eiBarconst*kB_to_3halves*nT_ion_use.n*lnLambda/(nT_elec_use.T*sqrt_Te);
nu_ieBar = nT_elec_use.n*nu_eiBar/nT_ion_use.n;
nu_eHeart = 1.87*nu_eiBar +
nT_neut_use.n*s_en_visc*electron_thermal;
}
// From here on doing the inter-species heat exchange:
Tensor3 Tens1;
{
f64 M_in = m_n*m_ion/((m_n+m_ion)*(m_n+m_ion));
f64 M_en = m_n*m_e/((m_n+m_e)*(m_n+m_e));
f64 M_ie = m_ion*m_e/((m_ion+m_e)*(m_ion+m_e));
// See section 10.3.1, June 2016 doc.
// Seems good idea to do this in heat, or manipulate equivalently.
// d/dt(NT) = U NT
// Add to the RH vector, h*0.5*U*NT_k:
Tens1.xx = -2.0*(M_in*nu_ni_MT_over_n*nT_ion_use.n + M_en*nu_ne_MT_over_n*nT_elec_use.n);
Tens1.xy = 2.0*M_in*nu_ni_MT_over_n*nT_neut_use.n;
Tens1.xz = 2.0*M_en*nu_ne_MT_over_n*nT_neut_use.n;
Tens1.yx = 2.0*M_in*nu_ni_MT_over_n*nT_ion_use.n;
Tens1.yy = -2.0*(M_in*nu_ni_MT_over_n*nT_neut_use.n
+ M_ie*nu_ieBar);
Tens1.yz = 2.0*M_ie*nu_eiBar;
Tens1.zx = 2.0*M_en*nu_ne_MT_over_n*nT_elec_use.n;
Tens1.zy = 2.0*M_ie*nu_ieBar;
Tens1.zz = -2.0*(M_ie*nu_eiBar + M_en*nu_ne_MT_over_n*nT_neut_use.n);
}
// Midpoint:
// d/dt (nT) = U
// (nT)_k+1 = (1 - h/2 U)^-1 (1+h/2 U) (nT)_k
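// Concretely: the three "+= h*0.5*(...)" lines below add (h/2)*U*(nT)_k to the
// right-hand side (on top of the ionisation and resistive additions already in
// nnTn/niTi/neTe), and the matrix inverted afterwards is (1 - (h/2)U), giving
// the midpoint update (nT)_k+1 = (1 - (h/2)U)^-1 (1 + (h/2)U) (nT)_k.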
nnTn += h*0.5*(Tens1.xx*(nT_neut_src.n*nT_neut_src.T)
+ Tens1.xy*(nT_ion_src.n*nT_ion_src.T)
+ Tens1.xz*(nT_elec_src.n*nT_elec_src.T)
);
niTi += h*0.5*(Tens1.yx*(nT_neut_src.n*nT_neut_src.T)
+ Tens1.yy*(nT_ion_src.n*nT_ion_src.T)
+ Tens1.yz*(nT_elec_src.n*nT_elec_src.T)
);
neTe += h*0.5*(Tens1.zx*(nT_neut_src.n*nT_neut_src.T)
+ Tens1.zy*(nT_ion_src.n*nT_ion_src.T)
+ Tens1.zz*(nT_elec_src.n*nT_elec_src.T)
);
// Matrix is 1 - h*0.5*U
Tens1.xx = 1.0-h*0.5*Tens1.xx;
Tens1.xy = -h*0.5*Tens1.xy;
Tens1.xz = -h*0.5*Tens1.xz;
Tens1.yx = -h*0.5*Tens1.yx;
Tens1.yy = 1.0-h*0.5*Tens1.yy;
Tens1.yz = -h*0.5*Tens1.yz;
Tens1.zx = -h*0.5*Tens1.zx;
Tens1.zy = -h*0.5*Tens1.zy;
Tens1.zz = 1.0-h*0.5*Tens1.zz;
if ((OUTPUT) && (index == REPORT)) {
printf("nT_before %1.5E %1.5E %1.5E \n",
nnTn,niTi,neTe);
};
{
Tensor3 Tens2;
Tens1.Inverse(Tens2);
Vector3 RH,LH;
RH.x = nnTn;
RH.y = niTi;
RH.z = neTe;
LH = Tens2*RH;
nnTn = LH.x;
niTi = LH.y;
neTe = LH.z;
}
if ((OUTPUT) && (index == REPORT)) {
printf("nT_after %1.5E %1.5E %1.5E \n",
nnTn,niTi,neTe);
};
// Overwrite any old rubbish in memory so that we can save off the output:
nT_neut_use.n = n_n_plus;
nT_neut_use.T = nnTn/n_n_plus;
nT_ion_use.n = n_ion_plus;
nT_ion_use.T = niTi/n_ion_plus;
nT_elec_use.n = n_e_plus;
nT_elec_use.T = neTe/n_e_plus;
if (b2ndPass == false) {
// Tween back to halfway if this is the first pass:
nT_neut_use.n = 0.5*(nT_neut_src.n + nT_neut_use.n);
nT_ion_use.n = 0.5*(nT_ion_src.n + nT_ion_use.n);
nT_elec_use.n = 0.5*(nT_elec_src.n + nT_elec_use.n);
nT_neut_use.T = 0.5*(nT_neut_src.T + nT_neut_use.T);
nT_ion_use.T = 0.5*(nT_ion_src.T + nT_ion_use.T);
nT_elec_use.T = 0.5*(nT_elec_src.T + nT_elec_use.T);
};
//if ((OUTPUT) && (index == REPORT))
// printf("Te %1.5E \n################\n",nT_elec_use.T);
p_nT_neut_out[index] = nT_neut_use;
p_nT_ion_out[index] = nT_ion_use;
p_nT_elec_out[index] = nT_elec_use;
}
|
e8e0c8669b98abbb364c823b5db34dfd036650de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "j2d9pt-512-8-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 18
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
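// (Driver structure, as far as I can tell from this generated code: the loop below
// runs most timesteps with temporal tile depth 8 via kernel0_8, then the remaining
// __c0Len % 8 steps are dispatched to the smaller kernel0_1..kernel0_4 variants,
// split into pairs where needed so the final result lands in the expected half of
// the double-buffered dev_A.)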
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
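    // Host fallback (scop == false): run the same 2-D 9-point stencil directly on the CPU,
    // ping-ponging between the two planes of A; each sweep is the weighted neighbourhood
    // sum below divided by 118, parallelised over rows with OpenMP.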
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
( 7.1f * A[t%2][i-2][j] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i][j-2] +
12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] +
9.1f * A[t%2][i][j+2] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+2][j]) / 118;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| e8e0c8669b98abbb364c823b5db34dfd036650de.cu | #include <assert.h>
#include <stdio.h>
#include "j2d9pt-512-8-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 18
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
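      // AN5D temporal blocking: the main loop below advances time in chunks of
      // __side0Len = 8 steps per launch (kernel0_8) on 128 x 480 spatial tiles with a
      // halo of __halo = 2 cells per fused time step; the remaining __c0Len % 8 steps
      // are finished by the narrower kernel0_1 .. kernel0_7 variants chosen further down,
      // which use progressively wider __side2Len tiles as the time block shrinks.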
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
( 7.1f * A[t%2][i-2][j] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i][j-2] +
12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] +
9.1f * A[t%2][i][j+2] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+2][j]) / 118;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
a993cbcd8e1686437226b2fa32a5264065885f06.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <math.h>
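// Input file layout, as parsed below: N, the iteration count, the N*N entries of A in
// row-major order, a single token that is skipped (the word "solution" in the inputs),
// and finally the N entries of the right-hand side b.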
void readFile(char* fname, int* N, int* iter, float** A, float** b)
{
FILE *fp;
char buf[100];
int i, j;
fp = fopen(fname, "r");
if(!fp)
{
*N = 0;
*iter = 0;
printf("Stale File Handle\n");
return;
}
if(fscanf(fp, "%s", buf) > 0) *N = atoi(buf);
if(fscanf(fp, "%s", buf) > 0) *iter = atoi(buf);
printf("N = %d\nIterations = %d\n", *N, *iter);
*b = (float*) malloc(*N*sizeof(float));
*A = (float*) malloc((*N)*(*N)*sizeof(float));
for(i = 0; i < *N; i++)
{
for(j = 0; j < *N; j++)
{
fscanf(fp, "%s", buf);
(*A)[ ((*N*i)+j) ] = (float)atoi(buf);
}
}
fscanf(fp, "%s", buf); // Ignore the "solution" in the text
for(i = 0; i < *N; i++)
{
fscanf(fp, "%s", buf);
(*b)[i] = (float)atoi(buf);
}
fclose(fp);
}
__global__
void iloop(float* A, float* b, int N, float* x, float* y)
{
float t;
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int stride = (gridDim.x * blockDim.x);
//printf("<<< %d, %f, %f >>>\n", N, A[0], b[0]);
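    // Grid-stride loop: each thread handles rows i, i+stride, ..., performing one Jacobi
    // update y[i] = (b[i] - sum_{j != i} A[i][j] * x[j]) / A[i][i].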
for(int i = index; i < N; i += stride)
{
t = 0.0;
for(int j = 0; j < N; j++)
{
if(i != j)
{
t = t + (( A[ ((N*i)+j) ] ) * x[j]);
}
}
y[i] = ((b[i] - t)/(A[ ((N*i)+i) ]));
}
}
int main(int argc, char* argv[])
{
float time = 0.0;
float maxError = 0.0;
float* d_A;
float* d_b;
float* d_x;
float* d_y;
int k;
int blocksize;
int numblocks;
float* A;
float* b;
float* x;
float* y;
float* c;
char* fname;
int N, M, iter, i, j;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
if(argc >= 2) fname = argv[1];
else fname = "../inputs/8.txt";
if(argc >= 3) M = atoi(argv[2]);
else M = 32;
readFile(fname, &N, &iter, &A, &b);
printf("CUDA : Parsed file %s\n", fname);
x = (float*) malloc(N*sizeof(float));
y = (float*) malloc(N*sizeof(float));
for(i = 0; i < N; i++)
{
x[i] = 0.0;
y[i] = 0.0;
}
if( hipMallocManaged(&d_A, N*N*sizeof(float)) != hipSuccess ) printf("CUDA : memory allocation error!\n");
if( hipMallocManaged(&d_b, N*sizeof(float)) != hipSuccess ) printf("CUDA : memory allocation error!\n");
if( hipMallocManaged(&d_x, N*sizeof(float)) != hipSuccess ) printf("CUDA : memory allocation error!\n");
if( hipMallocManaged(&d_y, N*sizeof(float)) != hipSuccess ) printf("CUDA : memory allocation error!\n");
for(i = 0; i < N*N; i++)
{
d_A[i] = A[i];
}
for(i = 0; i < N; i++)
{
d_b[i] = b[i];
d_x[i] = x[i];
d_y[i] = y[i];
}
blocksize = M;
numblocks = (N+blocksize-1)/blocksize;
printf("CUDA : Grid Size %d, Block size %d\n", numblocks, blocksize);
hipEventRecord(start);
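  // Each iteration launches one Jacobi sweep on the GPU, waits for it, then promotes the
  // new estimate y into x on the host; managed memory makes d_x/d_y directly addressable
  // from the CPU after the synchronize.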
for(k = 0; k < iter; k++)
{
hipLaunchKernelGGL(( iloop), dim3(numblocks), dim3(blocksize) , 0, 0, d_A, d_b, N, d_x, d_y); // kernel launch on GPU
hipDeviceSynchronize();
for(j = 0; j < N; j++) d_x[j] = d_y[j];
}
//hipDeviceSynchronize();
//for(j = 0; j < N; j++) printf("CUDA : %f\n", d_y[j]);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
printf("CUDA : Done Computing Jacobi on GPU\n");
//Verify : sdiff -s <output> inputs/<input>
//for(i = 0; i < N; i++)
//{
// for(j = 0; j < N; j++)
// {
// printf("%d", (int)A[i][j]);
// if(j < N-1) printf(" ");
// }
// printf("\n");
//}
//for(i = 0; i < N; i++)
//{
// printf("%d\n", (int)b[i]);
//}
//printf("\n\n");
//for(i = 0; i < N; i++)
//{
// printf("%f\n", x[i]);
//}
//printf("\n\n");
for(i = 0; i < N; i++) x[i] = d_x[i];
c = (float*) malloc(N*sizeof(float));
for(i = 0; i < N; i++)
{
c[i] = 0;
for(j = 0; j < N; j++)
{
c[i] += A[ ((N*i)+j) ] * x[j];
}
//printf("%0.2f\n", c[i]);
maxError = fmax(maxError, fabs(c[i] - b[i]));
}
printf("\nCUDA : Time %f ms\n", time);
printf("CUDA : MaxError = %f\n\n\n", maxError);
hipFree(d_A);
hipFree(d_b);
hipFree(d_x);
hipFree(d_y);
free(A);
free(b);
free(x);
free(y);
free(c);
return 0;
}
| a993cbcd8e1686437226b2fa32a5264065885f06.cu | #include <cuda.h>
#include <stdio.h>
#include <math.h>
void readFile(char* fname, int* N, int* iter, float** A, float** b)
{
FILE *fp;
char buf[100];
int i, j;
fp = fopen(fname, "r");
if(!fp)
{
*N = 0;
*iter = 0;
printf("Stale File Handle\n");
return;
}
if(fscanf(fp, "%s", buf) > 0) *N = atoi(buf);
if(fscanf(fp, "%s", buf) > 0) *iter = atoi(buf);
printf("N = %d\nIterations = %d\n", *N, *iter);
*b = (float*) malloc(*N*sizeof(float));
*A = (float*) malloc((*N)*(*N)*sizeof(float));
for(i = 0; i < *N; i++)
{
for(j = 0; j < *N; j++)
{
fscanf(fp, "%s", buf);
(*A)[ ((*N*i)+j) ] = (float)atoi(buf);
}
}
fscanf(fp, "%s", buf); // Ignore the "solution" in the text
for(i = 0; i < *N; i++)
{
fscanf(fp, "%s", buf);
(*b)[i] = (float)atoi(buf);
}
fclose(fp);
}
__global__
void iloop(float* A, float* b, int N, float* x, float* y)
{
float t;
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int stride = (gridDim.x * blockDim.x);
//printf("<<< %d, %f, %f >>>\n", N, A[0], b[0]);
for(int i = index; i < N; i += stride)
{
t = 0.0;
for(int j = 0; j < N; j++)
{
if(i != j)
{
t = t + (( A[ ((N*i)+j) ] ) * x[j]);
}
}
y[i] = ((b[i] - t)/(A[ ((N*i)+i) ]));
}
}
int main(int argc, char* argv[])
{
float time = 0.0;
float maxError = 0.0;
float* d_A;
float* d_b;
float* d_x;
float* d_y;
int k;
int blocksize;
int numblocks;
float* A;
float* b;
float* x;
float* y;
float* c;
char* fname;
int N, M, iter, i, j;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
if(argc >= 2) fname = argv[1];
else fname = "../inputs/8.txt";
if(argc >= 3) M = atoi(argv[2]);
else M = 32;
readFile(fname, &N, &iter, &A, &b);
printf("CUDA : Parsed file %s\n", fname);
x = (float*) malloc(N*sizeof(float));
y = (float*) malloc(N*sizeof(float));
for(i = 0; i < N; i++)
{
x[i] = 0.0;
y[i] = 0.0;
}
if( cudaMallocManaged(&d_A, N*N*sizeof(float)) != cudaSuccess ) printf("CUDA : memory allocation error!\n");
if( cudaMallocManaged(&d_b, N*sizeof(float)) != cudaSuccess ) printf("CUDA : memory allocation error!\n");
if( cudaMallocManaged(&d_x, N*sizeof(float)) != cudaSuccess ) printf("CUDA : memory allocation error!\n");
if( cudaMallocManaged(&d_y, N*sizeof(float)) != cudaSuccess ) printf("CUDA : memory allocation error!\n");
for(i = 0; i < N*N; i++)
{
d_A[i] = A[i];
}
for(i = 0; i < N; i++)
{
d_b[i] = b[i];
d_x[i] = x[i];
d_y[i] = y[i];
}
blocksize = M;
numblocks = (N+blocksize-1)/blocksize;
printf("CUDA : Grid Size %d, Block size %d\n", numblocks, blocksize);
cudaEventRecord(start);
for(k = 0; k < iter; k++)
{
iloop<<< numblocks, blocksize >>>(d_A, d_b, N, d_x, d_y); // kernel launch on GPU
cudaDeviceSynchronize();
for(j = 0; j < N; j++) d_x[j] = d_y[j];
}
//cudaDeviceSynchronize();
//for(j = 0; j < N; j++) printf("CUDA : %f\n", d_y[j]);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("CUDA : Done Computing Jacobi on GPU\n");
//Verify : sdiff -s <output> inputs/<input>
//for(i = 0; i < N; i++)
//{
// for(j = 0; j < N; j++)
// {
// printf("%d", (int)A[i][j]);
// if(j < N-1) printf(" ");
// }
// printf("\n");
//}
//for(i = 0; i < N; i++)
//{
// printf("%d\n", (int)b[i]);
//}
//printf("\n\n");
//for(i = 0; i < N; i++)
//{
// printf("%f\n", x[i]);
//}
//printf("\n\n");
for(i = 0; i < N; i++) x[i] = d_x[i];
c = (float*) malloc(N*sizeof(float));
for(i = 0; i < N; i++)
{
c[i] = 0;
for(j = 0; j < N; j++)
{
c[i] += A[ ((N*i)+j) ] * x[j];
}
//printf("%0.2f\n", c[i]);
maxError = fmax(maxError, fabs(c[i] - b[i]));
}
printf("\nCUDA : Time %f ms\n", time);
printf("CUDA : MaxError = %f\n\n\n", maxError);
cudaFree(d_A);
cudaFree(d_b);
cudaFree(d_x);
cudaFree(d_y);
free(A);
free(b);
free(x);
free(y);
free(c);
return 0;
}
|
4345ec323c611163c9fed02f68612370495538e1.hip | // !!! This is a file automatically generated by hipify!!!
#ifdef ENABLE_CURD
#include<curd_lib_host.h>
#else
#endif
#ifdef ENABLE_CURD
#define CURD_ALLOC(a, b) allocateReadWriteSets(a, b)
#define CURD_FREE(a, b) freeReadWriteSets(a, b)
#else
#define CURD_ALLOC(a, b)
#define CURD_FREE(a, b)
#endif
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple example of DeviceReduce::Sum().
*
* Sums an array of int keys.
*
* To compile using the command line:
* nvcc -arch=sm_XX example_device_reduce.cu -I../.. -lcudart -O3
*
******************************************************************************/
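// Note: the nvcc line above is from the original CUDA example; for this hipified version a
// roughly equivalent build (file name and include path are an assumption about the local
// hipCUB install) is:
// hipcc example_device_reduce.cpp -I../.. -O3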
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include "test_util.h"
using namespace hipcub; // hipCUB's equivalents of the CUB utilities live in namespace hipcub
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false; // Whether to display input/output to console
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
void Initialize(
int *h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
h_in[i] = i;
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Compute solution
*/
void Solve(
int *h_in,
int &h_reference,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
if (i == 0)
h_reference = h_in[0];
else
h_reference += h_in[i];
}
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = 150;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
printf("hipcub::DeviceReduce::Sum() %d items (%d-byte elements)\n",
num_items, (int) sizeof(int));
fflush(stdout);
// Allocate host arrays
int* h_in = new int[num_items];
int h_reference;
// Initialize problem and solution
Initialize(h_in, num_items);
Solve(h_in, h_reference, num_items);
// Allocate problem device arrays
int *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(int) * num_items, hipMemcpyHostToDevice));
// Allocate device output array
int *d_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * 1));
// Request and allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
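  // CUB/hipCUB convention: the first Sum() call, with d_temp_storage == NULL, only writes
  // the required scratch size into temp_storage_bytes; the reduction itself runs in the
  // second call once the scratch buffer has been allocated.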
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(&h_reference, d_out, 1, g_verbose, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
printf("\n\n");
return 0;
}
| 4345ec323c611163c9fed02f68612370495538e1.cu | #ifdef ENABLE_CURD
#include<curd_lib_host.h>
#else
#endif
#ifdef ENABLE_CURD
#define CURD_ALLOC(a, b) allocateReadWriteSets(a, b)
#define CURD_FREE(a, b) freeReadWriteSets(a, b)
#else
#define CURD_ALLOC(a, b)
#define CURD_FREE(a, b)
#endif
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Simple example of DeviceReduce::Sum().
*
* Sums an array of int keys.
*
* To compile using the command line:
* nvcc -arch=sm_XX example_device_reduce.cu -I../.. -lcudart -O3
*
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <cub/util_allocator.cuh>
#include <cub/device/device_reduce.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false; // Whether to display input/output to console
CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
void Initialize(
int *h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
h_in[i] = i;
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Compute solution
*/
void Solve(
int *h_in,
int &h_reference,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
if (i == 0)
h_reference = h_in[0];
else
h_reference += h_in[i];
}
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = 150;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
printf("cub::DeviceReduce::Sum() %d items (%d-byte elements)\n",
num_items, (int) sizeof(int));
fflush(stdout);
// Allocate host arrays
int* h_in = new int[num_items];
int h_reference;
// Initialize problem and solution
Initialize(h_in, num_items);
Solve(h_in, h_reference, num_items);
// Allocate problem device arrays
int *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(int) * num_items));
// Initialize device input
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(int) * num_items, cudaMemcpyHostToDevice));
// Allocate device output array
int *d_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(int) * 1));
// Request and allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Run
CubDebugExit(DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items));
// Check for correctness (and display results, if specified)
int compare = CompareDeviceResults(&h_reference, d_out, 1, g_verbose, g_verbose);
printf("\t%s", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
printf("\n\n");
return 0;
}
|
3aea57dc22499868915515d02cbd3faba4ba3195.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* author: ck
* created: 16.02.2012
* advisor: atc
*/
#include <string.h>
#include <sstream>
#include "../common/utils.cuh"
#include "cutil_inline.h"
#include "../common/kernels.cuh"
#include "../common/cuPrintf.cuh"
size_t call_calculate_C_mops_opnum=0;
void call_calculate_C_mops(size_t ndims, size_t operand_num, operands* ops, std::string output_tensor, bool print, int* d_to_power = NULL){
hipLaunchKernelGGL(( calculate_C_mops), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, (size_t) ndims,
(size_t) (operand_num),
(size_t**) (ops->d_strides_operand_pointers),
(size_t*) (get_d_obj_strides()[output_tensor]),
(size_t*) (get_d_obj_cards()["F"]), // used to calculate contraction indices of required dimensions
(size_t**) (ops->d_cards_operand_pointers),
(double**) (ops->d_operand_pointers),
(double*) (get_d_obj_data()[output_tensor]),
//(double*) (get_d_obj_data()["Z0"]),
(size_t) (h_objs[output_tensor]->element_number),
(size_t) 1,
print, call_calculate_C_mops_opnum,
d_to_power);
call_calculate_C_mops_opnum++;
}
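// The two wrappers below appear to implement a generalized tensor multiply-and-contract:
// genFullResult expands A and B over the full index set F (each optionally raised to a
// power) and combines them element-wise into F, and contractFintoC then folds F into C by
// accumulating over the dimensions F has but C lacks (inferred from the kernel arguments).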
void call_genFullResult(size_t ndims, std::string A, std::string B, std::string F, int to_power_A=1, int to_power_B=1){
hipLaunchKernelGGL(( genFullResult), dim3(NUM_BLOCKS),dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_cards()[F], ndims,
get_d_obj_strides()[A], get_d_obj_strides()[B], get_d_obj_strides()[F],
get_d_obj_data()[A], get_d_obj_data()[B], get_d_obj_data()[F],
h_objs[F]->element_number, h_objs[A]->element_number, h_objs[B]->element_number,
1, false, to_power_A, to_power_B);
}
void call_contractFintoC(size_t ndims, std::string F, std::string C){
// prepare range permutation vector //////////////////////////////////////////////////////
size_t zero_cardinality_dim_tuple_size_C = 0;
size_t zero_cardinality_dim_tuples_C_element_number = 0;
size_t* h_zero_cardinality_dim_tuples_C = NULL;
size_t* d_zero_cardinality_dim_tuples_C = NULL;
std::vector<size_t> zero_cardinality_dims;
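  // Dimensions with nonzero cardinality in F but zero cardinality in C are the ones being
  // contracted away; collect their cardinalities so that every index tuple over them can be
  // enumerated by gen_range_permutation and walked on the device.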
for ( size_t dim=0; dim<ndims; dim++ ){
if ( h_objs[C]->cardinalities[dim] == 0 && h_objs[F]->cardinalities[dim] != 0 ){
zero_cardinality_dims.push_back(h_objs[F]->cardinalities[dim]);
}
}
if ( COUT ) {
std::cout << "zero_cardinality_dims" << std::endl;
for ( size_t j=0; j<zero_cardinality_dims.size(); j++){
std::cout << zero_cardinality_dims.at(j) << std::endl;
}
}
zero_cardinality_dim_tuple_size_C = zero_cardinality_dims.size();
h_zero_cardinality_dim_tuples_C =
gen_range_permutation(zero_cardinality_dims,
&(zero_cardinality_dim_tuples_C_element_number));
// transfer to device
cutilSafeCall(hipMalloc((void**)&(d_zero_cardinality_dim_tuples_C),
sizeof(size_t)*zero_cardinality_dim_tuples_C_element_number));
cutilSafeCall(hipMemcpy(d_zero_cardinality_dim_tuples_C, h_zero_cardinality_dim_tuples_C,
sizeof(size_t)*zero_cardinality_dim_tuples_C_element_number, hipMemcpyHostToDevice));
////////////////////////////////////////////////////////////////////////////////////////
hipLaunchKernelGGL(( contractFintoC), dim3(NUM_BLOCKS),dim3(THREADS_FOR_BLOCK), 0, 0, ndims,
get_d_obj_strides()[F], get_d_obj_strides()[C],
get_d_obj_data()[F], get_d_obj_data()[C],
h_objs[C]->element_number,
d_zero_cardinality_dim_tuples_C,
zero_cardinality_dim_tuple_size_C,
zero_cardinality_dim_tuples_C_element_number,
CUPRINTF);
}
void umut01(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[], bool is_parallel){
// prepare model elements //////////////////////////////////////////////////////
size_t op_iter_count = ((double *)mxGetData(prhs[0]))[0];
mxChar* V_card_sym = mxGetChars(prhs[1]);
size_t ndims = mxGetNumberOfElements(prhs[1]);
double* V_cards = (double*) mxGetData(prhs[2]);
size_t p = ((double *)mxGetData(prhs[3]))[0];
size_t max_v = mxGetM(prhs[4]);
size_t max_alpha = mxGetN(prhs[4]);
bool* R = (bool*) malloc( sizeof(bool) * max_v * max_alpha); // dynamic allocation may not be initialized with = {0} syntax
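  // R is indexed column-major (i + j*max_v) to match MATLAB's layout; it presumably marks
  // which latent factor (column alpha) contributes to which observed tensor (row v).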
if(COUT) std::cout << "init R" << std::endl;
for (size_t i=0; i<max_v; i++){
for (size_t j=0; j<max_alpha; j++){
R[i + j*max_v] = (bool) (((double *)mxGetData(prhs[4]))[i + j*max_v]);
if(COUT) std::cout << R[i + j*max_v] << " ";
}
if(COUT) std::cout << std::endl;
}
std::vector<m_tensor> observed_elements;
//size_t observed_element_num = max_v;
size_t m_index=5;
for (size_t t=0; t<max_v; t++){ // need to fill in v number of observed_elements
const mxArray* m_observed_cards = prhs[m_index];
m_index++;
const mxArray* m_observed_data = prhs[m_index];
m_index++;
m_tensor tmp_m_tensor;
tmp_m_tensor.is_updateable = false; // used with latent tensors only
size_t m_observed_cards_elnum = mxGetNumberOfElements(m_observed_cards);
tmp_m_tensor.cards_char = (char*) malloc( m_observed_cards_elnum + 1 );
for (size_t i=0; i<=m_observed_cards_elnum ; i++)
if ( i == m_observed_cards_elnum )
tmp_m_tensor.cards_char[i] = '\0';
else
tmp_m_tensor.cards_char[i] = (char) mxGetChars(m_observed_cards)[i] ;
if ( mxGetNumberOfElements(m_observed_data) == 0 ){
// tensor init data is not given
tmp_m_tensor.data = NULL;
}else{
// tensor init data is given, save pointer
tmp_m_tensor.data = (double*) mxGetData(m_observed_data);
if (COUT) std::cout << "found factor with init data. Data size " << mxGetNumberOfElements(m_observed_data) << std::endl;
}
observed_elements.push_back(tmp_m_tensor);
}
std::vector<m_tensor> latent_elements;
//size_t latent_element_num = max_v;
for (size_t t=0; t<max_alpha; t++){ // need to fill in alpha number of latent_elements
const mxArray* m_latent_cards = prhs[m_index];
m_index++;
const mxArray* m_latent_data = prhs[m_index];
m_index++;
m_tensor tmp_m_tensor;
tmp_m_tensor.is_updateable = (bool) (((double *)mxGetData(prhs[m_index]))[0]);
m_index++;
size_t m_latent_cards_elnum = mxGetNumberOfElements(m_latent_cards);
tmp_m_tensor.cards_char = (char*) malloc( m_latent_cards_elnum + 1 );
for (size_t i=0; i<=m_latent_cards_elnum ; i++)
if ( i == m_latent_cards_elnum )
tmp_m_tensor.cards_char[i] = '\0';
else
tmp_m_tensor.cards_char[i] = (char) mxGetChars(m_latent_cards)[i] ;
if ( mxGetNumberOfElements(m_latent_data) == 0 ){
// tensor init data is not given
tmp_m_tensor.data = NULL;
}else{
// tensor init data is given, save pointer
tmp_m_tensor.data = (double*) mxGetData(m_latent_data);
if (COUT) std::cout << "found factor with init data. Data size " << mxGetNumberOfElements(m_latent_data) << std::endl;
}
latent_elements.push_back(tmp_m_tensor);
}
// prepare cards_numeric indices of model elements
// input arrives like so:
// A['i','k'], B['k', 'j'], C['i','j'] where V is ['i','k','j'] = [2 3 4]
// here we convert indices to internal format:
// A[2, 3, 0], B[0, 3, 4], C[2, 0, 4]
for (size_t m=0; m<observed_elements.size(); m++){
assign_m_tensor_cards_numeric(&(observed_elements[m]), V_card_sym, V_cards, ndims);
}
for (size_t m=0; m<latent_elements.size(); m++){
assign_m_tensor_cards_numeric(&(latent_elements[m]), V_card_sym, V_cards, ndims);
}
if (COUT) {
print_model_elements_text(&observed_elements, "printing observed model elements");
print_model_elements_text(&latent_elements, "printing latent model elements");
}
// now all tensors have correct internal cardinalities.
// all numeric cardinality arrays (m_tensor.cards_numeric) have the same size as V -> ndims
// prepare output tensor in matlab //////////////////////////////////////////////////////
std::vector<double*> output_data_ptr;
for (size_t t=0; t<latent_elements.size(); t++){
mwSize argMatDims[ndims];
for (size_t i=0; i<ndims; i++) {
size_t val = latent_elements[t].cards_numeric[i];
if (val == 0) argMatDims[i] = 1; // MATLAB needs to get 1 instead of 0
else argMatDims[i] = val;
}
plhs[t] = mxCreateNumericArray(ndims, argMatDims, mxDOUBLE_CLASS, mxREAL);
output_data_ptr.push_back( (double*) mxGetPr(plhs[t]) );
}
// prepare host memory for tensors ///////////////////////////////////////////////////////
h_full_cardinalities = (size_t*) calloc(ndims, sizeof(size_t)); // defined in mct_tensorop_utils.cuh
///// cards_numeric are aligned according to the V cardinalities (see above) /////
for (size_t dim=0; dim<ndims; dim++){ // for each dimension
size_t max_dim_card = 0;
for (size_t t=0; t<observed_elements.size(); t++){ // for each model
for (size_t card=0; card<strlen(observed_elements[t].cards_char); card++){ // for each dimension of the model
if (observed_elements[t].cards_char[card] == V_card_sym[dim]){ // if this dimension character matches current dimension's
size_t tensor_dim_card = observed_elements[t].cards_numeric[dim]; //see above//
if ( max_dim_card < tensor_dim_card )
max_dim_card = tensor_dim_card;
break; // only one dimension of each model can match with current dimension
}
}
}
for (size_t t=0; t<latent_elements.size(); t++){ // for each model
for (size_t card=0; card<strlen(latent_elements[t].cards_char); card++){ // for each dimension of the model
if (latent_elements[t].cards_char[card] == V_card_sym[dim]){ // if this dimension character matches current dimension's
size_t tensor_dim_card = latent_elements[t].cards_numeric[dim]; //see above//
if ( max_dim_card < tensor_dim_card )
max_dim_card = tensor_dim_card;
break; // only one dimension of each model can match with current dimension
}
}
}
h_full_cardinalities[dim] = max_dim_card;
}
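// h_full_cardinalities[dim] now holds the largest cardinality seen for dimension dim
// across all factors; this defines the full index set ("F") used by the contraction
// kernels below.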
if(COUT)
for (size_t i=0; i<ndims; i++)
std::cout << "h_full_cardinalities " << i << " " << h_full_cardinalities[i] << std::endl;
// initialize random seed for random initialization of objects
//srand((unsigned)time(NULL));
srand(123);
std::vector<ct> X_tensors;
std::vector<ct> A_tensors;
std::vector<ct> hat_X_tensors;
for (size_t el=0; el<observed_elements.size(); el++){
ct tmp_ct_X;
ct tmp_ct_A;
ct tmp_ct_hatX;
size_t X_card[ndims];
for (size_t i=0; i<ndims; i++) X_card[i] = observed_elements[el].cards_numeric[i];
std::stringstream x, hatx, xa;
x << "Host X" << el;
hatx << "Host hatX" << el;
xa << "Host A_X" << el;
prepareHostTensorFromCpp(&tmp_ct_X, observed_elements[el].data, X_card, ndims, x.str().c_str(), true); // init with given data, if null init with rand
prepareHostTensorFromCpp(&tmp_ct_A, NULL, X_card, ndims, xa.str().c_str(), false, true); // rand=false, init_to_one=true -> init with 1
prepareHostTensorFromCpp(&tmp_ct_hatX, NULL, X_card, ndims, hatx.str().c_str(), true);
X_tensors.push_back(tmp_ct_X);
A_tensors.push_back(tmp_ct_A);
hat_X_tensors.push_back(tmp_ct_hatX);
}
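// For every latent factor Z<alpha> and every observed tensor X<v>, two work buffers
// D1_Z<alpha>X<v> and D2_Z<alpha>X<v> are allocated below; they hold the numerator
// and denominator contributions (deltaD_n / deltaD_d in the MATLAB-style comments
// further down) of the multiplicative update.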
std::vector<ct> Z_tensors;
std::vector<ct> Z_update_tensors; // stores each one of sub-update equation results, then all are summed together
std::vector<ct> D_tensors;
for (size_t el=0; el<latent_elements.size(); el++){
ct tmp_ct;
size_t Z_card[ndims];
for (size_t i=0; i<ndims; i++) Z_card[i] = latent_elements[el].cards_numeric[i];
std::stringstream z;
z << "Host Z" << el;
prepareHostTensorFromCpp(&tmp_ct, latent_elements[el].data, Z_card, ndims, z.str().c_str(), true); // init with given data, if null init with rand
Z_tensors.push_back(tmp_ct);
for (size_t v=0; v<max_v; v++){
ct tmp_ct_D1;
ct tmp_ct_D2;
std::stringstream d1;
d1 << "Host D1_Z" << el << "X" << v;
prepareHostTensorFromCpp(&tmp_ct_D1, NULL, Z_card, ndims, d1.str().c_str());
std::stringstream d2;
d2 << "Host D2_Z" << el << "X" << v;
prepareHostTensorFromCpp(&tmp_ct_D2, NULL, Z_card, ndims, d2.str().c_str());
D_tensors.push_back(tmp_ct_D1);
D_tensors.push_back(tmp_ct_D2);
// ct tmp_ct_update;
// std::stringstream z_update;
// z_update << "Host Z_update" << el << "X" << v;
// prepareHostTensorFromCpp(&tmp_ct_update, NULL, Z_card, ndims, z_update.str().c_str(), false, false); // rand=false, init_to_one=false -> init with 0
// Z_update_tensors.push_back(tmp_ct_update);
}
// for summation of division operands
// ct tmp_ct_D1_sum;
// ct tmp_ct_D2_sum;
// std::stringstream d1;
// d1 << "Host D1_Z" << el << "sum";
// prepareHostTensorFromCpp(&tmp_ct_D1_sum, NULL, Z_card, ndims, d1.str().c_str());
// std::stringstream d2;
// d2 << "Host D2_Z" << el << "sum";
// prepareHostTensorFromCpp(&tmp_ct_D2_sum, NULL, Z_card, ndims, d2.str().c_str());
// D_tensors.push_back(tmp_ct_D1_sum);
// D_tensors.push_back(tmp_ct_D2_sum);
}
ct F;
prepareHostTensorFromCpp(&F, NULL, h_full_cardinalities, ndims, "Host F", true, true, false);
///////////////////////////////////////////////////////////////////////////////////////////
// register & transfer objects to device //////////////////////////////////////////////////
size_t k=0;
for (size_t alpha=0; alpha<max_alpha; alpha++){
for (size_t v=0; v<max_v; v++){
std::stringstream d_name1;
d_name1 << "D1_Z" << alpha << "X" << v;
register_ct( d_name1.str().c_str(), &D_tensors[k]);
k++;
std::stringstream d_name2;
d_name2 << "D2_Z" << alpha << "X" << v;
register_ct( d_name2.str().c_str(), &D_tensors[k]);
k++;
// std::stringstream name_update;
// name_update << "Zup" << alpha << "X" << v;
// register_ct( name_update.str().c_str(), &(Z_update_tensors[k]) );
}
// std::stringstream d_name1, d_name2;
// d_name1 << "D1_Z" << alpha << "sum";
// d_name2 << "D2_Z" << alpha << "sum";
// register_ct( d_name2.str().c_str(), &D_tensors[k]);
// k++;
// register_ct( d_name2.str().c_str(), &D_tensors[k]);
// k++;
}
for (size_t z=0; z<Z_tensors.size(); z++){
std::stringstream name;
name << 'Z' << z;
register_ct( name.str().c_str(), &(Z_tensors[z]) );
}
for (size_t x=0; x<X_tensors.size(); x++){
std::stringstream name;
name << "X" << x;
register_ct( name.str().c_str(), &(X_tensors[x]) );
std::stringstream a_name;
a_name << "A" << x;
register_ct( a_name.str().c_str(), &(A_tensors[x]) );
std::stringstream hat_X_name;
hat_X_name << "hatX" << x;
register_ct( hat_X_name.str().c_str(), &(hat_X_tensors[x]) );
}
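// Naming convention used as lookup keys by h_objs / get_d_obj_*(): "Z<alpha>" latent
// factors, "X<v>" observed tensors, "hatX<v>" current model estimates, "A<v>"
// auxiliary per-entry terms (initialized to ones), and "D1_/D2_Z<alpha>X<v>" the
// update numerators/denominators registered above.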
// 'f','i','k','t','m','n'
ct BC, BC_F, BZ, FT;
size_t* BC_card = (size_t*) calloc(ndims, sizeof(size_t));
// BC(i,t) others 0
BC_card[1] = V_cards[1]; // i
BC_card[3] = V_cards[3]; // t
prepareHostTensorFromCpp(&BC, NULL, BC_card, ndims, "Host BC");
BC_card[2] = V_cards[2]; // k
prepareHostTensorFromCpp(&BC_F, NULL, BC_card, ndims, "Host BC_F");
size_t* BZ_card = (size_t*) calloc(ndims, sizeof(size_t));
// BZ(i,k) others 0
BZ_card[1] = V_cards[1]; // i
BZ_card[2] = V_cards[2]; // k
prepareHostTensorFromCpp(&BZ, NULL, BZ_card, ndims, "Host BZ");
size_t* FT_card = (size_t*) calloc(ndims, sizeof(size_t));
// FT(i,n) others 0
FT_card[1] = V_cards[1]; // i
FT_card[5] = V_cards[5]; // n
prepareHostTensorFromCpp(&FT, NULL, FT_card, ndims, "Host FT");
ct X0_ones, X0_tmp1, X0_tmp2;
size_t X0_cards[ndims];
for (size_t i=0; i<ndims; i++) X0_cards[i] = observed_elements[0].cards_numeric[i];
prepareHostTensorFromCpp(&X0_ones, NULL, X0_cards, ndims, "Host X0_ones", false, true);
prepareHostTensorFromCpp(&X0_tmp1, NULL, X0_cards, ndims, "Host X0_tmp1", false, true);
prepareHostTensorFromCpp(&X0_tmp2, NULL, X0_cards, ndims, "Host X0_tmp2", false, true);
ct X1_ones, X1_tmp1, X1_tmp2;
size_t X1_cards[ndims];
for (size_t i=0; i<ndims; i++) X1_cards[i] = observed_elements[1].cards_numeric[i];
prepareHostTensorFromCpp(&X1_ones, NULL, X1_cards, ndims, "Host X1_ones", false, true);
prepareHostTensorFromCpp(&X1_tmp1, NULL, X1_cards, ndims, "Host X1_tmp1", false, true);
prepareHostTensorFromCpp(&X1_tmp2, NULL, X1_cards, ndims, "Host X1_tmp2", false, true);
ct X2_ones, X2_tmp1, X2_tmp2;
size_t X2_cards[ndims];
for (size_t i=0; i<ndims; i++) X2_cards[i] = observed_elements[2].cards_numeric[i];
prepareHostTensorFromCpp(&X2_ones, NULL, X2_cards, ndims, "Host X2_ones", false, true);
prepareHostTensorFromCpp(&X2_tmp1, NULL, X2_cards, ndims, "Host X2_tmp1", false, true);
prepareHostTensorFromCpp(&X2_tmp2, NULL, X2_cards, ndims, "Host X2_tmp2", false, true);
ct fit;
size_t* fit_card = (size_t*) calloc(ndims, sizeof(size_t));
fit_card[0] = V_cards[0]; //f
fit_card[1] = V_cards[1]; //i
fit_card[3] = V_cards[3]; //t
prepareHostTensorFromCpp(&fit, NULL, fit_card, ndims, "Host fit", false, false, false);
ct fin;
size_t* fin_card = (size_t*) calloc(ndims, sizeof(size_t));
fin_card[0] = V_cards[0]; //f
fin_card[1] = V_cards[1]; //i
fin_card[5] = V_cards[5]; //n
prepareHostTensorFromCpp(&fin, NULL, fin_card, ndims, "Host fin", false, false, false);
REGISTER_CT(F);
REGISTER_CT(fit);
REGISTER_CT(fin);
REGISTER_CT(BC); REGISTER_CT(BC_F); REGISTER_CT(BZ); REGISTER_CT(FT);
REGISTER_CT(X0_ones); REGISTER_CT(X1_ones); REGISTER_CT(X2_ones);
REGISTER_CT(X0_tmp1); REGISTER_CT(X1_tmp1); REGISTER_CT(X2_tmp1);
REGISTER_CT(X0_tmp2); REGISTER_CT(X1_tmp2); REGISTER_CT(X2_tmp2);
if (CUPRINTF == true)
cudaPrintfInit();
//std::cout << " selam 1 " << std::endl;
size_t cur_mem = 0; // initialize: transferToDevice() is skipped when is_parallel is false
if (is_parallel)
cur_mem = transferToDevice(ndims);
if( COUT ) std::cout << "transferToDevice " << cur_mem << " bytes " << std::endl;
///////////////////////////////////////////////////////////////////////////////////////////
// perform GCTF operation //////////////////////////////////////////////////////////////////
// std::vector<std::string> sops_1;
// operands ops_1;
// sops_1.push_back("BZ");
// sops_1.push_back("Z3");
// cur_mem = gen_operation_arguments( sops_1, &ops_1, cur_mem );
std::vector<std::string> sops_2;
operands ops_2;
sops_2.push_back("Z0");
sops_2.push_back("BC");
cur_mem = gen_operation_arguments( sops_2, &ops_2, cur_mem );
std::vector<std::string> sops_3;
operands ops_3;
sops_3.push_back("X0_tmp1");
sops_3.push_back("BC");
cur_mem = gen_operation_arguments( sops_3, &ops_3, cur_mem );
std::vector<std::string> sops_4;
operands ops_4;
sops_4.push_back("X0_tmp2");
sops_4.push_back("BC");
cur_mem = gen_operation_arguments( sops_4, &ops_4, cur_mem );
std::vector<std::string> sops_5;
operands ops_5;
sops_5.push_back("Z0");
sops_5.push_back("FT");
cur_mem = gen_operation_arguments( sops_5, &ops_5, cur_mem );
std::vector<std::string> sops_6;
operands ops_6;
sops_6.push_back("X2_tmp1");
sops_6.push_back("FT");
cur_mem = gen_operation_arguments( sops_6, &ops_6, cur_mem );
std::vector<std::string> sops_7;
operands ops_7;
sops_7.push_back("hatX2");
sops_7.push_back("FT");
int to_power_7[2];
to_power_7[0] = 1-p;
to_power_7[1] = 1;
cur_mem = gen_operation_arguments( sops_7, &ops_7, cur_mem, to_power_7 );
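// Main update loop. Each iteration rebuilds the model estimates (hatX0 from BZ*C,
// hatX2 from D*FT), forms the element-wise terms with hatX^(-p) and hatX^(1-p),
// contracts them against BC' / FT', and applies the multiplicative update
// D <- D .* ((deltaD_n_1 + deltaD_n_2) ./ (deltaD_d_1 + deltaD_d_2)). Here p
// presumably plays the role of the GCTF beta-divergence exponent
// (0: Euclidean, 1: KL, 2: Itakura-Saito).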
for (size_t iter=0; iter<op_iter_count; iter++){
std::cout << "iter " << iter << std::endl;
// D -> Z0
// B -> Z1
// Z -> Z2
// C -> Z3
// G -> Z4
// Y -> Z5
// F -> Z6
// T -> Z7
// B .* Z -> BZ
//std::cout << " NUM_BLOCKS " << NUM_BLOCKS << " THREADS_FOR_BLOCK " << THREADS_FOR_BLOCK << std::endl;
hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["Z1"],
get_d_obj_data()["Z2"],
get_d_obj_data()["BZ"],
h_objs["BZ"]->element_number,
CUPRINTF);
// BZ(i,k) * C(k,t) -> BC(i,k,t)
call_genFullResult(ndims, "BZ", "Z3", "BC_F");
call_contractFintoC(ndims, "BC_F", "BC");
// X1hat(f,t) = D(f,i) * BC(i,t);
// full -> f,i,t
// call_genFullResult(ndims, "Z0", "BC", "fit");
// call_contractFintoC(ndims, "fit", "hatX0");
call_calculate_C_mops(ndims, 2, &ops_2, "hatX0", CUPRINTF);
//arg_D_n_1 = M1.* X1 .* (X1hat.^(-p));
hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["X0"],
get_d_obj_data()["hatX0"],
get_d_obj_data()["X0_tmp1"],
h_objs["X0"]->element_number,
CUPRINTF, 1, -p);
//arg_D_d_1 = M1.* (X1hat.^(1-p));
hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["X0_ones"],
get_d_obj_data()["hatX0"],
get_d_obj_data()["X0_tmp2"],
h_objs["X0"]->element_number,
CUPRINTF, 1, 1-p);
//deltaD_n_1(f,t) = arg_D_n_1(f,t) * (BC(i,t))'
call_calculate_C_mops(ndims, 2, &ops_3, "D1_Z0X0", CUPRINTF);
//deltaD_d_1 = arg_D_d_1 * (BC)';
call_calculate_C_mops(ndims, 2, &ops_4, "D2_Z0X0", CUPRINTF);
// skip mask
//Compute X3hat
// FT = F.*T;
hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["Z6"],
get_d_obj_data()["Z7"],
get_d_obj_data()["FT"],
h_objs["Z6"]->element_number,
CUPRINTF);
// X3hat(f,n) = D(f,i) * FT(i,n);
call_calculate_C_mops(ndims, 2, &ops_5, "hatX2", CUPRINTF);
//deltaD_n_2 = arg_D_n_2 * (FT)';
call_calculate_C_mops(ndims, 2, &ops_6, "D1_Z0X2", CUPRINTF);
//deltaD_d_2 = arg_D_d_2 * (FT)';
call_calculate_C_mops(ndims, 2, &ops_7, "D2_Z0X2", CUPRINTF, ops_7.d_to_power);
//D = D.* ( (deltaD_n_1 + deltaD_n_2 ) ./ (deltaD_d_1 + deltaD_d_2 ));
hipLaunchKernelGGL(( hadamard_sum), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["D1_Z0X0"],
get_d_obj_data()["D1_Z0X2"],
get_d_obj_data()["D1_Z0X0"],
h_objs["D1_Z0X0"]->element_number,
CUPRINTF);
hipLaunchKernelGGL(( hadamard_sum), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["D2_Z0X0"],
get_d_obj_data()["D2_Z0X2"],
get_d_obj_data()["D2_Z0X0"],
h_objs["D2_Z0X0"]->element_number,
CUPRINTF);
hipLaunchKernelGGL(( hadamard_div), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["D1_Z0X0"],
get_d_obj_data()["D2_Z0X0"],
get_d_obj_data()["D1_Z0X0"],
h_objs["D1_Z0X0"]->element_number,
CUPRINTF);
hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["Z0"],
get_d_obj_data()["D1_Z0X0"],
get_d_obj_data()["Z0"],
h_objs["Z0"]->element_number,
CUPRINTF);
// // update D
// // compute x1hat
// // B.*Z -> BZ
//hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["Z1"],
// get_d_obj_data()["Z2"],
// get_d_obj_data()["BZ"],
// h_objs["BZ"]->element_number,
// CUPRINTF);
// // BZ(i,k)*C(k,t) -> BC(i,k,t)
//hipLaunchKernelGGL(( calculate_C_mops), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, (size_t) 3,
// (size_t) (sops_1.size()),
// (size_t**) (ops_1.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["BC"]),
// (size_t*) (get_d_obj_cards()["ikt"]),
// (size_t**) (ops_1.d_cards_operand_pointers),
// (double**) (ops_1.d_operand_pointers),
// (double*) (get_d_obj_data()["BC"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["BC"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// break;
// // X1hat = D*BC;
//hipLaunchKernelGGL(( calculate_C_mops), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, (size_t) ndims,
// (size_t) (sops_2.size()),
// (size_t**) (ops_2.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["hatX0"]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_2.d_cards_operand_pointers),
// (double**) (ops_2.d_operand_pointers),
// (double*) (get_d_obj_data()["hatX0"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["hatX0"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// //arg_D_n_1 = M1.* X1 .* (X1hat.^(-p));
//hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["X0"],
// get_d_obj_data()["hatX0"],
// get_d_obj_data()["X0_tmp1"],
// h_objs["X0"]->element_number,
// CUPRINTF, 1, -p);
// //arg_D_d_1 = M1.* (X1hat.^(1-p));
//hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["X0_ones"],
// get_d_obj_data()["hatX0"],
// get_d_obj_data()["X0_tmp2"],
// h_objs["X0"]->element_number,
// CUPRINTF, 1, 1-p);
// // deltaD_n_1 = arg_D_n_1 * (BC)';
//hipLaunchKernelGGL(( calculate_C_mops), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, (size_t) ndims,
// (size_t) (sops_3.size()),
// (size_t**) (ops_3.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["D1_Z0X0"]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_3.d_cards_operand_pointers),
// (double**) (ops_3.d_operand_pointers),
// (double*) (get_d_obj_data()["D1_Z0X0"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["D1_Z0X0"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// //deltaD_d_1 = arg_D_d_1 * (BC)';
//hipLaunchKernelGGL(( calculate_C_mops), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, (size_t) ndims,
// (size_t) (sops_4.size()),
// (size_t**) (ops_4.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["D2_Z0X0"]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_4.d_cards_operand_pointers),
// (double**) (ops_4.d_operand_pointers),
// (double*) (get_d_obj_data()["D2_Z0X0"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["D2_Z0X0"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// //Compute X3hat
// // FT = F.*T;
//hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["Z6"],
// get_d_obj_data()["Z7"],
// get_d_obj_data()["FT"],
// h_objs["Z6"]->element_number,
// CUPRINTF);
// // X3hat = D*FT;
//hipLaunchKernelGGL(( calculate_C_mops), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, (size_t) ndims,
// (size_t) (sops_5.size()),
// (size_t**) (ops_5.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["hatX2"]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_5.d_cards_operand_pointers),
// (double**) (ops_5.d_operand_pointers),
// (double*) (get_d_obj_data()["hatX2"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["hatX2"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// // arg_D_n_2 = X3 .* (X3hat.^(-p));
//hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["X2"],
// get_d_obj_data()["hatX2"],
// get_d_obj_data()["X2_tmp0"],
// h_objs["X2"]->element_number,
// CUPRINTF, 1, -p);
// // arg_D_d_2 = X3hat.^(1-p);
// // skip
// //hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["X2"],
// // get_d_obj_data()["hatX2"],
// // get_d_obj_data()["D2_Z0X2"],
// // h_objs["X2"]->element_number,
// // CUPRINTF, 1, 1-p);
// //deltaD_n_2 = arg_D_n_2 * (FT)';
// call_calculate_C_mops(ndims, 2, &ops_6, "D1_Z0X2", CUPRINTF);
// //deltaD_d_2 = arg_D_d_2 * (FT)';
// call_calculate_C_mops(ndims, 2, &ops_7, "D2_Z0X2", CUPRINTF);
// //D = D.* ( (deltaD_n_1 + deltaD_n_2 ) ./ (deltaD_d_1 + deltaD_d_2 ));
//hipLaunchKernelGGL(( hadamard_sum), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["D1_Z0X0"],
// get_d_obj_data()["D1_Z0X2"],
// get_d_obj_data()["D1_Z0X0"],
// h_objs["D1_Z0X0"]->element_number,
// CUPRINTF);
//hipLaunchKernelGGL(( hadamard_sum), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["D2_Z0X0"],
// get_d_obj_data()["D2_Z0X2"],
// get_d_obj_data()["D2_Z0X0"],
// h_objs["D2_Z0X0"]->element_number,
// CUPRINTF);
//hipLaunchKernelGGL(( hadamard_div), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["D1_Z0X0"],
// get_d_obj_data()["D2_Z0X0"],
// get_d_obj_data()["D1_Z0X0"],
// h_objs["D1_Z0X0"]->element_number,
// CUPRINTF);
//hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()["Z0"],
// get_d_obj_data()["D1_Z0X0"],
// get_d_obj_data()["Z0"],
// h_objs["Z0"]->element_number,
// CUPRINTF);
// break;
// // calculate all hatX_v and A_v
// for (size_t alpha=0; alpha<max_alpha; alpha++){
// if ( latent_elements[alpha].is_updateable == false) continue;
// // update all hatX
// for ( size_t cur_v=0; cur_v<max_v; cur_v++){
// std::stringstream hat_Xv;
// hat_Xv << "hatX" << cur_v;
// operands ops_Z0_ZN_Xhat;
// std::vector<std::string> z_tensors_str;
// for (size_t tmp_alpha=0; tmp_alpha<max_alpha; tmp_alpha++){
// if ( R[cur_v + tmp_alpha*max_v] == false ) continue;
// std::stringstream name;
// name << 'Z' << tmp_alpha;
// z_tensors_str.push_back(name.str());
// }
// std::cout << "operand num z_tensors_str.size() " << z_tensors_str.size() << std::endl;
// for( size_t i=0; i<z_tensors_str.size(); i++){
// std::cout << "z_tensors_str[" << i << "] = " << z_tensors_str[i] << std::endl;
// }
// cur_mem = gen_operation_arguments(z_tensors_str, &ops_Z0_ZN_Xhat, cur_mem);
// // Z0 * Z1 * ... * ZN -> Xhat
// //std::cout << "Z0 * Z1 * ... * ZN -> " << hat_Xv.str() << std::endl;
// hipLaunchKernelGGL(( calculate_C_mops), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, (size_t) ndims,
// (size_t) (z_tensors_str.size()),
// (size_t**) (ops_Z0_ZN_Xhat.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()[hat_Xv.str()]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_Z0_ZN_Xhat.d_cards_operand_pointers),
// (double**) (ops_Z0_ZN_Xhat.d_operand_pointers),
// (double*) (get_d_obj_data()[hat_Xv.str()]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs[hat_Xv.str()]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// ////std::cout << " z0 * z1 * .. * zn -> " << hat_Xv << " done " << std::endl;
// std::stringstream Xv;
// Xv << 'X' << cur_v ;
// std::stringstream Av;
// Av << 'A' << cur_v;
// hipLaunchKernelGGL(( hadamard_div), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()[hat_Xv.str().c_str()],
// get_d_obj_data()[Xv.str().c_str()],
// get_d_obj_data()[Av.str().c_str()],
// h_objs[Av.str().c_str()]->element_number,
// CUPRINTF,
// p, 1);
// }
// return;
// // for each Xv
// for (size_t cur_v=0; cur_v<max_v; cur_v++){
// if ( R[cur_v + alpha*max_v] == false ) continue; // if this Xv does not involve this Zalpha there is nothing to do
// std::stringstream hat_Xv;
// hat_Xv << "hatX" << cur_v;
// // calculate D1_Zalpha_Xv
// std::stringstream d1;
// d1 << "D1_Z" << alpha << "X" << cur_v;
// // calculate D2_Zalpha_Xv
// std::stringstream d2;
// d2 << "D2_Z" << alpha << "X" << cur_v;
// std::stringstream Av;
// Av << 'A' << cur_v;
// operands ops_A;
// operands ops_M;
// std::vector<std::string> tmp_A;
// std::vector<std::string> tmp_M;
// tmp_A.push_back(Av.str());
// tmp_M.push_back(hat_Xv.str());
// for (size_t other_z=0; other_z < max_alpha; other_z++){
// //std::cout << " process alpha " << alpha << " other_z " << other_z << std::endl;
// if (other_z == alpha || R[cur_v + other_z*max_v] == false ) continue;
// std::stringstream other_z_name;
// other_z_name << "Z" << other_z;
// tmp_A.push_back(other_z_name.str());
// tmp_M.push_back(other_z_name.str());
// //std::cout << "pushing to tmp_A and tmp_M: " << other_z_name.str() << std::endl;
// }
// //std::cout << "operand num tmp_A.size() " << tmp_A.size() << std::endl;
// for( size_t i=0; i<tmp_A.size(); i++){
// }
// cur_mem = gen_operation_arguments(tmp_A, &ops_A, cur_mem);
// //oc_push_back(&operation_chain, GMULT, ndims, Av.str().c_str(), other_z_name.str().c_str(), d1.str().c_str(), is_parallel);
// hipLaunchKernelGGL(( calculate_C_mops), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, (size_t) ndims,
// (size_t) (tmp_A.size()),
// (size_t**) (ops_A.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()[d1.str().c_str()]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_A.d_cards_operand_pointers),
// (double**) (ops_A.d_operand_pointers),
// (double*) (get_d_obj_data()[d1.str().c_str()]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs[d1.str()]->element_number),
// (size_t) 1,
// CUPRINTF,2);
// //oc_push_back(&operation_chain, GMULT, ndims, hat_Xv.str().c_str(), other_z_name.str().c_str(), d2.str().c_str(), is_parallel, "F", p+1, 1);
// int to_power[tmp_M.size()];
// to_power[0]=p+1;
// for (size_t i=0; i<tmp_M.size(); i++){
// to_power[i] = 0;
// }
// cur_mem = gen_operation_arguments(tmp_M, &ops_M, cur_mem, to_power);
// hipLaunchKernelGGL(( calculate_C_mops), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, (size_t) ndims,
// (size_t) (tmp_M.size()),
// (size_t**) (ops_M.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()[d2.str().c_str()]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_M.d_cards_operand_pointers),
// (double**) (ops_M.d_operand_pointers),
// (double*) (get_d_obj_data()[d2.str().c_str()]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs[d2.str()]->element_number),
// (size_t) 1,
// CUPRINTF,3,
// ops_M.d_to_power
// );
// }
// // sum D1_Zalpha_Xv and D2_Zalpha_Xv for all v to update Zalpha
// std::stringstream D1_Zalpha_sum, D2_Zalpha_sum; // will sum into these
// bool first = true;
// for (size_t v=0; v<max_v; v++){
// if ( R[v + alpha*max_v] ){
// if ( first ){
// D1_Zalpha_sum << "D1_Z" << alpha << "X" << v;
// D2_Zalpha_sum << "D2_Z" << alpha << "X" << v;
// first = false;
// }else{
// std::stringstream other_d1, other_d2;
// other_d1 << "D1_Z" << alpha << "X" << v;
// other_d2 << "D2_Z" << alpha << "X" << v;
// hipLaunchKernelGGL(( hadamard_sum), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// get_d_obj_data()[other_d1.str().c_str()],
// get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// h_objs[D1_Zalpha_sum.str().c_str()]->element_number,
// CUPRINTF);
// hipLaunchKernelGGL(( hadamard_sum), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()[D2_Zalpha_sum.str().c_str()],
// get_d_obj_data()[other_d2.str().c_str()],
// get_d_obj_data()[D2_Zalpha_sum.str().c_str()],
// h_objs[D2_Zalpha_sum.str().c_str()]->element_number,
// CUPRINTF);
// }
// }
// }
// hipLaunchKernelGGL(( hadamard_div), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// get_d_obj_data()[D2_Zalpha_sum.str().c_str()],
// get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// h_objs[D1_Zalpha_sum.str().c_str()]->element_number,
// CUPRINTF);
// std::stringstream Zalpha;
// Zalpha << 'Z' << alpha ;
// hipLaunchKernelGGL(( hadamard_mul), dim3(NUM_BLOCKS), dim3(THREADS_FOR_BLOCK), 0, 0, get_d_obj_data()[Zalpha.str().c_str()],
// get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// get_d_obj_data()[Zalpha.str().c_str()],
// h_objs[Zalpha.str().c_str()]->element_number,
// CUPRINTF);
//}
}
// check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
hipDeviceSynchronize();
if ( CUPRINTF == true ){
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
}
///////////////////////////////////////////////////////////////////////////////////////////
// transfer results to matlab /////////////////////////////////////////////////////////////
if ( is_parallel ){
for (size_t z=0; z<latent_elements.size(); z++){
std::stringstream Zn;
Zn << 'Z' << z;
transferFromDevice(output_data_ptr[z], Zn.str().c_str());
}
//transferFromDevice(m_Z1, "Z1");
//transferFromDevice(m_Z2, "Z2");
}else{
for (size_t z=0; z<latent_elements.size(); z++){
memcpy(output_data_ptr[z], Z_tensors[z].data, Z_tensors[z].mem_size);
}
//memcpy(m_Z1, Z1.data, Z1.mem_size);
//memcpy(m_Z2, Z2.data, Z2.mem_size);
}
///////////////////////////////////////////////////////////////////////////////////////////
// reset device
if ( is_parallel )
resetDevice();
hipDeviceReset();
}
| 3aea57dc22499868915515d02cbd3faba4ba3195.cu | /*
* author: ck
* created: 16.02.2012
* advisor: atc
*/
#include <string.h>
#include <sstream>
#include "../common/utils.cuh"
#include "cutil_inline.h"
#include "../common/kernels.cuh"
#include "../common/cuPrintf.cuh"
size_t call_calculate_C_mops_opnum=0;
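// call_calculate_C_mops: generic multi-operand contraction helper. It launches the
// calculate_C_mops kernel with the stride/cardinality/data pointer tables built by
// gen_operation_arguments(), multiplies the operands (optionally raised to
// per-operand powers via d_to_power) over the full index set described by the "F"
// cardinalities, and contracts the result into output_tensor.
// call_calculate_C_mops_opnum appears to be a running call counter forwarded to the
// kernel for debug printing.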
void call_calculate_C_mops(size_t ndims, size_t operand_num, operands* ops, std::string output_tensor, bool print, int* d_to_power = NULL){
calculate_C_mops<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>((size_t) ndims,
(size_t) (operand_num),
(size_t**) (ops->d_strides_operand_pointers),
(size_t*) (get_d_obj_strides()[output_tensor]),
(size_t*) (get_d_obj_cards()["F"]), // used to calculate contraction indices of required dimensions
(size_t**) (ops->d_cards_operand_pointers),
(double**) (ops->d_operand_pointers),
(double*) (get_d_obj_data()[output_tensor]),
//(double*) (get_d_obj_data()["Z0"]),
(size_t) (h_objs[output_tensor]->element_number),
(size_t) 1,
print, call_calculate_C_mops_opnum,
d_to_power);
call_calculate_C_mops_opnum++;
}
void call_genFullResult(size_t ndims, std::string A, std::string B, std::string F, int to_power_A=1, int to_power_B=1){
genFullResult<<<NUM_BLOCKS,THREADS_FOR_BLOCK>>>(get_d_obj_cards()[F], ndims,
get_d_obj_strides()[A], get_d_obj_strides()[B], get_d_obj_strides()[F],
get_d_obj_data()[A], get_d_obj_data()[B], get_d_obj_data()[F],
h_objs[F]->element_number, h_objs[A]->element_number, h_objs[B]->element_number,
1, false, to_power_A, to_power_B);
}
void call_contractFintoC(size_t ndims, std::string F, std::string C){
// prepare range permutation vector //////////////////////////////////////////////////////
size_t zero_cardinality_dim_tuple_size_C = 0;
size_t zero_cardinality_dim_tuples_C_element_number = 0;
size_t* h_zero_cardinality_dim_tuples_C = NULL;
size_t* d_zero_cardinality_dim_tuples_C = NULL;
std::vector<size_t> zero_cardinality_dims;
for ( size_t dim=0; dim<ndims; dim++ ){
if ( h_objs[C]->cardinalities[dim] == 0 && h_objs[F]->cardinalities[dim] != 0 ){
zero_cardinality_dims.push_back(h_objs[F]->cardinalities[dim]);
}
}
if ( COUT ) {
std::cout << "zero_cardinality_dims" << std::endl;
for ( size_t j=0; j<zero_cardinality_dims.size(); j++){
std::cout << zero_cardinality_dims.at(j) << std::endl;
}
}
zero_cardinality_dim_tuple_size_C = zero_cardinality_dims.size();
h_zero_cardinality_dim_tuples_C =
gen_range_permutation(zero_cardinality_dims,
&(zero_cardinality_dim_tuples_C_element_number));
// transfer to device
cutilSafeCall(cudaMalloc((void**)&(d_zero_cardinality_dim_tuples_C),
sizeof(size_t)*zero_cardinality_dim_tuples_C_element_number));
cutilSafeCall(cudaMemcpy(d_zero_cardinality_dim_tuples_C, h_zero_cardinality_dim_tuples_C,
sizeof(size_t)*zero_cardinality_dim_tuples_C_element_number, cudaMemcpyHostToDevice));
////////////////////////////////////////////////////////////////////////////////////////
contractFintoC<<<NUM_BLOCKS,THREADS_FOR_BLOCK>>>(ndims,
get_d_obj_strides()[F], get_d_obj_strides()[C],
get_d_obj_data()[F], get_d_obj_data()[C],
h_objs[C]->element_number,
d_zero_cardinality_dim_tuples_C,
zero_cardinality_dim_tuple_size_C,
zero_cardinality_dim_tuples_C_element_number,
CUPRINTF);
}
void umut01(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[], bool is_parallel){
// prepare model elements //////////////////////////////////////////////////////
size_t op_iter_count = ((double *)mxGetData(prhs[0]))[0];
mxChar* V_card_sym = mxGetChars(prhs[1]);
size_t ndims = mxGetNumberOfElements(prhs[1]);
double* V_cards = (double*) mxGetData(prhs[2]);
int p = ((double *)mxGetData(prhs[3]))[0]; // kept signed: -p and 1-p are used as exponents below
size_t max_v = mxGetM(prhs[4]);
size_t max_alpha = mxGetN(prhs[4]);
bool* R = (bool*) malloc( sizeof(bool) * max_v * max_alpha); // dynamic allocation may not be initialized with = {0} syntax
if(COUT) std::cout << "init R" << std::endl;
for (size_t i=0; i<max_v; i++){
for (size_t j=0; j<max_alpha; j++){
R[i + j*max_v] = (bool) (((double *)mxGetData(prhs[4]))[i + j*max_v]);
if(COUT) std::cout << R[i + j*max_v] << " ";
}
if(COUT) std::cout << std::endl;
}
std::vector<m_tensor> observed_elements;
//size_t observed_element_num = max_v;
size_t m_index=5;
for (size_t t=0; t<max_v; t++){ // need to fill in v number of observed_elements
const mxArray* m_observed_cards = prhs[m_index];
m_index++;
const mxArray* m_observed_data = prhs[m_index];
m_index++;
m_tensor tmp_m_tensor;
tmp_m_tensor.is_updateable = false; // used with latent tensors only
size_t m_observed_cards_elnum = mxGetNumberOfElements(m_observed_cards);
tmp_m_tensor.cards_char = (char*) malloc( m_observed_cards_elnum + 1 );
for (size_t i=0; i<=m_observed_cards_elnum ; i++)
if ( i == m_observed_cards_elnum )
tmp_m_tensor.cards_char[i] = '\0';
else
tmp_m_tensor.cards_char[i] = (char) mxGetChars(m_observed_cards)[i] ;
if ( mxGetNumberOfElements(m_observed_data) == 0 ){
// tensor init data is not given
tmp_m_tensor.data = NULL;
}else{
// tensor init data is given, save pointer
tmp_m_tensor.data = (double*) mxGetData(m_observed_data);
if (COUT) std::cout << "found factor with init data. Data size " << mxGetNumberOfElements(m_observed_data) << std::endl;
}
observed_elements.push_back(tmp_m_tensor);
}
std::vector<m_tensor> latent_elements;
//size_t latent_element_num = max_v;
for (size_t t=0; t<max_alpha; t++){ // need to fill in alpha number of latent_elements
const mxArray* m_latent_cards = prhs[m_index];
m_index++;
const mxArray* m_latent_data = prhs[m_index];
m_index++;
m_tensor tmp_m_tensor;
tmp_m_tensor.is_updateable = (bool) (((double *)mxGetData(prhs[m_index]))[0]);
m_index++;
size_t m_latent_cards_elnum = mxGetNumberOfElements(m_latent_cards);
tmp_m_tensor.cards_char = (char*) malloc( m_latent_cards_elnum + 1 );
for (size_t i=0; i<=m_latent_cards_elnum ; i++)
if ( i == m_latent_cards_elnum )
tmp_m_tensor.cards_char[i] = '\0';
else
tmp_m_tensor.cards_char[i] = (char) mxGetChars(m_latent_cards)[i] ;
if ( mxGetNumberOfElements(m_latent_data) == 0 ){
// tensor init data is not given
tmp_m_tensor.data = NULL;
}else{
// tensor init data is given, save pointer
tmp_m_tensor.data = (double*) mxGetData(m_latent_data);
if (COUT) std::cout << "found factor with init data. Data size " << mxGetNumberOfElements(m_latent_data) << std::endl;
}
latent_elements.push_back(tmp_m_tensor);
}
// prepare cards_numeric indices of model elements
// input arrives like so:
// A['i','k'], B['k', 'j'], C['i','j'] where V is ['i','k','j'] = [2 3 4]
// here we convert indices to internal format:
// A[2, 3, 0], B[0, 3, 4], C[2, 0, 4]
for (size_t m=0; m<observed_elements.size(); m++){
assign_m_tensor_cards_numeric(&(observed_elements[m]), V_card_sym, V_cards, ndims);
}
for (size_t m=0; m<latent_elements.size(); m++){
assign_m_tensor_cards_numeric(&(latent_elements[m]), V_card_sym, V_cards, ndims);
}
if (COUT) {
print_model_elements_text(&observed_elements, "printing observed model elements");
print_model_elements_text(&latent_elements, "printing latent model elements");
}
// now all tensors have correct internal cardinalities.
// all numeric cardinality arrays (m_tensor.cards_numeric) have the same size as V -> ndims
// prepare output tensor in matlab //////////////////////////////////////////////////////
std::vector<double*> output_data_ptr;
for (size_t t=0; t<latent_elements.size(); t++){
mwSize argMatDims[ndims];
for (size_t i=0; i<ndims; i++) {
size_t val = latent_elements[t].cards_numeric[i];
if (val == 0) argMatDims[i] = 1; // MATLAB needs to get 1 instead of 0
else argMatDims[i] = val;
}
plhs[t] = mxCreateNumericArray(ndims, argMatDims, mxDOUBLE_CLASS, mxREAL);
output_data_ptr.push_back( (double*) mxGetPr(plhs[t]) );
}
// prepare host memory for tensors ///////////////////////////////////////////////////////
h_full_cardinalities = (size_t*) calloc(ndims, sizeof(size_t)); // defined in mct_tensorop_utils.cuh
///// cards_numeric are aligned according to the V cardinalities (see above) /////
for (size_t dim=0; dim<ndims; dim++){ // for each dimension
size_t max_dim_card = 0;
for (size_t t=0; t<observed_elements.size(); t++){ // for each model
for (size_t card=0; card<strlen(observed_elements[t].cards_char); card++){ // for each dimension of the model
if (observed_elements[t].cards_char[card] == V_card_sym[dim]){ // if this dimension character matches current dimension's
size_t tensor_dim_card = observed_elements[t].cards_numeric[dim]; //see above//
if ( max_dim_card < tensor_dim_card )
max_dim_card = tensor_dim_card;
break; // only one dimension of each model can match with current dimension
}
}
}
for (size_t t=0; t<latent_elements.size(); t++){ // for each model
for (size_t card=0; card<strlen(latent_elements[t].cards_char); card++){ // for each dimension of the model
if (latent_elements[t].cards_char[card] == V_card_sym[dim]){ // if this dimension character matches current dimension's
size_t tensor_dim_card = latent_elements[t].cards_numeric[dim]; //see above//
if ( max_dim_card < tensor_dim_card )
max_dim_card = tensor_dim_card;
break; // only one dimension of each model can match with current dimension
}
}
}
h_full_cardinalities[dim] = max_dim_card;
}
if(COUT)
for (size_t i=0; i<ndims; i++)
std::cout << "h_full_cardinalities " << i << " " << h_full_cardinalities[i] << std::endl;
// initialize random seed for random initialization of objects
//srand((unsigned)time(NULL));
srand(123);
std::vector<ct> X_tensors;
std::vector<ct> A_tensors;
std::vector<ct> hat_X_tensors;
for (size_t el=0; el<observed_elements.size(); el++){
ct tmp_ct_X;
ct tmp_ct_A;
ct tmp_ct_hatX;
size_t X_card[ndims];
for (size_t i=0; i<ndims; i++) X_card[i] = observed_elements[el].cards_numeric[i];
std::stringstream x, hatx, xa;
x << "Host X" << el;
hatx << "Host hatX" << el;
xa << "Host A_X" << el;
prepareHostTensorFromCpp(&tmp_ct_X, observed_elements[el].data, X_card, ndims, x.str().c_str(), true); // init with given data, if null init with rand
prepareHostTensorFromCpp(&tmp_ct_A, NULL, X_card, ndims, xa.str().c_str(), false, true); // rand=false, init_to_one=true -> init with 1
prepareHostTensorFromCpp(&tmp_ct_hatX, NULL, X_card, ndims, hatx.str().c_str(), true);
X_tensors.push_back(tmp_ct_X);
A_tensors.push_back(tmp_ct_A);
hat_X_tensors.push_back(tmp_ct_hatX);
}
std::vector<ct> Z_tensors;
std::vector<ct> Z_update_tensors; // stores each one of sub-update equation results, then all are summed together
std::vector<ct> D_tensors;
for (size_t el=0; el<latent_elements.size(); el++){
ct tmp_ct;
size_t Z_card[ndims];
for (size_t i=0; i<ndims; i++) Z_card[i] = latent_elements[el].cards_numeric[i];
std::stringstream z;
z << "Host Z" << el;
prepareHostTensorFromCpp(&tmp_ct, latent_elements[el].data, Z_card, ndims, z.str().c_str(), true); // init with given data, if null init with rand
Z_tensors.push_back(tmp_ct);
for (size_t v=0; v<max_v; v++){
ct tmp_ct_D1;
ct tmp_ct_D2;
std::stringstream d1;
d1 << "Host D1_Z" << el << "X" << v;
prepareHostTensorFromCpp(&tmp_ct_D1, NULL, Z_card, ndims, d1.str().c_str());
std::stringstream d2;
d2 << "Host D2_Z" << el << "X" << v;
prepareHostTensorFromCpp(&tmp_ct_D2, NULL, Z_card, ndims, d2.str().c_str());
D_tensors.push_back(tmp_ct_D1);
D_tensors.push_back(tmp_ct_D2);
// ct tmp_ct_update;
// std::stringstream z_update;
// z_update << "Host Z_update" << el << "X" << v;
// prepareHostTensorFromCpp(&tmp_ct_update, NULL, Z_card, ndims, z_update.str().c_str(), false, false); // rand=false, init_to_one=false -> init with 0
// Z_update_tensors.push_back(tmp_ct_update);
}
// for summation of division operands
// ct tmp_ct_D1_sum;
// ct tmp_ct_D2_sum;
// std::stringstream d1;
// d1 << "Host D1_Z" << el << "sum";
// prepareHostTensorFromCpp(&tmp_ct_D1_sum, NULL, Z_card, ndims, d1.str().c_str());
// std::stringstream d2;
// d2 << "Host D2_Z" << el << "sum";
// prepareHostTensorFromCpp(&tmp_ct_D2_sum, NULL, Z_card, ndims, d2.str().c_str());
// D_tensors.push_back(tmp_ct_D1_sum);
// D_tensors.push_back(tmp_ct_D2_sum);
}
ct F;
prepareHostTensorFromCpp(&F, NULL, h_full_cardinalities, ndims, "Host F", true, true, false);
///////////////////////////////////////////////////////////////////////////////////////////
// register & transfer objects to device //////////////////////////////////////////////////
size_t k=0;
for (size_t alpha=0; alpha<max_alpha; alpha++){
for (size_t v=0; v<max_v; v++){
std::stringstream d_name1;
d_name1 << "D1_Z" << alpha << "X" << v;
register_ct( d_name1.str().c_str(), &D_tensors[k]);
k++;
std::stringstream d_name2;
d_name2 << "D2_Z" << alpha << "X" << v;
register_ct( d_name2.str().c_str(), &D_tensors[k]);
k++;
// std::stringstream name_update;
// name_update << "Zup" << alpha << "X" << v;
// register_ct( name_update.str().c_str(), &(Z_update_tensors[k]) );
}
// std::stringstream d_name1, d_name2;
// d_name1 << "D1_Z" << alpha << "sum";
// d_name2 << "D2_Z" << alpha << "sum";
// register_ct( d_name2.str().c_str(), &D_tensors[k]);
// k++;
// register_ct( d_name2.str().c_str(), &D_tensors[k]);
// k++;
}
for (size_t z=0; z<Z_tensors.size(); z++){
std::stringstream name;
name << 'Z' << z;
register_ct( name.str().c_str(), &(Z_tensors[z]) );
}
for (size_t x=0; x<X_tensors.size(); x++){
std::stringstream name;
name << "X" << x;
register_ct( name.str().c_str(), &(X_tensors[x]) );
std::stringstream a_name;
a_name << "A" << x;
register_ct( a_name.str().c_str(), &(A_tensors[x]) );
std::stringstream hat_X_name;
hat_X_name << "hatX" << x;
register_ct( hat_X_name.str().c_str(), &(hat_X_tensors[x]) );
}
// 'f','i','k','t','m','n'
ct BC, BC_F, BZ, FT;
size_t* BC_card = (size_t*) calloc(ndims, sizeof(size_t));
// BC(i,t) others 0
BC_card[1] = V_cards[1]; // i
BC_card[3] = V_cards[3]; // t
prepareHostTensorFromCpp(&BC, NULL, BC_card, ndims, "Host BC");
BC_card[2] = V_cards[2]; // k
prepareHostTensorFromCpp(&BC_F, NULL, BC_card, ndims, "Host BC_F");
size_t* BZ_card = (size_t*) calloc(ndims, sizeof(size_t));
// BZ(i,k) others 0
BZ_card[1] = V_cards[1]; // i
BZ_card[2] = V_cards[2]; // k
prepareHostTensorFromCpp(&BZ, NULL, BZ_card, ndims, "Host BZ");
size_t* FT_card = (size_t*) calloc(ndims, sizeof(size_t));
// FT(i,n) others 0
FT_card[1] = V_cards[1]; // i
FT_card[5] = V_cards[5]; // n
prepareHostTensorFromCpp(&FT, NULL, FT_card, ndims, "Host FT");
ct X0_ones, X0_tmp1, X0_tmp2;
size_t X0_cards[ndims];
for (size_t i=0; i<ndims; i++) X0_cards[i] = observed_elements[0].cards_numeric[i];
prepareHostTensorFromCpp(&X0_ones, NULL, X0_cards, ndims, "Host X0_ones", false, true);
prepareHostTensorFromCpp(&X0_tmp1, NULL, X0_cards, ndims, "Host X0_tmp1", false, true);
prepareHostTensorFromCpp(&X0_tmp2, NULL, X0_cards, ndims, "Host X0_tmp2", false, true);
ct X1_ones, X1_tmp1, X1_tmp2;
size_t X1_cards[ndims];
for (size_t i=0; i<ndims; i++) X1_cards[i] = observed_elements[1].cards_numeric[i];
prepareHostTensorFromCpp(&X1_ones, NULL, X1_cards, ndims, "Host X1_ones", false, true);
prepareHostTensorFromCpp(&X1_tmp1, NULL, X1_cards, ndims, "Host X1_tmp1", false, true);
prepareHostTensorFromCpp(&X1_tmp2, NULL, X1_cards, ndims, "Host X1_tmp2", false, true);
ct X2_ones, X2_tmp1, X2_tmp2;
size_t X2_cards[ndims];
for (size_t i=0; i<ndims; i++) X2_cards[i] = observed_elements[2].cards_numeric[i];
prepareHostTensorFromCpp(&X2_ones, NULL, X2_cards, ndims, "Host X2_ones", false, true);
prepareHostTensorFromCpp(&X2_tmp1, NULL, X2_cards, ndims, "Host X2_tmp1", false, true);
prepareHostTensorFromCpp(&X2_tmp2, NULL, X2_cards, ndims, "Host X2_tmp2", false, true);
ct fit;
size_t* fit_card = (size_t*) calloc(ndims, sizeof(size_t));
fit_card[0] = V_cards[0]; //f
fit_card[1] = V_cards[1]; //i
fit_card[3] = V_cards[3]; //t
prepareHostTensorFromCpp(&fit, NULL, fit_card, ndims, "Host fit", false, false, false);
ct fin;
size_t* fin_card = (size_t*) calloc(ndims, sizeof(size_t));
fin_card[0] = V_cards[0]; //f
fin_card[1] = V_cards[1]; //i
fin_card[5] = V_cards[5]; //n
prepareHostTensorFromCpp(&fin, NULL, fin_card, ndims, "Host fin", false, false, false);
REGISTER_CT(F);
REGISTER_CT(fit);
REGISTER_CT(fin);
REGISTER_CT(BC); REGISTER_CT(BC_F); REGISTER_CT(BZ); REGISTER_CT(FT);
REGISTER_CT(X0_ones); REGISTER_CT(X1_ones); REGISTER_CT(X2_ones);
REGISTER_CT(X0_tmp1); REGISTER_CT(X1_tmp1); REGISTER_CT(X2_tmp1);
REGISTER_CT(X0_tmp2); REGISTER_CT(X1_tmp2); REGISTER_CT(X2_tmp2);
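// The X*_ones tensors act as all-ones masks (M1/M2/M3 in the MATLAB-style comments),
// while X*_tmp1 / X*_tmp2 hold the element-wise numerator and denominator arguments
// (e.g. arg_D_n_1, arg_D_d_1) before they are contracted against BC' or FT'.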
if (CUPRINTF == true)
cudaPrintfInit();
//std::cout << " selam 1 " << std::endl;
size_t cur_mem = 0; // initialize: transferToDevice() is skipped when is_parallel is false
if (is_parallel)
cur_mem = transferToDevice(ndims);
if( COUT ) std::cout << "transferToDevice " << cur_mem << " bytes " << std::endl;
///////////////////////////////////////////////////////////////////////////////////////////
// perform GCTF operation //////////////////////////////////////////////////////////////////
// std::vector<std::string> sops_1;
// operands ops_1;
// sops_1.push_back("BZ");
// sops_1.push_back("Z3");
// cur_mem = gen_operation_arguments( sops_1, &ops_1, cur_mem );
std::vector<std::string> sops_2;
operands ops_2;
sops_2.push_back("Z0");
sops_2.push_back("BC");
cur_mem = gen_operation_arguments( sops_2, &ops_2, cur_mem );
std::vector<std::string> sops_3;
operands ops_3;
sops_3.push_back("X0_tmp1");
sops_3.push_back("BC");
cur_mem = gen_operation_arguments( sops_3, &ops_3, cur_mem );
std::vector<std::string> sops_4;
operands ops_4;
sops_4.push_back("X0_tmp2");
sops_4.push_back("BC");
cur_mem = gen_operation_arguments( sops_4, &ops_4, cur_mem );
std::vector<std::string> sops_5;
operands ops_5;
sops_5.push_back("Z0");
sops_5.push_back("FT");
cur_mem = gen_operation_arguments( sops_5, &ops_5, cur_mem );
std::vector<std::string> sops_6;
operands ops_6;
sops_6.push_back("X2_tmp1");
sops_6.push_back("FT");
cur_mem = gen_operation_arguments( sops_6, &ops_6, cur_mem );
std::vector<std::string> sops_7;
operands ops_7;
sops_7.push_back("hatX2");
sops_7.push_back("FT");
int to_power_7[2];
to_power_7[0] = 1-p;
to_power_7[1] = 1;
cur_mem = gen_operation_arguments( sops_7, &ops_7, cur_mem, to_power_7 );
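// Note on ops_7 above: to_power_7 raises hatX2 to (1-p) inside the contraction with
// FT, which matches arg_D_d_2 = X3hat.^(1-p) in the commented MATLAB-style reference
// below, so no separate temporary is needed for that term.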
for (size_t iter=0; iter<op_iter_count; iter++){
std::cout << "iter " << iter << std::endl;
// D -> Z0
// B -> Z1
// Z -> Z2
// C -> Z3
// G -> Z4
// Y -> Z5
// F -> Z6
// T -> Z7
// B .* Z -> BZ
//std::cout << " NUM_BLOCKS " << NUM_BLOCKS << " THREADS_FOR_BLOCK " << THREADS_FOR_BLOCK << std::endl;
hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["Z1"],
get_d_obj_data()["Z2"],
get_d_obj_data()["BZ"],
h_objs["BZ"]->element_number,
CUPRINTF);
// BZ(i,k) * C(k,t) -> BC(i,k,t)
call_genFullResult(ndims, "BZ", "Z3", "BC_F");
call_contractFintoC(ndims, "BC_F", "BC");
// X1hat(f,t) = D(f,i) * BC(i,t);
// full -> f,i,t
// call_genFullResult(ndims, "Z0", "BC", "fit");
// call_contractFintoC(ndims, "fit", "hatX0");
call_calculate_C_mops(ndims, 2, &ops_2, "hatX0", CUPRINTF);
//arg_D_n_1 = M1.* X1 .* (X1hat.^(-p));
hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["X0"],
get_d_obj_data()["hatX0"],
get_d_obj_data()["X0_tmp1"],
h_objs["X0"]->element_number,
CUPRINTF, 1, -p);
//arg_D_d_1 = M1.* (X1hat.^(1-p));
hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["X0_ones"],
get_d_obj_data()["hatX0"],
get_d_obj_data()["X0_tmp2"],
h_objs["X0"]->element_number,
CUPRINTF, 1, 1-p);
//deltaD_n_1(f,t) = arg_D_n_1(f,t) * (BC(i,t))'
call_calculate_C_mops(ndims, 2, &ops_3, "D1_Z0X0", CUPRINTF);
//deltaD_d_1 = arg_D_d_1 * (BC)';
call_calculate_C_mops(ndims, 2, &ops_4, "D2_Z0X0", CUPRINTF);
// skip mask
//Compute X3hat
// FT = F.*T;
hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["Z6"],
get_d_obj_data()["Z7"],
get_d_obj_data()["FT"],
h_objs["Z6"]->element_number,
CUPRINTF);
// X3hat(f,n) = D(f,i) * FT(i,n);
call_calculate_C_mops(ndims, 2, &ops_5, "hatX2", CUPRINTF);
//deltaD_n_2 = arg_D_n_2 * (FT)';
call_calculate_C_mops(ndims, 2, &ops_6, "D1_Z0X2", CUPRINTF);
//deltaD_d_2 = arg_D_d_2 * (FT)';
call_calculate_C_mops(ndims, 2, &ops_7, "D2_Z0X2", CUPRINTF, ops_7.d_to_power);
//D = D.* ( (deltaD_n_1 + deltaD_n_2 ) ./ (deltaD_d_1 + deltaD_d_2 ));
hadamard_sum<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["D1_Z0X0"],
get_d_obj_data()["D1_Z0X2"],
get_d_obj_data()["D1_Z0X0"],
h_objs["D1_Z0X0"]->element_number,
CUPRINTF);
hadamard_sum<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["D2_Z0X0"],
get_d_obj_data()["D2_Z0X2"],
get_d_obj_data()["D2_Z0X0"],
h_objs["D2_Z0X0"]->element_number,
CUPRINTF);
hadamard_div<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["D1_Z0X0"],
get_d_obj_data()["D2_Z0X0"],
get_d_obj_data()["D1_Z0X0"],
h_objs["D1_Z0X0"]->element_number,
CUPRINTF);
hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["Z0"],
get_d_obj_data()["D1_Z0X0"],
get_d_obj_data()["Z0"],
h_objs["Z0"]->element_number,
CUPRINTF);
// // update D
// // compute x1hat
// // B.*Z -> BZ
// hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["Z1"],
// get_d_obj_data()["Z2"],
// get_d_obj_data()["BZ"],
// h_objs["BZ"]->element_number,
// CUPRINTF);
// // BZ(i,k)*C(k,t) -> BC(i,k,t)
// calculate_C_mops<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>((size_t) 3,
// (size_t) (sops_1.size()),
// (size_t**) (ops_1.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["BC"]),
// (size_t*) (get_d_obj_cards()["ikt"]),
// (size_t**) (ops_1.d_cards_operand_pointers),
// (double**) (ops_1.d_operand_pointers),
// (double*) (get_d_obj_data()["BC"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["BC"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// break;
// // X1hat = D*BC;
// calculate_C_mops<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>((size_t) ndims,
// (size_t) (sops_2.size()),
// (size_t**) (ops_2.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["hatX0"]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_2.d_cards_operand_pointers),
// (double**) (ops_2.d_operand_pointers),
// (double*) (get_d_obj_data()["hatX0"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["hatX0"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// //arg_D_n_1 = M1.* X1 .* (X1hat.^(-p));
// hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["X0"],
// get_d_obj_data()["hatX0"],
// get_d_obj_data()["X0_tmp1"],
// h_objs["X0"]->element_number,
// CUPRINTF, 1, -p);
// //arg_D_d_1 = M1.* (X1hat.^(1-p));
// hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["X0_ones"],
// get_d_obj_data()["hatX0"],
// get_d_obj_data()["X0_tmp2"],
// h_objs["X0"]->element_number,
// CUPRINTF, 1, 1-p);
// // deltaD_n_1 = arg_D_n_1 * (BC)';
// calculate_C_mops<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>((size_t) ndims,
// (size_t) (sops_3.size()),
// (size_t**) (ops_3.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["D1_Z0X0"]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_3.d_cards_operand_pointers),
// (double**) (ops_3.d_operand_pointers),
// (double*) (get_d_obj_data()["D1_Z0X0"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["D1_Z0X0"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// //deltaD_d_1 = arg_D_d_1 * (BC)';
// calculate_C_mops<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>((size_t) ndims,
// (size_t) (sops_4.size()),
// (size_t**) (ops_4.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["D2_Z0X0"]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_4.d_cards_operand_pointers),
// (double**) (ops_4.d_operand_pointers),
// (double*) (get_d_obj_data()["D2_Z0X0"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["D2_Z0X0"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// //Compute X3hat
// // FT = F.*T;
// hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["Z6"],
// get_d_obj_data()["Z7"],
// get_d_obj_data()["FT"],
// h_objs["Z6"]->element_number,
// CUPRINTF);
// // X3hat = D*FT;
// calculate_C_mops<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>((size_t) ndims,
// (size_t) (sops_5.size()),
// (size_t**) (ops_5.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()["hatX2"]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_5.d_cards_operand_pointers),
// (double**) (ops_5.d_operand_pointers),
// (double*) (get_d_obj_data()["hatX2"]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs["hatX2"]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// // arg_D_n_2 = X3 .* (X3hat.^(-p));
// hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["X2"],
// get_d_obj_data()["hatX2"],
// get_d_obj_data()["X2_tmp0"],
// h_objs["X2"]->element_number,
// CUPRINTF, 1, -p);
// // arg_D_d_2 = X3hat.^(1-p);
// // skip
// // hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["X2"],
// // get_d_obj_data()["hatX2"],
// // get_d_obj_data()["D2_Z0X2"],
// // h_objs["X2"]->element_number,
// // CUPRINTF, 1, 1-p);
// //deltaD_n_2 = arg_D_n_2 * (FT)';
// call_calculate_C_mops(ndims, 2, &ops_6, "D1_Z0X2", CUPRINTF);
// //deltaD_d_2 = arg_D_d_2 * (FT)';
// call_calculate_C_mops(ndims, 2, &ops_7, "D2_Z0X2", CUPRINTF);
// //D = D.* ( (deltaD_n_1 + deltaD_n_2 ) ./ (deltaD_d_1 + deltaD_d_2 ));
// hadamard_sum<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["D1_Z0X0"],
// get_d_obj_data()["D1_Z0X2"],
// get_d_obj_data()["D1_Z0X0"],
// h_objs["D1_Z0X0"]->element_number,
// CUPRINTF);
// hadamard_sum<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["D2_Z0X0"],
// get_d_obj_data()["D2_Z0X2"],
// get_d_obj_data()["D2_Z0X0"],
// h_objs["D2_Z0X0"]->element_number,
// CUPRINTF);
// hadamard_div<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["D1_Z0X0"],
// get_d_obj_data()["D2_Z0X0"],
// get_d_obj_data()["D1_Z0X0"],
// h_objs["D1_Z0X0"]->element_number,
// CUPRINTF);
// hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()["Z0"],
// get_d_obj_data()["D1_Z0X0"],
// get_d_obj_data()["Z0"],
// h_objs["Z0"]->element_number,
// CUPRINTF);
// break;
// // calculate all hatX_v and A_v
// for (size_t alpha=0; alpha<max_alpha; alpha++){
// if ( latent_elements[alpha].is_updateable == false) continue;
// // update all hatX
// for ( size_t cur_v=0; cur_v<max_v; cur_v++){
// std::stringstream hat_Xv;
// hat_Xv << "hatX" << cur_v;
// operands ops_Z0_ZN_Xhat;
// std::vector<std::string> z_tensors_str;
// for (size_t tmp_alpha=0; tmp_alpha<max_alpha; tmp_alpha++){
// if ( R[cur_v + tmp_alpha*max_v] == false ) continue;
// std::stringstream name;
// name << 'Z' << tmp_alpha;
// z_tensors_str.push_back(name.str());
// }
// std::cout << "operand num z_tensors_str.size() " << z_tensors_str.size() << std::endl;
// for( size_t i=0; i<z_tensors_str.size(); i++){
// std::cout << "z_tensors_str[" << i << "] = " << z_tensors_str[i] << std::endl;
// }
// cur_mem = gen_operation_arguments(z_tensors_str, &ops_Z0_ZN_Xhat, cur_mem);
// // Z0 * Z1 * ... * ZN -> Xhat
// //std::cout << "Z0 * Z1 * ... * ZN -> " << hat_Xv.str() << std::endl;
// calculate_C_mops<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>((size_t) ndims,
// (size_t) (z_tensors_str.size()),
// (size_t**) (ops_Z0_ZN_Xhat.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()[hat_Xv.str()]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_Z0_ZN_Xhat.d_cards_operand_pointers),
// (double**) (ops_Z0_ZN_Xhat.d_operand_pointers),
// (double*) (get_d_obj_data()[hat_Xv.str()]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs[hat_Xv.str()]->element_number),
// (size_t) 1,
// CUPRINTF,1);
// ////std::cout << " z0 * z1 * .. * zn -> " << hat_Xv << " done " << std::endl;
// std::stringstream Xv;
// Xv << 'X' << cur_v ;
// std::stringstream Av;
// Av << 'A' << cur_v;
// hadamard_div<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()[hat_Xv.str().c_str()],
// get_d_obj_data()[Xv.str().c_str()],
// get_d_obj_data()[Av.str().c_str()],
// h_objs[Av.str().c_str()]->element_number,
// CUPRINTF,
// p, 1);
// }
// return;
// // for each Xv
// for (size_t cur_v=0; cur_v<max_v; cur_v++){
// if ( R[cur_v + alpha*max_v] == false ) continue; // if this Xv does not have this Zalpha there is nothing to do
// std::stringstream hat_Xv;
// hat_Xv << "hatX" << cur_v;
// // calculate D1_Zalpha_Xv
// std::stringstream d1;
// d1 << "D1_Z" << alpha << "X" << cur_v;
// // calculate D2_Zalpha_Xv
// std::stringstream d2;
// d2 << "D2_Z" << alpha << "X" << cur_v;
// std::stringstream Av;
// Av << 'A' << cur_v;
// operands ops_A;
// operands ops_M;
// std::vector<std::string> tmp_A;
// std::vector<std::string> tmp_M;
// tmp_A.push_back(Av.str());
// tmp_M.push_back(hat_Xv.str());
// for (size_t other_z=0; other_z < max_alpha; other_z++){
// //std::cout << " process alpha " << alpha << " other_z " << other_z << std::endl;
// if (other_z == alpha || R[cur_v + other_z*max_v] == false ) continue;
// std::stringstream other_z_name;
// other_z_name << "Z" << other_z;
// tmp_A.push_back(other_z_name.str());
// tmp_M.push_back(other_z_name.str());
// //std::cout << "pushing to tmp_A and tmp_M: " << other_z_name.str() << std::endl;
// }
// //std::cout << "operand num tmp_A.size() " << tmp_A.size() << std::endl;
// for( size_t i=0; i<tmp_A.size(); i++){
// }
// cur_mem = gen_operation_arguments(tmp_A, &ops_A, cur_mem);
// //oc_push_back(&operation_chain, GMULT, ndims, Av.str().c_str(), other_z_name.str().c_str(), d1.str().c_str(), is_parallel);
// calculate_C_mops<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>((size_t) ndims,
// (size_t) (tmp_A.size()),
// (size_t**) (ops_A.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()[d1.str().c_str()]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_A.d_cards_operand_pointers),
// (double**) (ops_A.d_operand_pointers),
// (double*) (get_d_obj_data()[d1.str().c_str()]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs[d1.str()]->element_number),
// (size_t) 1,
// CUPRINTF,2);
// //oc_push_back(&operation_chain, GMULT, ndims, hat_Xv.str().c_str(), other_z_name.str().c_str(), d2.str().c_str(), is_parallel, "F", p+1, 1);
// int to_power[tmp_M.size()];
// to_power[0]=p+1;
// for (size_t i=0; i<tmp_M.size(); i++){
// to_power[i] = 0;
// }
// cur_mem = gen_operation_arguments(tmp_M, &ops_M, cur_mem, to_power);
// calculate_C_mops<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>((size_t) ndims,
// (size_t) (tmp_M.size()),
// (size_t**) (ops_M.d_strides_operand_pointers),
// (size_t*) (get_d_obj_strides()[d2.str().c_str()]),
// (size_t*) (get_d_obj_cards()["F"]),
// (size_t**) (ops_M.d_cards_operand_pointers),
// (double**) (ops_M.d_operand_pointers),
// (double*) (get_d_obj_data()[d2.str().c_str()]),
// //(double*) (get_d_obj_data()["Z0"]),
// (size_t) (h_objs[d2.str()]->element_number),
// (size_t) 1,
// CUPRINTF,3,
// ops_M.d_to_power
// );
// }
// // sum D1_Zalpha_Xv and D2_Zalpha_Xv for all v to update Zalpha
// std::stringstream D1_Zalpha_sum, D2_Zalpha_sum; // will sum into these
// bool first = true;
// for (size_t v=0; v<max_v; v++){
// if ( R[v + alpha*max_v] ){
// if ( first ){
// D1_Zalpha_sum << "D1_Z" << alpha << "X" << v;
// D2_Zalpha_sum << "D2_Z" << alpha << "X" << v;
// first = false;
// }else{
// std::stringstream other_d1, other_d2;
// other_d1 << "D1_Z" << alpha << "X" << v;
// other_d2 << "D2_Z" << alpha << "X" << v;
// hadamard_sum<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>(get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// get_d_obj_data()[other_d1.str().c_str()],
// get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// h_objs[D1_Zalpha_sum.str().c_str()]->element_number,
// CUPRINTF);
// hadamard_sum<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>(get_d_obj_data()[D2_Zalpha_sum.str().c_str()],
// get_d_obj_data()[other_d2.str().c_str()],
// get_d_obj_data()[D2_Zalpha_sum.str().c_str()],
// h_objs[D2_Zalpha_sum.str().c_str()]->element_number,
// CUPRINTF);
// }
// }
// }
// hadamard_div<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// get_d_obj_data()[D2_Zalpha_sum.str().c_str()],
// get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// h_objs[D1_Zalpha_sum.str().c_str()]->element_number,
// CUPRINTF);
// std::stringstream Zalpha;
// Zalpha << 'Z' << alpha ;
// hadamard_mul<<<NUM_BLOCKS, THREADS_FOR_BLOCK>>>( get_d_obj_data()[Zalpha.str().c_str()],
// get_d_obj_data()[D1_Zalpha_sum.str().c_str()],
// get_d_obj_data()[Zalpha.str().c_str()],
// h_objs[Zalpha.str().c_str()]->element_number,
// CUPRINTF);
//}
}
  // check if kernel execution generated an error
cutilCheckMsg("Kernel execution failed");
cudaDeviceSynchronize();
if ( CUPRINTF == true ){
cudaPrintfDisplay(stdout, true);
cudaPrintfEnd();
}
///////////////////////////////////////////////////////////////////////////////////////////
// transfer results to matlab /////////////////////////////////////////////////////////////
if ( is_parallel ){
for (size_t z=0; z<latent_elements.size(); z++){
std::stringstream Zn;
Zn << 'Z' << z;
transferFromDevice(output_data_ptr[z], Zn.str().c_str());
}
//transferFromDevice(m_Z1, "Z1");
//transferFromDevice(m_Z2, "Z2");
}else{
for (size_t z=0; z<latent_elements.size(); z++){
memcpy(output_data_ptr[z], Z_tensors[z].data, Z_tensors[z].mem_size);
}
//memcpy(m_Z1, Z1.data, Z1.mem_size);
//memcpy(m_Z2, Z2.data, Z2.mem_size);
}
///////////////////////////////////////////////////////////////////////////////////////////
// reset device
if ( is_parallel )
resetDevice();
cudaThreadExit();
}
|
4fda9d85b0f3f70a9841b4e5ac68605b4a9e7364.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/image_resize.h>
#include <cuda_exception.h>
namespace nd4j {
namespace ops {
namespace helpers {
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
        // 1-D linear interpolation scale (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
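    // A minimal host-side sketch of how one entry above is filled in; the helper name
    // computeInterpolationWeightOnHost is hypothetical (illustration only) and simply mirrors the
    // device kernel that follows (which additionally scales both indices by `channels` when channels != 0).
    static inline BilinearInterpolationData computeInterpolationWeightOnHost(Nd4jLong i, Nd4jLong inSize, double scale) {
        BilinearInterpolationData d;
        double in = i * scale;                                              // continuous source coordinate
        d.bottomIndex = static_cast<Nd4jLong>(in);                          // floor -> lower source sample
        d.topIndex = nd4j::math::nd4j_min(d.bottomIndex + 1, inSize - 1);   // clamped upper source sample
        d.interpolarValue = in - d.bottomIndex;                             // fractional weight of topIndex
        return d;
    }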
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeInterpolationWeights kernel
// outSize - output length
// inSize - input size
// scale - input scale
    // interpolationData - result
//
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
Nd4jLong inSize,
double scale,
Nd4jLong channels,
BilinearInterpolationData* interpolationData) {
interpolationData[outSize].bottomIndex = 0;
interpolationData[outSize].topIndex = 0;
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
double in = i * scale;
interpolationData[i].bottomIndex = static_cast<Nd4jLong>(in);
interpolationData[i].topIndex = nd4j::math::nd4j_min(interpolationData[i].bottomIndex + 1, inSize - 1);
interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex;
if (channels) {
math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm
//
static void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm kernel
//
template <typename T>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, T* outputYptr, Nd4jLong* outputShape, Nd4jLong batchSize,
Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x ) { // blockIdx.x as batch index
auto pX = input + batch * inBatchNumValues;
for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
const T *ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize;
const T *ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize;
double yVal = ys_[y].interpolarValue;
auto pZ = outputYptr + (batch * outHeight + y) * outRowSize;
for (Nd4jLong x = threadIdx.y; x < outWidth; x += blockDim.y) {
auto xsBottom = xs_[x].bottomIndex;
auto xsTop = xs_[x].topIndex;
auto xVal = xs_[x].interpolarValue;
// process interpolation for all channels
for (int c = threadIdx.z; c < channels; c += blockDim.z) {
double topLeft(ys_input_lower_ptr[xsBottom + c]);
double topRight(ys_input_lower_ptr[xsTop + c]);
double bottomLeft(ys_input_upper_ptr[xsBottom + c]);
double bottomRight(ys_input_upper_ptr[xsTop + c]);
double top = topLeft + (topRight - topLeft) * xVal;
double bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;
pZ[x * channels + c] = T(top + (bottom - top) * yVal);
}
}
}
}
}
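    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // Host-side sketch of the per-pixel arithmetic used in the kernel above: two horizontal lerps
    // followed by one vertical lerp. The helper name bilerpOnHost is hypothetical (illustration only).
    static inline double bilerpOnHost(double topLeft, double topRight, double bottomLeft, double bottomRight,
                                      double xVal, double yVal) {
        double top = topLeft + (topRight - topLeft) * xVal;                 // interpolate along x on the top row
        double bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;     // interpolate along x on the bottom row
        return top + (bottom - top) * yVal;                                 // interpolate along y between the two rows
    }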
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with
template <typename T>
static void resizeImage_(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output) {
Nd4jLong inRowSize = inWidth * channels;
Nd4jLong inBatchNumValues = inHeight * inRowSize;
Nd4jLong outRowSize = outWidth * channels;
auto stream = context->getCudaStream();
T const *input_b_ptr = reinterpret_cast<T const *>(images->getSpecialBuffer()); // this works only with 'c' direction
T *output_y_ptr = reinterpret_cast<T *>(output->specialBuffer());
dim3 batchSizeBlock(batchSize, 1, 1);
dim3 pictureBlock(outHeight, outWidth, channels);
hipLaunchKernelGGL(( resizeImageKernel<T>), dim3(256), dim3(pictureBlock), 256, *stream, input_b_ptr, images->getSpecialShapeInfo(), output_y_ptr, output->specialShapeInfo(), batchSize,
outWidth, outHeight, channels, inRowSize, outRowSize, inBatchNumValues, xs_, ys_);
auto err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeImage_: Cannot synchronize kernel execution", err);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static int resizeBilinearFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
// Special case for TF compatibility
if((center && inHeight < 2) || (center && inWidth < 2)){
center = false;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_bilinear: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
float heightScale = center ? (inHeight - 1.f) / double(outHeight - 1.f) : (inHeight / float(outHeight));
float widthScale = center ? (inWidth - 1.f) / double(outWidth - 1.f) : (inWidth / float(outWidth));
BilinearInterpolationData* xs_;// = xs.data();
BilinearInterpolationData* ys_;// = xs.data();
hipError_t err = hipMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangulars", err);
}
err = hipMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangulars", err);
}
auto stream = context->getCudaStream();
// Compute the cached interpolation weights on the x and y dimensions.
hipLaunchKernelGGL(( computeInterpolationWeights), dim3(256), dim3(512), 512, *stream, outHeight, inHeight, heightScale, 0, ys_);
hipLaunchKernelGGL(( computeInterpolationWeights), dim3(256), dim3(512), 512, *stream, outWidth, inWidth, widthScale, channels, xs_);
NDArray::prepareSpecialUse({output}, {images});
resizeImage(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
NDArray::registerSpecialUse({output}, {images});
err = hipFree(xs_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangulars", err);
}
err = hipFree(ys_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontical parts rectangulars", err);
}
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize by interpolation nearest neighbor algorithm kernel
//
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape,
Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center) {
//for (int b = blockIdx.x; b < batchSize; b += gridDim.x)
if (blockIdx.x < batchSize)
{
auto b = blockIdx.x;
for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
Nd4jLong inY = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(y * heightScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
y * heightScale)), inHeight - 1);
for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
Nd4jLong inX = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(x * widthScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
x * widthScale)), inWidth - 1);
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong e = start; e < channels; e += step) {
Nd4jLong posX[] = {b, inY, inX, e};
Nd4jLong posZ[] = {b, y, x, e};
auto xIndex = shape::getOffset(inputShape, posX);
auto zIndex = shape::getOffset(outputShape, posZ);
output[zIndex] = input[xIndex];
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeNeighborFunctor - main algorithm by nearest neighbor
//
template <typename T>
int resizeNeighborFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_nearest_neighbor: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
double heightScale = center ? (inHeight - 1.) / double(outHeight - 1.0) : (inHeight / double(outHeight));
double widthScale = center ? (inWidth - 1.) / double(outWidth - 1.0) : (inWidth / double(outWidth));
auto imagesBuffer = reinterpret_cast<T const*>(images->getSpecialBuffer());
auto outputBuffer = reinterpret_cast<T*>(output->specialBuffer());
auto stream = context->getCudaStream();
//T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong* outputShape,
// Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center
//input, inputShape, output, outputShape,
// batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center
NDArray::prepareSpecialUse({output}, {images});
hipLaunchKernelGGL(( resizeNeighborKernel<T>), dim3(batchSize), dim3(outHeight * outWidth), 512, *stream, imagesBuffer, images->getSpecialShapeInfo(), outputBuffer, output->specialShapeInfo(),
batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center);
NDArray::registerSpecialUse({output}, {images});
return Status::OK();
}
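    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // Worked example for the nearest-neighbour index mapping above (illustrative): resizing 4 -> 8 columns
    // without centering gives widthScale = 4 / 8.0 = 0.5, so output column x = 5 reads input column
    // floor(5 * 0.5) = 2; with center == true the scaled coordinate is rounded instead of floored.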
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeImage - resize bilinear algorithm caller
//
void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void resizeImage_,(nd4j::LaunchContext* context, NDArray const* images,
Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeBilinearFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeBilinearFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeNeighborFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (nd4j::LaunchContext* context, NDArray const* images,
int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bicubic interpolation
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Utility functions and classes
// calculateResizeScale determines the float scaling factor.
inline float calculateResizeScale(Nd4jLong inSize, Nd4jLong outSize,
bool alignCorners) {
return (alignCorners && outSize > 1)
? (inSize - 1) / static_cast<float>(outSize - 1)
: inSize / static_cast<float>(outSize);
}
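    // Worked example (illustrative): resizing 4 -> 8 samples along one axis gives
    //   alignCorners == true  -> (4 - 1) / float(8 - 1) = 3.f / 7.f ~ 0.4286, so the corner samples map exactly;
    //   alignCorners == false ->  4      / float(8)     = 0.5f,      a plain ratio of the two sizes.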
struct ImageResizerState {
explicit ImageResizerState(bool alignCorners, bool halfPixelCenters)
: _alignCorners(alignCorners),
_halfPixelCenters(halfPixelCenters) {}
// ValidateAndCalculateOutputSize checks the bounds on the input tensors
// and requested size, sets up some of the resizing state such as the
// heightScale and widthScale, and calculates the output size.
// If any of these operations fails, it sets an error status in
// the context, which the caller must check.
int validateAndCalculateOutputSize(NDArray const* input, int const width, int const height) {
//
batchSize = input->sizeAt(0);//.dim_size(0);
outHeight = height;
outWidth = width; //internal::SubtleMustCopy(Svec(1));
inHeight = static_cast<int32_t>(input->sizeAt(1));
inWidth = static_cast<int32_t>(input->sizeAt(2));
channels = input->sizeAt(3); //.dim_size(3);
heightScale = calculateResizeScale(inHeight, outHeight, _alignCorners);
widthScale = calculateResizeScale(inWidth, outWidth, _alignCorners);
// Guard against overflows
if (ceilf((outHeight - 1) * heightScale) > static_cast<float>(DataTypeUtils::max<int>())) {
nd4j_printf("resize_bicubic: Upper overflow occurs for resize height (%f)\n", ceilf((outHeight - 1) * heightScale));
return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize height");
}
            if (ceilf((outWidth - 1) * widthScale) > static_cast<float>(DataTypeUtils::max<int>())) {
                nd4j_printf("resize_bicubic: Upper overflow occurs for resize width (%f)\n", ceilf((outWidth - 1) * widthScale));
return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize width");
}
return Status::OK();
}
// Calculates all the required variables, and allocates the output.
int validateAndCreateOutput(NDArray const* input, int const width, int const height) {
return validateAndCalculateOutputSize(input, width, height);
}
Nd4jLong batchSize;
Nd4jLong outHeight;
Nd4jLong outWidth;
Nd4jLong inHeight;
Nd4jLong inWidth;
Nd4jLong channels;
float heightScale;
float widthScale;
NDArray* output = nullptr;
hipStream_t* stream;
private:
bool _alignCorners;
bool _halfPixelCenters;
};
// Half pixel scaler scales assuming that the pixel centers are at 0.5, i.e. the
// floating point coordinates of the top,left pixel is 0.5,0.5.
struct HalfPixelScaler {
_CUDA_HD HalfPixelScaler(){};
inline _CUDA_HD float operator()(const int x, const float scale) const {
// Note that we subtract 0.5 from the return value, as the existing bilinear
// sampling code etc assumes pixels are in the old coordinate system.
return (static_cast<float>(x) + 0.5f) * scale - 0.5f;
}
};
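    // Worked example for the half-pixel mapping above (illustrative): with scale = 0.5f, output pixel x = 3
    // maps to (3 + 0.5) * 0.5 - 0.5 = 1.25, i.e. a quarter of the way from source pixel 1 towards pixel 2.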
struct WeightsAndIndices {
float _weight0;
float _weight1;
float _weight2;
float _weight3;
Nd4jLong _index0;
Nd4jLong _index1;
Nd4jLong _index2;
Nd4jLong _index3;
int _advance; // advance value.
};
class CachedInterpolationCalculator {
public:
_CUDA_HD CachedInterpolationCalculator() : _indexes{-1, -1, -1, -1} {}
// Advances iteration. Returns the number of values that should be copied from
// the current point to the next point. The copying should always be done by
// copying the last <retval> values from the old point to the first <retval>
// values of the new point.
inline _CUDA_HD int Advance(const Nd4jLong x0, const Nd4jLong x1, const Nd4jLong x2,
const Nd4jLong x3) {
// We use 2 hands and walk through, copying from one to another where
// we already have values.
            // Invariant: new_indices_hand <= cached_values_hand
const Nd4jLong new_x_indices[4] = {x0, x1, x2, x3};
int cachedValuesHand = 0;
int newIndiciesHand = 0;
while (cachedValuesHand < 4) {
if (_indexes[cachedValuesHand] == new_x_indices[newIndiciesHand]) {
if (newIndiciesHand < cachedValuesHand) {
_indexes[newIndiciesHand] = _indexes[cachedValuesHand];
}
newIndiciesHand++;
}
cachedValuesHand++;
}
switch (newIndiciesHand) {
case 0:
_indexes[0] = x0;
case 1:
_indexes[1] = x1;
case 2:
_indexes[2] = x2;
case 3:
_indexes[3] = x3;
break;
}
return newIndiciesHand;
}
private:
Nd4jLong _indexes[4];
};
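    // Usage sketch for the calculator above (illustrative): starting from the sentinel state {-1, -1, -1, -1},
    // Advance(0, 1, 2, 3) returns 0 (nothing can be reused yet), while a following Advance(1, 2, 3, 4) returns 3,
    // i.e. the last three cached values are copied into the first three slots of the new point and only the
    // fourth value has to be recomputed.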
static __global__ void initCoefTableKernel(const double a, float* table, Nd4jLong tableSize) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i <= tableSize; i += step) {
float x = i * 1.0 / tableSize;
table[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
x += 1.0;
table[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
}
}
static const Nd4jLong kTableSize = (1 << 10);
float* initCoeffsTable(const double a, hipStream_t* stream) {
// Allocate and initialize coefficients table using Bicubic
// convolution algorithm.
// https://en.wikipedia.org/wiki/Bicubic_interpolation
float* coeffs_table; // = new float[(kTableSize + 1) * 2];
auto err = hipMalloc(&coeffs_table, sizeof(float) * ((kTableSize + 1) * 2));
if (err != 0) {
throw cuda_exception::build("helpers::initCoeffsTable: Cannot allocate memory for vertical parts rectangulars", err);
}
hipLaunchKernelGGL(( initCoefTableKernel), dim3(128),dim3(128),128, *stream, a, coeffs_table, kTableSize);
err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::initCoeffsTable: Cannot syncronize kernel", err);
}
return coeffs_table;
}
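    // Sanity check for the table above (illustrative): with the Keys parameter a, the cubic convolution kernel is
    //   w(x) = (a + 2)|x|^3 - (a + 3)|x|^2 + 1          for |x| <= 1
    //   w(x) = a|x|^3 - 5a|x|^2 + 8a|x| - 4a            for 1 < |x| < 2
    // so entry 2*i holds w(i / kTableSize) and entry 2*i + 1 holds w(i / kTableSize + 1); in particular
    // w(0) = 1 and w(1) = w(2) = 0, which the formulas in initCoefTableKernel reproduce.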
// _CUDA_HD const float* getCoeffsTable(const bool use_keys_cubic) {
// // Static so that we initialize it on first use
// if (use_keys_cubic) {
// // http://ieeexplore.ieee.org/document/1163711/
// // R. G. Keys. Cubic convolution interpolation for digital image
// // processing. IEEE Transactions on Acoustics, Speech, and Signal
    // //    Processing, 29(6):1153-1160, 1981.
// //static const float* coeffs_table = initCoeffsTable(-0.5f, stream);
// return sCoeffsTableHalf;
// } else {
// //static const float* coeffs_table = initCoeffsTable(-0.75f, stream);
// return sCoeffsTableThreeFourth;
// }
// }
inline _CUDA_HD Nd4jLong bound(Nd4jLong val, Nd4jLong limit) {
return math::nd4j_min(limit - 1ll, math::nd4j_max(Nd4jLong{0}, val));
}
template <typename T>
inline _CUDA_HD float interpolate1D(const float weight0, const float weight1, const float weight2, const float weight3,
const T value0, const T value1, const T value2, const T value3) {
return static_cast<float>(value0) * weight0 +
static_cast<float>(value1) * weight1 +
static_cast<float>(value2) * weight2 +
static_cast<float>(value3) * weight3;
}
// Compute the 1D interpolation for a given X index using the y_weights
static _CUDA_HD float compute(float values[4], const float xW0, const float xW1, const float xW2, const float xW3) {
return interpolate1D(xW0, xW1, xW2, xW3, values[0], values[1],values[2], values[3]);
}
template <typename Scaler, bool use_keys_cubic>
inline _CUDA_HD void getWeightsAndIndices(float const* coeffs_table, const float scale, const Nd4jLong out_loc, const Nd4jLong limit, WeightsAndIndices* out) {
const Scaler scaler;
const float in_loc_f = scaler(out_loc, scale);
const Nd4jLong in_loc = math::nd4j_floor<float, Nd4jLong>(in_loc_f);
const float delta = in_loc_f - in_loc;
const Nd4jLong offset = math::nd4j_round<float, Nd4jLong>(delta * kTableSize);
//const float* coeffs_table = getCoeffsTable(use_keys_cubic);
if (use_keys_cubic) {
// The legacy code placed more weight on the edge pixels, since bounding
// the set of inputs to sample could cause an edge pixel to be repeated.
// Here we change the behavior at borders to match that used by the
// scale_and_translate_op, where sampling locations outside the image have
// their weight set to 0, and the weights are renormalized so that their sum
// is 1.0.
out->_index0 = bound(in_loc - 1, limit);
out->_weight0 =
(out->_index0 == in_loc - 1 ? coeffs_table[offset * 2 + 1] : 0.0f);
out->_index1 = bound(in_loc, limit);
out->_weight1 = (out->_index1 == in_loc ? coeffs_table[offset * 2] : 0.0f);
out->_index2 = bound(in_loc + 1, limit);
out->_weight2 =
(out->_index2 == in_loc + 1 ? coeffs_table[(kTableSize - offset) * 2]
: 0.0f);
out->_index3 = bound(in_loc + 2, limit);
out->_weight3 = (out->_index3 == in_loc + 2
? coeffs_table[(kTableSize - offset) * 2 + 1]
: 0.0f);
const float weight_sum =
out->_weight0 + out->_weight1 + out->_weight2 + out->_weight3;
if (math::nd4j_abs(weight_sum) >= 1000.0f * DataTypeUtils::min<float>()) {
const float one_over_weight_sum = 1.0f / weight_sum;
out->_weight0 *= one_over_weight_sum;
out->_weight1 *= one_over_weight_sum;
out->_weight2 *= one_over_weight_sum;
out->_weight3 *= one_over_weight_sum;
}
} else {
out->_weight0 = coeffs_table[offset * 2 + 1];
out->_weight1 = coeffs_table[offset * 2];
out->_weight2 = coeffs_table[(kTableSize - offset) * 2];
out->_weight3 = coeffs_table[(kTableSize - offset) * 2 + 1];
out->_index0 = bound(in_loc - 1, limit);
out->_index1 = bound(in_loc, limit);
out->_index2 = bound(in_loc + 1, limit);
out->_index3 = bound(in_loc + 2, limit);
}
}
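    // Boundary example for the routine above (illustrative): with use_keys_cubic and in_loc == 0, the left tap
    // at in_loc - 1 is clamped back to index 0, its weight is forced to 0, and the remaining three weights are
    // rescaled by 1 / weight_sum so that they still sum to one at the image edge.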
// Older incorrect scaling method that causes all resizes to have a slight
// translation leading to inconsistent results. For example, a flip then a
    // resize gives different results than a resize then a flip.
struct LegacyScaler {
_CUDA_HD LegacyScaler(){};
inline _CUDA_HD float operator()(const int x, const float scale) const {
return static_cast<float>(x) * scale;
}
};
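    // Worked comparison (illustrative): for scale = 0.5f, output x = 3 maps to 3 * 0.5 = 1.5 with LegacyScaler
    // but to 1.25 with the HalfPixelScaler above; that constant offset is the translation mentioned in the
    // comment.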
static __global__ void accumulateChannelsKernel(WeightsAndIndices* pXWais, Nd4jLong outWidth, Nd4jLong channels) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto x = start; x < outWidth; x += step) {
pXWais[x]._index0 *= channels;
pXWais[x]._index1 *= channels;
pXWais[x]._index2 *= channels;
pXWais[x]._index3 *= channels;
}
}
static __global__ void advaceWeightsAndIndicesKernel(float const* cacheTable, CachedInterpolationCalculator* calc, WeightsAndIndices* pXWais, Nd4jLong inWidth, float widthScale,
Nd4jLong outWidth, Nd4jLong channels, bool halfPixelCenters) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto x = start; x < outWidth; x += step) {
if (halfPixelCenters)
getWeightsAndIndices<HalfPixelScaler, true>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
else
getWeightsAndIndices<LegacyScaler, false>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
pXWais[x]._advance = calc->Advance(pXWais[x]._index0, pXWais[x]._index1, pXWais[x]._index2, pXWais[x]._index3);
}
}
// resizerState and xWais are device allocated
static void computeXWeightsAndIndices(float const* coeffsTable, const ImageResizerState& resizerState,
const bool halfPixelCenters,
WeightsAndIndices* pXWais) {
auto stream = resizerState.stream;
auto outWidth = resizerState.outWidth;
CachedInterpolationCalculator calc; // = new CachedInterpolationCalculator;
CachedInterpolationCalculator* pCalcD;
auto err = hipMalloc(&pCalcD, sizeof(CachedInterpolationCalculator));
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot allocated device memory for interpolate calculator", err);
}
err = hipMemcpy(pCalcD, &calc, sizeof(CachedInterpolationCalculator), hipMemcpyHostToDevice);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot set up device memory for interpolate calculator", err);
}
hipLaunchKernelGGL(( advaceWeightsAndIndicesKernel), dim3(128), dim3(128), 128, *stream, coeffsTable, pCalcD, pXWais, resizerState.inWidth, resizerState.widthScale, outWidth, resizerState.channels, halfPixelCenters);
err = hipFree(pCalcD);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot deallocated device memory for interpolate calculator", err);
}
err = hipStreamSynchronize(*stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after advance weights and indicers", err);
}
// Scale the values so they can be used as offsets into buffers.
hipLaunchKernelGGL(( accumulateChannelsKernel), dim3(128), dim3(128), 512, *stream, pXWais, outWidth, resizerState.channels);
err = hipStreamSynchronize(*stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after accumulate channels", err);
}
}
template <typename T>
static _CUDA_HD FORCEINLINE float computeYInterpolation(
int which, int channelNum, const WeightsAndIndices& yWai,
const T* pY0, const T* pY1, const T* pY2, const T* pY3,
const WeightsAndIndices& xWai) {
int xIndex;
switch (which) {
case 0:
xIndex = xWai._index0;
break;
case 1:
xIndex = xWai._index1;
break;
case 2:
xIndex = xWai._index2;
break;
default:
xIndex = xWai._index3;
break;
}
const Nd4jLong pt_index = xIndex + channelNum;
return interpolate1D<T>(yWai._weight0, yWai._weight1, yWai._weight2,
yWai._weight3, pY0[pt_index], pY1[pt_index],
pY2[pt_index], pY3[pt_index]);
}
template <typename T>
static __global__ void bicubicInterpolateWithCachingKernel(float const* cachedTable, float* cachedValue, T const* inputPtr, ImageResizerState* pResizerState, WeightsAndIndices* xWais, bool halfPixelCenters, Nd4jLong inBatchWidth, Nd4jLong inRowWidth, T* outputPtr) {
// auto numChannels = pResizerState->channels;
for (Nd4jLong b = blockIdx.x; b < pResizerState->batchSize; b += gridDim.x) {
auto pInput = inputPtr + b * inBatchWidth;
for (Nd4jLong y = threadIdx.x; y < pResizerState->outHeight; y += blockDim.x) {
auto pos = (b * pResizerState->outHeight + y) * pResizerState->outWidth * pResizerState->channels;
auto pOutput = &outputPtr[pos];
struct WeightsAndIndices yWai;
if (halfPixelCenters) {
getWeightsAndIndices<HalfPixelScaler, true>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
} else {
getWeightsAndIndices<LegacyScaler, false>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
}
// Make pointers represent offsets of data in inputBPtr.
const T* y_ptr_0 = pInput + yWai._index0 * inRowWidth;
const T* y_ptr_1 = pInput + yWai._index1 * inRowWidth;
const T* y_ptr_2 = pInput + yWai._index2 * inRowWidth;
const T* y_ptr_3 = pInput + yWai._index3 * inRowWidth;
if (pResizerState->channels == 3) {
// Manually unroll case of 3 channels.
float cached_value_0[4] = {0};
float cached_value_1[4] = {0};
float cached_value_2[4] = {0};
for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
const WeightsAndIndices& xWai = xWais[x];
// Shift values in cached_value_* to fill first '_advance' values.
switch (xWai._advance) {
case 3:
cached_value_0[0] = cached_value_0[1];
cached_value_0[1] = cached_value_0[2];
cached_value_0[2] = cached_value_0[3];
cached_value_1[0] = cached_value_1[1];
cached_value_1[1] = cached_value_1[2];
cached_value_1[2] = cached_value_1[3];
cached_value_2[0] = cached_value_2[1];
cached_value_2[1] = cached_value_2[2];
cached_value_2[2] = cached_value_2[3];
break;
case 2:
cached_value_0[0] = cached_value_0[2];
cached_value_0[1] = cached_value_0[3];
cached_value_1[0] = cached_value_1[2];
cached_value_1[1] = cached_value_1[3];
cached_value_2[0] = cached_value_2[2];
cached_value_2[1] = cached_value_2[3];
break;
case 1: {
cached_value_0[0] = cached_value_0[3];
cached_value_1[0] = cached_value_1[3];
cached_value_2[0] = cached_value_2[3];
break;
}
}
// Set the remaining '4-_advance' values by computing.
switch (xWai._advance) {
case 0:
cached_value_0[0] = computeYInterpolation(0, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[0] = computeYInterpolation(0, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[0] = computeYInterpolation(0, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 1:
cached_value_0[1] = computeYInterpolation(1, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[1] = computeYInterpolation(1, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[1] = computeYInterpolation(1, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 2:
cached_value_0[2] = computeYInterpolation(2, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[2] = computeYInterpolation(2, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[2] = computeYInterpolation(2, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 3:
cached_value_0[3] = computeYInterpolation(3, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[3] = computeYInterpolation(3, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[3] = computeYInterpolation(3, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
// break;
}
pOutput[x * pResizerState->channels + 0] = compute(cached_value_0, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
pOutput[x * pResizerState->channels + 1] = compute(cached_value_1, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
pOutput[x * pResizerState->channels + 2] = compute(cached_value_2, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
}
} else {
for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
const WeightsAndIndices& xWai = xWais[x];
// Shift values in cachedValue to fill first '_advance' values.
switch (xWai._advance) {
case 3:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 1];
cachedValue[4 * c + 1] = cachedValue[4 * c + 2];
cachedValue[4 * c + 2] = cachedValue[4 * c + 3];
}
break;
case 2:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 2];
cachedValue[4 * c + 1] = cachedValue[4 * c + 3];
}
break;
case 1: {
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 3];
}
break;
}
}
// Set the remaining '4-_advance' values by computing.
switch (xWai._advance) {
case 0:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = computeYInterpolation(0, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 1:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 1] = computeYInterpolation(1, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 2:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 2] = computeYInterpolation(2, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 3:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 3] = computeYInterpolation(3, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
// break;
}
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
pOutput[x * pResizerState->channels + c] = compute(&cachedValue[4 * c], xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3);
}
}
}
}
}
}
template <typename T>
static void
bicubicInterpolateWithCaching(NDArray const* image, ImageResizerState const& resizerState, bool const halfPixelCenters, NDArray* output) {
const auto numChannels = resizerState.channels;
const Nd4jLong inRowWidth = resizerState.inWidth * numChannels;
const Nd4jLong inBatchWidth = resizerState.inHeight * inRowWidth;
auto stream = resizerState.stream; //output->getContext()->getCudaStream();
ImageResizerState* resizerStateD;
auto err = hipMalloc(&resizerStateD, sizeof(ImageResizerState));
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for resizerState", err);
}
err = hipMemcpy(resizerStateD, &resizerState, sizeof(ImageResizerState), hipMemcpyHostToDevice);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot set up memory for resizerState", err);
}
float* cachedValue = nullptr;
size_t cachedSize = sizeof(float) * (numChannels == 3 ? 0 : 4 * numChannels);
if (cachedSize) {
err = hipMalloc(reinterpret_cast<void**>(&cachedValue), cachedSize);
if (err != 0) {
throw cuda_exception::build(
"helpers::bicubicInterpolateWithCaching: Cannot allocate memory for cached values", err);
}
err = hipMemset(cachedValue, 0, cachedSize);
if (err != 0) {
throw cuda_exception::build(
"helpers::bicubicInterpolateWithCaching: Cannot set up memory for cached values", err);
}
}
WeightsAndIndices* xWais; //(resizerState.outWidth);
err = hipMalloc(&xWais, sizeof(WeightsAndIndices) * resizerState.outWidth);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for weights and indices", err);
}
auto coeffsTable = halfPixelCenters?initCoeffsTable(-0.5, stream): initCoeffsTable(-0.75, stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeigtsAndInidces finished with error", err);
}
computeXWeightsAndIndices(coeffsTable, resizerState, halfPixelCenters, xWais);
err = hipStreamQuery(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeigtsAndInidces finished with error", err);
}
const T* pInput = image->getDataBuffer()->specialAsT<T>();
T* pOutput = output->dataBuffer()->specialAsT<T>(); //_data.data();
hipLaunchKernelGGL(( bicubicInterpolateWithCachingKernel<T>), dim3(128), dim3(1), 512, *stream, coeffsTable, cachedValue, pInput,
resizerStateD, xWais, halfPixelCenters, inBatchWidth, inRowWidth, pOutput);
err = hipStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Kernels finished with error", err);
}
err = hipFree(resizerStateD);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for resizerState", err);
}
if (cachedSize)
err = hipFree(cachedValue);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for cached values", err);
}
err = hipFree(xWais);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for weights and indices", err);
}
err = hipFree(coeffsTable);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for coefficients table", err);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
int resizeBicubicFunctor_(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
return Status::OK();
}
int resizeBicubicFunctor(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctor_, (context, image,
width, height, preserveAspectRatio, antialias, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctor_, (nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
// ------------------------------------------------------------------------------------------------------------------ //
// simplified bicubic resize without antialiasing
//
template <typename T>
int resizeBicubicFunctorA_(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
ImageResizerState st(alignCorners, halfPixelCenters); // align_corners, half_pixel_align
st.stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {image});
int res = st.validateAndCreateOutput(image, width, height);
if (res == Status::OK())
bicubicInterpolateWithCaching<T>(image, st, halfPixelCenters, output);
NDArray::registerSpecialUse({output}, {image});
return res;
}
int resizeBicubicFunctorA(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctorA_, (context,
image, width, height, alignCorners, halfPixelCenters, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctorA_, (nd4j::LaunchContext * context,
NDArray const* image, int width, int height, bool const alignCorners, bool const halfPixelCenters, NDArray* output), NUMERIC_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeFunctor(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
ImageResizeMethods method, bool preserveAspectRatio, bool antialias, NDArray* output) {
switch (method) {
case kResizeBilinear: return resizeBilinearFunctor(context, image, width, height, false, output); break;
case kResizeNearest: return resizeNeighborFunctor(context, image, width, height, true, output); break;
case kResizeBicubic: return resizeBicubicFunctor(context, image, width, height, preserveAspectRatio, antialias, output); break;
case kResizeLanczos5:
case kResizeGaussian:
case kResizeArea:
case kResizeMitchelcubic:
throw std::runtime_error("helper::resizeFunctor: Non implemented yet.");
}
return ND4J_STATUS_OK;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// -------------------------------------------------------------------------------------------------------------- //
// cropAndResize kernel type of input(images) and output should be the same
//
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong* imagesShape, Z const* boxes, Nd4jLong* boxesShape,
I const* indices, Nd4jLong* indexShape, I const* cropSize, Nd4jLong* cropShape, int method,
double extrapolationVal, T* output, Nd4jLong* outputShape, int numBoxes, int cropHeight, int cropWidth,
int batchSize, int imageHeight, int imageWidth, int depth) {
for (int b = blockIdx.x; b < numBoxes; b += gridDim.x)
{
Nd4jLong x1Pos[] = {b, 1};
Nd4jLong y1Pos[] = {b, 0};
Nd4jLong y2Pos[] = {b, 2};
Nd4jLong x2Pos[] = {b, 3};
Z y1 = boxes[shape::getOffset(boxesShape, y1Pos)];//->t<T>(b, 0)];
Z x1 = boxes[shape::getOffset(boxesShape, x1Pos)];
Z y2 = boxes[shape::getOffset(boxesShape, y2Pos)];
Z x2 = boxes[shape::getOffset(boxesShape, x2Pos)];
int bIn = indices[b];
if (bIn >= batchSize) {
continue;
}
Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);
for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
const float inY = (cropHeight > 1)
? y1 * (imageHeight - 1) + y * heightScale
: 0.5 * (y1 + y2) * (imageHeight - 1);
if (inY < 0 || inY > imageHeight - 1) {
for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
//crops->p(b, y, x, d, extrapolationVal);
}
}
continue;
}
if (method == 0 /* bilinear */) {
const int topYIndex = nd4j::math::p_floor(inY);
const int bottomYIndex = nd4j::math::p_ceil(inY);
const float y_lerp = inY - topYIndex;
for (int x = 0; x < cropWidth; ++x) {
const float in_x = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (in_x < 0 || in_x > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
// crops->p(b, y, x, d, extrapolationVal);
}
continue;
}
int left_x_index = math::p_floor(in_x);
int right_x_index = math::p_ceil(in_x);
T x_lerp = in_x - left_x_index;
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
const T topLeft(images[shape::getOffset(imagesShape, topLeftPos)]); //->e<float>(bIn, topYIndex, left_x_index, d));
const T topRight(images[shape::getOffset(imagesShape, topRightPos)]); //->e<float>(bIn, topYIndex, right_x_index, d));
const T bottomLeft(images[shape::getOffset(imagesShape, bottomLeftPos)]);//->e<float>(bIn, bottomYIndex, left_x_index, d));
const T bottomRight(images[shape::getOffset(imagesShape, bottomRightPos)]); //->e<float>(bIn, bottomYIndex, right_x_index, d));
const T top = topLeft + (topRight - topLeft) * x_lerp;
const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = Z(top + (bottom - top) * y_lerp);
}
}
} else { // method is "nearest neighbor"
for (int x = 0; x < cropWidth; ++x) {
const float inX = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (inX < 0 || inX > imageWidth - 1) {
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
}
continue;
}
const int closestXIndex = roundf(inX);
const int closestYIndex = roundf(inY);
auto start = blockIdx.z * blockDim.x + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
auto zIndex = shape::getOffset(outputShape, zPos);
auto xIndex = shape::getOffset(imagesShape, xPos);
output[zIndex] = images[xIndex];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// cropAndResizeFunctor main algorithm
// context - launch context
// images - batch of images (4D tensor - [batch, width, height, pixels])
// boxes - 2D tensor with boxes for crop
// indices - 2D int tensor with indices of boxes to crop
// cropSize - 2D int tensor with crop box sizes
// method - (one of 0 - bilinear, 1 - nearest)
// extrapolationVal - double value of extrapolation
// crops - output (4D tensor - [batch, outWidth, outHeight, pixels])
//
template <typename T, typename Z, typename I>
static void cropAndResizeFunctor_(nd4j::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->getSpecialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->getSpecialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->getSpecialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->getSpecialBuffer());
T* outBuf = reinterpret_cast<T*>(crops->specialBuffer());
NDArray::prepareSpecialUse({crops}, {images, boxes, indices, cropSize});
hipLaunchKernelGGL(( cropAndResizeKernel<T,Z,I>), dim3(batchSize), dim3(math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth)), 512, *stream, imagesBuf, images->getSpecialShapeInfo(), boxesBuf, boxes->getSpecialShapeInfo(), indexBuf, indices->getSpecialShapeInfo(),
cropSizes, cropSize->getSpecialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
NDArray::registerSpecialUse({crops}, {images, boxes, indices, cropSize});
}
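    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // Usage sketch for the crop-and-resize helper above, invoked through the cropAndResizeFunctor dispatcher
    // defined below; the NDArrayFactory calls are assumptions about the surrounding codebase and are shown
    // only to illustrate the expected shapes:
    //   auto boxes   = NDArrayFactory::create<float>('c', {1, 4}, {0.f, 0.f, 1.f, 1.f}); // one box covering the whole image
    //   auto indices = NDArrayFactory::create<int>('c', {1}, {0});                       // crop from batch image 0
    //   cropAndResizeFunctor(context, &images, &boxes, &indices, &cropSize, 0 /*bilinear*/, 0.0, &crops);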
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void cropAndResizeFunctor(nd4j::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(nd4j::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
} | 4fda9d85b0f3f70a9841b4e5ac68605b4a9e7364.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <ops/declarable/helpers/image_resize.h>
#include <cuda_exception.h>
namespace nd4j {
namespace ops {
namespace helpers {
struct BilinearInterpolationData {
Nd4jLong bottomIndex; // Lower source index used in the interpolation
Nd4jLong topIndex; // Upper source index used in the interpolation
        // 1-D linear interpolation scale (see:
// https://en.wikipedia.org/wiki/Bilinear_interpolation)
double interpolarValue;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// computeInterpolationWeights kernel
// outSize - output length
// inSize - input size
// scale - input scale
    // interpolationData - result
//
static __global__ void computeInterpolationWeights(Nd4jLong outSize,
Nd4jLong inSize,
double scale,
Nd4jLong channels,
BilinearInterpolationData* interpolationData) {
interpolationData[outSize].bottomIndex = 0;
interpolationData[outSize].topIndex = 0;
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (Nd4jLong i = outSize - tid; i >= 0; i -= step) {
double in = i * scale;
interpolationData[i].bottomIndex = static_cast<Nd4jLong>(in);
interpolationData[i].topIndex = nd4j::math::nd4j_min(interpolationData[i].bottomIndex + 1, inSize - 1);
interpolationData[i].interpolarValue = in - interpolationData[i].bottomIndex;
if (channels) {
math::atomics::nd4j_atomicMul(&interpolationData[i].bottomIndex, channels);
math::atomics::nd4j_atomicMul(&interpolationData[i].topIndex, channels);
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm
//
static void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with bilinear interpolation algorithm kernel
//
template <typename T>
static __global__ void resizeImageKernel(T const* input, Nd4jLong const* inputShape, T* outputYptr, Nd4jLong* outputShape, Nd4jLong batchSize,
Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, Nd4jLong inRowSize, Nd4jLong outRowSize, Nd4jLong inBatchNumValues,
BilinearInterpolationData* xs_, BilinearInterpolationData* ys_) {
for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x ) { // blockIdx.x as batch index
auto pX = input + batch * inBatchNumValues;
for (Nd4jLong y = threadIdx.x; y < outHeight; y += blockDim.x) {
const T *ys_input_lower_ptr = pX + ys_[y].bottomIndex * inRowSize;
const T *ys_input_upper_ptr = pX + ys_[y].topIndex * inRowSize;
double yVal = ys_[y].interpolarValue;
auto pZ = outputYptr + (batch * outHeight + y) * outRowSize;
for (Nd4jLong x = threadIdx.y; x < outWidth; x += blockDim.y) {
auto xsBottom = xs_[x].bottomIndex;
auto xsTop = xs_[x].topIndex;
auto xVal = xs_[x].interpolarValue;
// process interpolation for all channels
for (int c = threadIdx.z; c < channels; c += blockDim.z) {
double topLeft(ys_input_lower_ptr[xsBottom + c]);
double topRight(ys_input_lower_ptr[xsTop + c]);
double bottomLeft(ys_input_upper_ptr[xsBottom + c]);
double bottomRight(ys_input_upper_ptr[xsTop + c]);
double top = topLeft + (topRight - topLeft) * xVal;
double bottom = bottomLeft + (bottomRight - bottomLeft) * xVal;
pZ[x * channels + c] = T(top + (bottom - top) * yVal);
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize image with
template <typename T>
static void resizeImage_(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight,
Nd4jLong outWidth, Nd4jLong channels,
BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_,
NDArray* output) {
Nd4jLong inRowSize = inWidth * channels;
Nd4jLong inBatchNumValues = inHeight * inRowSize;
Nd4jLong outRowSize = outWidth * channels;
auto stream = context->getCudaStream();
T const *input_b_ptr = reinterpret_cast<T const *>(images->getSpecialBuffer()); // this works only with 'c' direction
T *output_y_ptr = reinterpret_cast<T *>(output->specialBuffer());
dim3 batchSizeBlock(batchSize, 1, 1);
dim3 pictureBlock(outHeight, outWidth, channels);
resizeImageKernel<T><<<256, pictureBlock, 256, *stream>>>(input_b_ptr, images->getSpecialShapeInfo(), output_y_ptr, output->specialShapeInfo(), batchSize,
outWidth, outHeight, channels, inRowSize, outRowSize, inBatchNumValues, xs_, ys_);
auto err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::resizeImage_: Cannot synchronize kernel execution", err);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
static int resizeBilinearFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
// Special case for TF compatibility
if((center && inHeight < 2) || (center && inWidth < 2)){
center = false;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_bilinear: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
float heightScale = center ? (inHeight - 1.f) / double(outHeight - 1.f) : (inHeight / float(outHeight));
float widthScale = center ? (inWidth - 1.f) / double(outWidth - 1.f) : (inWidth / float(outWidth));
BilinearInterpolationData* xs_;// = xs.data();
BilinearInterpolationData* ys_;// = xs.data();
cudaError_t err = cudaMalloc(&xs_, sizeof(BilinearInterpolationData) * (outWidth + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for vertical parts rectangulars", err);
}
err = cudaMalloc(&ys_, sizeof(BilinearInterpolationData) * (outHeight + 1));
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot allocate memory for horizontal parts rectangulars", err);
}
auto stream = context->getCudaStream();
// Compute the cached interpolation weights on the x and y dimensions.
computeInterpolationWeights<<<256, 512, 512, *stream>>>(outHeight, inHeight, heightScale, 0, ys_);
computeInterpolationWeights<<<256, 512, 512, *stream>>>(outWidth, inWidth, widthScale, channels, xs_);
NDArray::prepareSpecialUse({output}, {images});
resizeImage(context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output);
NDArray::registerSpecialUse({output}, {images});
err = cudaFree(xs_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for vertical parts rectangulars", err);
}
err = cudaFree(ys_);
if (err != 0) {
throw cuda_exception::build("helpers::resize_image: Cannot deallocate memory for horizontical parts rectangulars", err);
}
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resize by interpolation nearest neighbor algorithm kernel
//
template <typename T>
static __global__ void resizeNeighborKernel(T const* input, Nd4jLong* inputShape, T* output, Nd4jLong* outputShape,
Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center) {
//for (int b = blockIdx.x; b < batchSize; b += gridDim.x)
if (blockIdx.x < batchSize)
{
auto b = blockIdx.x;
for (int y = threadIdx.x; y < outHeight; y += blockDim.x) {
Nd4jLong inY = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(y * heightScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
y * heightScale)), inHeight - 1);
for (int x = threadIdx.y; x < outWidth; x += blockDim.y) {
Nd4jLong inX = nd4j::math::nd4j_min(
(center) ? static_cast<Nd4jLong>(nd4j::math::p_round<float>(x * widthScale)) : static_cast<Nd4jLong>(nd4j::math::p_floor<float>(
x * widthScale)), inWidth - 1);
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (Nd4jLong e = start; e < channels; e += step) {
Nd4jLong posX[] = {b, inY, inX, e};
Nd4jLong posZ[] = {b, y, x, e};
auto xIndex = shape::getOffset(inputShape, posX);
auto zIndex = shape::getOffset(outputShape, posZ);
output[zIndex] = input[xIndex];
}
}
}
}
}
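// Index-mapping example for the kernel above (illustration only): with
// center == false, inHeight = 4 and outHeight = 8 the caller passes
// heightScale = 4 / 8 = 0.5, so output row y = 5 reads source row
// floor(5 * 0.5) = 2; with center == true the scale becomes (4 - 1) / (8 - 1)
// and the source row index is rounded instead of floored.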
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeNeighborFunctor - main algorithm by nearest neighbor
//
template <typename T>
int resizeNeighborFunctor_(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
const Nd4jLong batchSize = images->sizeAt(0);
const Nd4jLong inHeight = images->sizeAt(1);
const Nd4jLong inWidth = images->sizeAt(2);
const Nd4jLong channels = images->sizeAt(3);
const Nd4jLong outHeight = output->sizeAt(1);
const Nd4jLong outWidth = output->sizeAt(2);
// Handle no-op resizes efficiently.
if (outHeight == inHeight && outWidth == inWidth) {
output->assign(images);
return ND4J_STATUS_OK;
}
if ((center && inHeight < 2) || (inHeight < 1) || (outHeight < 1) || (center && outHeight < 2) ||
(center && inWidth < 2) || (inWidth < 1) || (outWidth < 1) || (center && outWidth < 2)) {
// wrong input data
nd4j_printf("image.resize_nearest_neighbor: Wrong input or output size to resize\n", "");
return ND4J_STATUS_BAD_ARGUMENTS;
}
double heightScale = center ? (inHeight - 1.) / double(outHeight - 1.0) : (inHeight / double(outHeight));
double widthScale = center ? (inWidth - 1.) / double(outWidth - 1.0) : (inWidth / double(outWidth));
auto imagesBuffer = reinterpret_cast<T const*>(images->getSpecialBuffer());
auto outputBuffer = reinterpret_cast<T*>(output->specialBuffer());
auto stream = context->getCudaStream();
//T const* input, Nd4jLong const* inputShape, T* output, Nd4jLong* outputShape,
// Nd4jLong batchSize, Nd4jLong inWidth, Nd4jLong inHeight, Nd4jLong outWidth, Nd4jLong outHeight, Nd4jLong channels, double widthScale, double heightScale, bool center
//input, inputShape, output, outputShape,
// batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center
NDArray::prepareSpecialUse({output}, {images});
resizeNeighborKernel<T><<<batchSize, outHeight * outWidth, 512, *stream>>>(imagesBuffer, images->getSpecialShapeInfo(), outputBuffer, output->specialShapeInfo(),
batchSize, inWidth, inHeight, outWidth, outHeight, channels, widthScale, heightScale, center);
NDArray::registerSpecialUse({output}, {images});
return Status::OK();
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// resizeImage - resize bilinear algorithm caller
//
void resizeImage(nd4j::LaunchContext* context, NDArray const* images, Nd4jLong batchSize, Nd4jLong inHeight,
Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth, Nd4jLong channels, BilinearInterpolationData* xs_,
BilinearInterpolationData* ys_, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), resizeImage_, (context, images, batchSize, inHeight, inWidth, outHeight, outWidth, channels, xs_, ys_, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void resizeImage_,(nd4j::LaunchContext* context, NDArray const* images,
Nd4jLong batchSize, Nd4jLong inHeight, Nd4jLong inWidth, Nd4jLong outHeight, Nd4jLong outWidth,
Nd4jLong channels, BilinearInterpolationData* xs_, BilinearInterpolationData* ys_, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeBilinearFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeBilinearFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBilinearFunctor_, (nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeNeighborFunctor(nd4j::LaunchContext* context, NDArray const* images, int width, int height, bool center, NDArray* output) {
BUILD_SINGLE_SELECTOR(images->dataType(), return resizeNeighborFunctor_, (context, images, width, height, center, output), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeNeighborFunctor_, (nd4j::LaunchContext* context, NDArray const* images,
int width, int height, bool center, NDArray* output), LIBND4J_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Bicubic interpolation
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Utility functions and classes
// calculateResizeScale determines the float scaling factor.
inline float calculateResizeScale(Nd4jLong inSize, Nd4jLong outSize,
bool alignCorners) {
return (alignCorners && outSize > 1)
? (inSize - 1) / static_cast<float>(outSize - 1)
: inSize / static_cast<float>(outSize);
}
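// For instance (illustration only): inSize = 4 and outSize = 2 give a scale of
// (4 - 1) / (2 - 1) = 3.0 when alignCorners is set (the corner pixels of input
// and output coincide) and 4 / 2 = 2.0 otherwise.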
struct ImageResizerState {
explicit ImageResizerState(bool alignCorners, bool halfPixelCenters)
: _alignCorners(alignCorners),
_halfPixelCenters(halfPixelCenters) {}
// ValidateAndCalculateOutputSize checks the bounds on the input tensors
// and requested size, sets up some of the resizing state such as the
// heightScale and widthScale, and calculates the output size.
// If any of these operations fails, it returns an error status,
// which the caller must check.
int validateAndCalculateOutputSize(NDArray const* input, int const width, int const height) {
//
batchSize = input->sizeAt(0);//.dim_size(0);
outHeight = height;
outWidth = width; //internal::SubtleMustCopy(Svec(1));
inHeight = static_cast<int32_t>(input->sizeAt(1));
inWidth = static_cast<int32_t>(input->sizeAt(2));
channels = input->sizeAt(3); //.dim_size(3);
heightScale = calculateResizeScale(inHeight, outHeight, _alignCorners);
widthScale = calculateResizeScale(inWidth, outWidth, _alignCorners);
// Guard against overflows
if (ceilf((outHeight - 1) * heightScale) > static_cast<float>(DataTypeUtils::max<int>())) {
nd4j_printf("resize_bicubic: Upper overflow occurs for resize height (%f)\n", ceilf((outHeight - 1) * heightScale));
return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize height");
}
if (ceilf((outWidth - 1) * widthScale) > static_cast<float>(DataTypeUtils::max<int>())) {
nd4j_printf("resize_bicubic: Upper overflow occurs for resize width (%f)\n", ceilf((outWidth - 1) * widthScale));
return Status::CODE(ND4J_STATUS_BAD_INPUT, "resize_bicubic: Upper overflow occurs for resize width");
}
return Status::OK();
}
// Calculates all the required variables, and allocates the output.
int validateAndCreateOutput(NDArray const* input, int const width, int const height) {
return validateAndCalculateOutputSize(input, width, height);
}
Nd4jLong batchSize;
Nd4jLong outHeight;
Nd4jLong outWidth;
Nd4jLong inHeight;
Nd4jLong inWidth;
Nd4jLong channels;
float heightScale;
float widthScale;
NDArray* output = nullptr;
cudaStream_t* stream;
private:
bool _alignCorners;
bool _halfPixelCenters;
};
// Half pixel scaler scales assuming that the pixel centers are at 0.5, i.e. the
// floating point coordinates of the top-left pixel are (0.5, 0.5).
struct HalfPixelScaler {
_CUDA_HD HalfPixelScaler(){};
inline _CUDA_HD float operator()(const int x, const float scale) const {
// Note that we subtract 0.5 from the return value, as the existing bilinear
// sampling code etc assumes pixels are in the old coordinate system.
return (static_cast<float>(x) + 0.5f) * scale - 0.5f;
}
};
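// Example (illustration only): when downscaling by a factor of 2 (scale = 2.0),
// the center of output pixel x = 0 maps to (0 + 0.5) * 2.0 - 0.5 = 0.5, i.e.
// exactly between source pixels 0 and 1, which is the symmetric half-pixel
// behaviour this scaler is meant to provide.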
struct WeightsAndIndices {
float _weight0;
float _weight1;
float _weight2;
float _weight3;
Nd4jLong _index0;
Nd4jLong _index1;
Nd4jLong _index2;
Nd4jLong _index3;
int _advance; // advance value.
};
class CachedInterpolationCalculator {
public:
_CUDA_HD CachedInterpolationCalculator() : _indexes{-1, -1, -1, -1} {}
// Advances iteration. Returns the number of values that should be copied from
// the current point to the next point. The copying should always be done by
// copying the last <retval> values from the old point to the first <retval>
// values of the new point.
inline _CUDA_HD int Advance(const Nd4jLong x0, const Nd4jLong x1, const Nd4jLong x2,
const Nd4jLong x3) {
// We use 2 hands and walk through, copying from one to another where
// we already have values.
// Invariant: newIndiciesHand <= cachedValuesHand
const Nd4jLong new_x_indices[4] = {x0, x1, x2, x3};
int cachedValuesHand = 0;
int newIndiciesHand = 0;
while (cachedValuesHand < 4) {
if (_indexes[cachedValuesHand] == new_x_indices[newIndiciesHand]) {
if (newIndiciesHand < cachedValuesHand) {
_indexes[newIndiciesHand] = _indexes[cachedValuesHand];
}
newIndiciesHand++;
}
cachedValuesHand++;
}
switch (newIndiciesHand) {
case 0:
_indexes[0] = x0;
case 1:
_indexes[1] = x1;
case 2:
_indexes[2] = x2;
case 3:
_indexes[3] = x3;
break;
}
return newIndiciesHand;
}
private:
Nd4jLong _indexes[4];
};
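// Example of Advance (illustration only): starting from the initial cache
// {-1, -1, -1, -1}, Advance(0, 1, 2, 3) finds nothing to reuse and returns 0;
// a subsequent Advance(1, 2, 3, 4) keeps the cached columns for indices 1, 2
// and 3, shifts them to the front and returns 3, so the caller only has to
// recompute one new column.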
static __global__ void initCoefTableKernel(const double a, float* table, Nd4jLong tableSize) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (int i = start; i <= tableSize; i += step) {
float x = i * 1.0 / tableSize;
table[i * 2] = ((a + 2) * x - (a + 3)) * x * x + 1;
x += 1.0;
table[i * 2 + 1] = ((a * x - 5 * a) * x + 8 * a) * x - 4 * a;
}
}
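// The two entries written per step sample the cubic convolution kernel
//      w(x) = (a + 2)|x|^3 - (a + 3)|x|^2 + 1        for |x| <= 1
//      w(x) = a|x|^3 - 5a|x|^2 + 8a|x| - 4a          for 1 < |x| < 2
// at x and x + 1 respectively, with x in [0, 1]; a = -0.5 (the Keys kernel) is
// used with half pixel centers and a = -0.75 otherwise (see the call in
// bicubicInterpolateWithCaching below).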
static const Nd4jLong kTableSize = (1 << 10);
float* initCoeffsTable(const double a, cudaStream_t* stream) {
// Allocate and initialize coefficients table using Bicubic
// convolution algorithm.
// https://en.wikipedia.org/wiki/Bicubic_interpolation
float* coeffs_table; // = new float[(kTableSize + 1) * 2];
auto err = cudaMalloc(&coeffs_table, sizeof(float) * ((kTableSize + 1) * 2));
if (err != 0) {
throw cuda_exception::build("helpers::initCoeffsTable: Cannot allocate memory for vertical parts rectangulars", err);
}
initCoefTableKernel<<<128,128,128, *stream>>>(a, coeffs_table, kTableSize);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::initCoeffsTable: Cannot syncronize kernel", err);
}
return coeffs_table;
}
// _CUDA_HD const float* getCoeffsTable(const bool use_keys_cubic) {
// // Static so that we initialize it on first use
// if (use_keys_cubic) {
// // http://ieeexplore.ieee.org/document/1163711/
// // R. G. Keys. Cubic convolution interpolation for digital image
// // processing. IEEE Transactions on Acoustics, Speech, and Signal
// // Processing, 29(6):1153–1160, 1981.
// //static const float* coeffs_table = initCoeffsTable(-0.5f, stream);
// return sCoeffsTableHalf;
// } else {
// //static const float* coeffs_table = initCoeffsTable(-0.75f, stream);
// return sCoeffsTableThreeFourth;
// }
// }
inline _CUDA_HD Nd4jLong bound(Nd4jLong val, Nd4jLong limit) {
return math::nd4j_min(limit - 1ll, math::nd4j_max(Nd4jLong{0}, val));
}
template <typename T>
inline _CUDA_HD float interpolate1D(const float weight0, const float weight1, const float weight2, const float weight3,
const T value0, const T value1, const T value2, const T value3) {
return static_cast<float>(value0) * weight0 +
static_cast<float>(value1) * weight1 +
static_cast<float>(value2) * weight2 +
static_cast<float>(value3) * weight3;
}
// Compute the 1D interpolation for a given X index using the y_weights
static _CUDA_HD float compute(float values[4], const float xW0, const float xW1, const float xW2, const float xW3) {
return interpolate1D(xW0, xW1, xW2, xW3, values[0], values[1],values[2], values[3]);
}
template <typename Scaler, bool use_keys_cubic>
inline _CUDA_HD void getWeightsAndIndices(float const* coeffs_table, const float scale, const Nd4jLong out_loc, const Nd4jLong limit, WeightsAndIndices* out) {
const Scaler scaler;
const float in_loc_f = scaler(out_loc, scale);
const Nd4jLong in_loc = math::nd4j_floor<float, Nd4jLong>(in_loc_f);
const float delta = in_loc_f - in_loc;
const Nd4jLong offset = math::nd4j_round<float, Nd4jLong>(delta * kTableSize);
//const float* coeffs_table = getCoeffsTable(use_keys_cubic);
if (use_keys_cubic) {
// The legacy code placed more weight on the edge pixels, since bounding
// the set of inputs to sample could cause an edge pixel to be repeated.
// Here we change the behavior at borders to match that used by the
// scale_and_translate_op, where sampling locations outside the image have
// their weight set to 0, and the weights are renormalized so that their sum
// is 1.0.
out->_index0 = bound(in_loc - 1, limit);
out->_weight0 =
(out->_index0 == in_loc - 1 ? coeffs_table[offset * 2 + 1] : 0.0f);
out->_index1 = bound(in_loc, limit);
out->_weight1 = (out->_index1 == in_loc ? coeffs_table[offset * 2] : 0.0f);
out->_index2 = bound(in_loc + 1, limit);
out->_weight2 =
(out->_index2 == in_loc + 1 ? coeffs_table[(kTableSize - offset) * 2]
: 0.0f);
out->_index3 = bound(in_loc + 2, limit);
out->_weight3 = (out->_index3 == in_loc + 2
? coeffs_table[(kTableSize - offset) * 2 + 1]
: 0.0f);
const float weight_sum =
out->_weight0 + out->_weight1 + out->_weight2 + out->_weight3;
if (math::nd4j_abs(weight_sum) >= 1000.0f * DataTypeUtils::min<float>()) {
const float one_over_weight_sum = 1.0f / weight_sum;
out->_weight0 *= one_over_weight_sum;
out->_weight1 *= one_over_weight_sum;
out->_weight2 *= one_over_weight_sum;
out->_weight3 *= one_over_weight_sum;
}
} else {
out->_weight0 = coeffs_table[offset * 2 + 1];
out->_weight1 = coeffs_table[offset * 2];
out->_weight2 = coeffs_table[(kTableSize - offset) * 2];
out->_weight3 = coeffs_table[(kTableSize - offset) * 2 + 1];
out->_index0 = bound(in_loc - 1, limit);
out->_index1 = bound(in_loc, limit);
out->_index2 = bound(in_loc + 1, limit);
out->_index3 = bound(in_loc + 2, limit);
}
}
// Older incorrect scaling method that causes all resizes to have a slight
// translation leading to inconsistent results. For example, a flip then a
// resize gives different results than a resize followed by a flip.
struct LegacyScaler {
_CUDA_HD LegacyScaler(){};
inline _CUDA_HD float operator()(const int x, const float scale) const {
return static_cast<float>(x) * scale;
}
};
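// Example (illustration only): with scale = 2.0 this legacy mapping sends output
// pixel x = 0 to source coordinate 0.0 rather than 0.5, which is the source of
// the slight translation mentioned above.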
static __global__ void accumulateChannelsKernel(WeightsAndIndices* pXWais, Nd4jLong outWidth, Nd4jLong channels) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto x = start; x < outWidth; x += step) {
pXWais[x]._index0 *= channels;
pXWais[x]._index1 *= channels;
pXWais[x]._index2 *= channels;
pXWais[x]._index3 *= channels;
}
}
static __global__ void advaceWeightsAndIndicesKernel(float const* cacheTable, CachedInterpolationCalculator* calc, WeightsAndIndices* pXWais, Nd4jLong inWidth, float widthScale,
Nd4jLong outWidth, Nd4jLong channels, bool halfPixelCenters) {
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = blockDim.x * gridDim.x;
for (auto x = start; x < outWidth; x += step) {
if (halfPixelCenters)
getWeightsAndIndices<HalfPixelScaler, true>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
else
getWeightsAndIndices<LegacyScaler, false>(cacheTable, widthScale, x, inWidth, &pXWais[x]);
pXWais[x]._advance = calc->Advance(pXWais[x]._index0, pXWais[x]._index1, pXWais[x]._index2, pXWais[x]._index3);
}
}
// resizerState and xWais are device allocated
static void computeXWeightsAndIndices(float const* coeffsTable, const ImageResizerState& resizerState,
const bool halfPixelCenters,
WeightsAndIndices* pXWais) {
auto stream = resizerState.stream;
auto outWidth = resizerState.outWidth;
CachedInterpolationCalculator calc; // = new CachedInterpolationCalculator;
CachedInterpolationCalculator* pCalcD;
auto err = cudaMalloc(&pCalcD, sizeof(CachedInterpolationCalculator));
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot allocated device memory for interpolate calculator", err);
}
err = cudaMemcpy(pCalcD, &calc, sizeof(CachedInterpolationCalculator), cudaMemcpyHostToDevice);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot set up device memory for interpolate calculator", err);
}
advaceWeightsAndIndicesKernel<<<128, 128, 128, *stream>>>(coeffsTable, pCalcD, pXWais, resizerState.inWidth, resizerState.widthScale, outWidth, resizerState.channels, halfPixelCenters);
err = cudaFree(pCalcD);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot deallocated device memory for interpolate calculator", err);
}
err = cudaStreamSynchronize(*stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after advance weights and indicers", err);
}
// Scale the values so they can be used as offsets into buffers.
accumulateChannelsKernel<<<128, 128, 512, *stream>>>(pXWais, outWidth, resizerState.channels);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
cuda_exception::build("helpers::computeXWeightsAndIndices: Cannot synchronize stream after accumulate channels", err);
}
}
template <typename T>
static _CUDA_HD FORCEINLINE float computeYInterpolation(
int which, int channelNum, const WeightsAndIndices& yWai,
const T* pY0, const T* pY1, const T* pY2, const T* pY3,
const WeightsAndIndices& xWai) {
int xIndex;
switch (which) {
case 0:
xIndex = xWai._index0;
break;
case 1:
xIndex = xWai._index1;
break;
case 2:
xIndex = xWai._index2;
break;
default:
xIndex = xWai._index3;
break;
}
const Nd4jLong pt_index = xIndex + channelNum;
return interpolate1D<T>(yWai._weight0, yWai._weight1, yWai._weight2,
yWai._weight3, pY0[pt_index], pY1[pt_index],
pY2[pt_index], pY3[pt_index]);
}
template <typename T>
static __global__ void bicubicInterpolateWithCachingKernel(float const* cachedTable, float* cachedValue, T const* inputPtr, ImageResizerState* pResizerState, WeightsAndIndices* xWais, bool halfPixelCenters, Nd4jLong inBatchWidth, Nd4jLong inRowWidth, T* outputPtr) {
// auto numChannels = pResizerState->channels;
for (Nd4jLong b = blockIdx.x; b < pResizerState->batchSize; b += gridDim.x) {
auto pInput = inputPtr + b * inBatchWidth;
for (Nd4jLong y = threadIdx.x; y < pResizerState->outHeight; y += blockDim.x) {
auto pos = (b * pResizerState->outHeight + y) * pResizerState->outWidth * pResizerState->channels;
auto pOutput = &outputPtr[pos];
struct WeightsAndIndices yWai;
if (halfPixelCenters) {
getWeightsAndIndices<HalfPixelScaler, true>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
} else {
getWeightsAndIndices<LegacyScaler, false>(cachedTable, pResizerState->heightScale, y, pResizerState->inHeight, &yWai);
}
// Make pointers represent offsets of data in inputBPtr.
const T* y_ptr_0 = pInput + yWai._index0 * inRowWidth;
const T* y_ptr_1 = pInput + yWai._index1 * inRowWidth;
const T* y_ptr_2 = pInput + yWai._index2 * inRowWidth;
const T* y_ptr_3 = pInput + yWai._index3 * inRowWidth;
if (pResizerState->channels == 3) {
// Manually unroll case of 3 channels.
float cached_value_0[4] = {0};
float cached_value_1[4] = {0};
float cached_value_2[4] = {0};
for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
const WeightsAndIndices& xWai = xWais[x];
// Shift values in cached_value_* to fill first '_advance' values.
switch (xWai._advance) {
case 3:
cached_value_0[0] = cached_value_0[1];
cached_value_0[1] = cached_value_0[2];
cached_value_0[2] = cached_value_0[3];
cached_value_1[0] = cached_value_1[1];
cached_value_1[1] = cached_value_1[2];
cached_value_1[2] = cached_value_1[3];
cached_value_2[0] = cached_value_2[1];
cached_value_2[1] = cached_value_2[2];
cached_value_2[2] = cached_value_2[3];
break;
case 2:
cached_value_0[0] = cached_value_0[2];
cached_value_0[1] = cached_value_0[3];
cached_value_1[0] = cached_value_1[2];
cached_value_1[1] = cached_value_1[3];
cached_value_2[0] = cached_value_2[2];
cached_value_2[1] = cached_value_2[3];
break;
case 1: {
cached_value_0[0] = cached_value_0[3];
cached_value_1[0] = cached_value_1[3];
cached_value_2[0] = cached_value_2[3];
break;
}
}
// Set the remaining '4-_advance' values by computing.
switch (xWai._advance) {
case 0:
cached_value_0[0] = computeYInterpolation(0, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[0] = computeYInterpolation(0, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[0] = computeYInterpolation(0, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 1:
cached_value_0[1] = computeYInterpolation(1, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[1] = computeYInterpolation(1, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[1] = computeYInterpolation(1, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 2:
cached_value_0[2] = computeYInterpolation(2, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[2] = computeYInterpolation(2, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[2] = computeYInterpolation(2, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
case 3:
cached_value_0[3] = computeYInterpolation(3, 0, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_1[3] = computeYInterpolation(3, 1, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
cached_value_2[3] = computeYInterpolation(3, 2, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
// break;
}
pOutput[x * pResizerState->channels + 0] = compute(cached_value_0, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
pOutput[x * pResizerState->channels + 1] = compute(cached_value_1, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
pOutput[x * pResizerState->channels + 2] = compute(cached_value_2, xWai._weight0, xWai._weight1,
xWai._weight2, xWai._weight3);
}
} else {
for (Nd4jLong x = 0; x < pResizerState->outWidth; ++x) {
const WeightsAndIndices& xWai = xWais[x];
// Shift values in cachedValue to fill first '_advance' values.
switch (xWai._advance) {
case 3:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 1];
cachedValue[4 * c + 1] = cachedValue[4 * c + 2];
cachedValue[4 * c + 2] = cachedValue[4 * c + 3];
}
break;
case 2:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 2];
cachedValue[4 * c + 1] = cachedValue[4 * c + 3];
}
break;
case 1: {
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = cachedValue[4 * c + 3];
}
break;
}
}
// Set the remaining '4-_advance' values by computing.
switch (xWai._advance) {
case 0:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 0] = computeYInterpolation(0, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 1:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 1] = computeYInterpolation(1, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 2:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 2] = computeYInterpolation(2, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
case 3:
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
cachedValue[4 * c + 3] = computeYInterpolation(3, c, yWai, y_ptr_0, y_ptr_1, y_ptr_2, y_ptr_3, xWai);
}
// break;
}
for (Nd4jLong c = 0; c < pResizerState->channels; ++c) {
pOutput[x * pResizerState->channels + c] = compute(&cachedValue[4 * c], xWai._weight0, xWai._weight1, xWai._weight2, xWai._weight3);
}
}
}
}
}
}
template <typename T>
static void
bicubicInterpolateWithCaching(NDArray const* image, ImageResizerState const& resizerState, bool const halfPixelCenters, NDArray* output) {
const auto numChannels = resizerState.channels;
const Nd4jLong inRowWidth = resizerState.inWidth * numChannels;
const Nd4jLong inBatchWidth = resizerState.inHeight * inRowWidth;
auto stream = resizerState.stream; //output->getContext()->getCudaStream();
ImageResizerState* resizerStateD;
auto err = cudaMalloc(&resizerStateD, sizeof(ImageResizerState));
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for resizerState", err);
}
err = cudaMemcpy(resizerStateD, &resizerState, sizeof(ImageResizerState), cudaMemcpyHostToDevice);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot set up memory for resizerState", err);
}
float* cachedValue = nullptr;
size_t cachedSize = sizeof(float) * (numChannels == 3 ? 0 : 4 * numChannels);
if (cachedSize) {
err = cudaMalloc(reinterpret_cast<void**>(&cachedValue), cachedSize);
if (err != 0) {
throw cuda_exception::build(
"helpers::bicubicInterpolateWithCaching: Cannot allocate memory for cached values", err);
}
err = cudaMemset(cachedValue, 0, cachedSize);
if (err != 0) {
throw cuda_exception::build(
"helpers::bicubicInterpolateWithCaching: Cannot set up memory for cached values", err);
}
}
WeightsAndIndices* xWais; //(resizerState.outWidth);
err = cudaMalloc(&xWais, sizeof(WeightsAndIndices) * resizerState.outWidth);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot allocate memory for weights and indices", err);
}
auto coeffsTable = halfPixelCenters ? initCoeffsTable(-0.5, stream) : initCoeffsTable(-0.75, stream);
computeXWeightsAndIndices(coeffsTable, resizerState, halfPixelCenters, xWais);
err = cudaStreamQuery(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: computeXWeigtsAndInidces finished with error", err);
}
const T* pInput = image->getDataBuffer()->specialAsT<T>();
T* pOutput = output->dataBuffer()->specialAsT<T>(); //_data.data();
bicubicInterpolateWithCachingKernel<T><<<128, 1, 512, *stream>>>(coeffsTable, cachedValue, pInput,
resizerStateD, xWais, halfPixelCenters, inBatchWidth, inRowWidth, pOutput);
err = cudaStreamSynchronize(*stream);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Kernels finished with error", err);
}
err = cudaFree(resizerStateD);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for resizerState", err);
}
if (cachedSize) {
err = cudaFree(cachedValue);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for cached values", err);
}
}
err = cudaFree(xWais);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for weights and indices", err);
}
err = cudaFree(coeffsTable);
if (err != 0) {
throw cuda_exception::build("helpers::bicubicInterpolateWithCaching: Cannot deallocate memory for coefficients table", err);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template <typename T>
int resizeBicubicFunctor_(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
return Status::OK();
}
int resizeBicubicFunctor(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctor_, (context, image,
width, height, preserveAspectRatio, antialias, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctor_, (nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool preserveAspectRatio, bool antialias, NDArray* output), NUMERIC_TYPES);
// ------------------------------------------------------------------------------------------------------------------ //
// ------------------------------------------------------------------------------------------------------------------ //
// simplified bicubic resize without antialiasing
//
template <typename T>
int resizeBicubicFunctorA_(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
ImageResizerState st(alignCorners, halfPixelCenters); // align_corners, half_pixel_align
st.stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {image});
int res = st.validateAndCreateOutput(image, width, height);
if (res == Status::OK())
bicubicInterpolateWithCaching<T>(image, st, halfPixelCenters, output);
NDArray::registerSpecialUse({output}, {image});
return res;
}
int resizeBicubicFunctorA(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
bool const alignCorners, bool const halfPixelCenters, NDArray* output) {
BUILD_SINGLE_SELECTOR(image->dataType(), return resizeBicubicFunctorA_, (context,
image, width, height, alignCorners, halfPixelCenters, output), NUMERIC_TYPES);
}
BUILD_SINGLE_TEMPLATE(template int resizeBicubicFunctorA_, (nd4j::LaunchContext * context,
NDArray const* image, int width, int height, bool const alignCorners, bool const halfPixelCenters, NDArray* output), NUMERIC_TYPES);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int resizeFunctor(nd4j::LaunchContext * context, NDArray const* image, int width, int height,
ImageResizeMethods method, bool preserveAspectRatio, bool antialias, NDArray* output) {
switch (method) {
case kResizeBilinear: return resizeBilinearFunctor(context, image, width, height, false, output); break;
case kResizeNearest: return resizeNeighborFunctor(context, image, width, height, true, output); break;
case kResizeBicubic: return resizeBicubicFunctor(context, image, width, height, preserveAspectRatio, antialias, output); break;
case kResizeLanczos5:
case kResizeGaussian:
case kResizeArea:
case kResizeMitchelcubic:
throw std::runtime_error("helper::resizeFunctor: Non implemented yet.");
}
return ND4J_STATUS_OK;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// --------------------------------------------------------------------------------------------------------------- //
// Crop and Resize helper implementation
// -------------------------------------------------------------------------------------------------------------- //
// cropAndResize kernel type of input(images) and output should be the same
//
template <typename T, typename Z, typename I>
static __global__ void cropAndResizeKernel(T const *images, Nd4jLong* imagesShape, Z const* boxes, Nd4jLong* boxesShape,
I const* indices, Nd4jLong* indexShape, I const* cropSize, Nd4jLong* cropShape, int method,
double extrapolationVal, T* output, Nd4jLong* outputShape, int numBoxes, int cropHeight, int cropWidth,
int batchSize, int imageHeight, int imageWidth, int depth) {
for (int b = blockIdx.x; b < numBoxes; b += gridDim.x)
{
Nd4jLong x1Pos[] = {b, 1};
Nd4jLong y1Pos[] = {b, 0};
Nd4jLong y2Pos[] = {b, 2};
Nd4jLong x2Pos[] = {b, 3};
Z y1 = boxes[shape::getOffset(boxesShape, y1Pos)];//->t<T>(b, 0)];
Z x1 = boxes[shape::getOffset(boxesShape, x1Pos)];
Z y2 = boxes[shape::getOffset(boxesShape, y2Pos)];
Z x2 = boxes[shape::getOffset(boxesShape, x2Pos)];
int bIn = indices[b];
if (bIn >= batchSize) {
continue;
}
Z heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / Z(cropHeight - 1) : Z(0);
Z widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / Z(cropWidth - 1) : Z(0);
for (int y = threadIdx.x; y < cropHeight; y += blockDim.x) {
const float inY = (cropHeight > 1)
? y1 * (imageHeight - 1) + y * heightScale
: 0.5 * (y1 + y2) * (imageHeight - 1);
if (inY < 0 || inY > imageHeight - 1) {
for (int x = threadIdx.y; x < cropWidth; x += blockDim.y) {
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
//crops->p(b, y, x, d, extrapolationVal);
}
}
continue;
}
if (method == 0 /* bilinear */) {
const int topYIndex = nd4j::math::p_floor(inY);
const int bottomYIndex = nd4j::math::p_ceil(inY);
const float y_lerp = inY - topYIndex;
for (int x = 0; x < cropWidth; ++x) {
const float in_x = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (in_x < 0 || in_x > imageWidth - 1) {
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
// crops->p(b, y, x, d, extrapolationVal);
}
continue;
}
int left_x_index = math::p_floor(in_x);
int right_x_index = math::p_ceil(in_x);
T x_lerp = in_x - left_x_index;
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong topLeftPos[] = {bIn, topYIndex, left_x_index, d};
Nd4jLong topRightPos[] = {bIn, topYIndex, right_x_index, d};
Nd4jLong bottomLeftPos[] = {bIn, bottomYIndex, left_x_index, d};
Nd4jLong bottomRightPos[] = {bIn, bottomYIndex, right_x_index, d};
const T topLeft(images[shape::getOffset(imagesShape, topLeftPos)]); //->e<float>(bIn, topYIndex, left_x_index, d));
const T topRight(images[shape::getOffset(imagesShape, topRightPos)]); //->e<float>(bIn, topYIndex, right_x_index, d));
const T bottomLeft(images[shape::getOffset(imagesShape, bottomLeftPos)]);//->e<float>(bIn, bottomYIndex, left_x_index, d));
const T bottomRight(images[shape::getOffset(imagesShape, bottomRightPos)]); //->e<float>(bIn, bottomYIndex, right_x_index, d));
const T top = topLeft + (topRight - topLeft) * x_lerp;
const T bottom = bottomLeft + (bottomRight - bottomLeft) * x_lerp;
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = Z(top + (bottom - top) * y_lerp);
}
}
} else { // method is "nearest neighbor"
for (int x = 0; x < cropWidth; ++x) {
const float inX = (cropWidth > 1)
? x1 * (imageWidth - 1) + x * widthScale
: 0.5 * (x1 + x2) * (imageWidth - 1);
if (inX < 0 || inX > imageWidth - 1) {
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
auto zIndex = shape::getOffset(outputShape, zPos);
output[zIndex] = (Z)extrapolationVal;
}
continue;
}
const int closestXIndex = roundf(inX);
const int closestYIndex = roundf(inY);
auto start = blockIdx.z * blockDim.z + threadIdx.z;
auto step = blockDim.z * gridDim.z;
for (int d = start; d < depth; d += step) {
Nd4jLong zPos[] = {b, y, x, d};
Nd4jLong xPos[] = {bIn, closestYIndex, closestXIndex, d};
auto zIndex = shape::getOffset(outputShape, zPos);
auto xIndex = shape::getOffset(imagesShape, xPos);
output[zIndex] = images[xIndex];
}
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// cropAndResizeFunctor main algorithm
//      context - launch context
//      images - batch of images (4D tensor - [batch, imageHeight, imageWidth, channels])
//      boxes - 2D tensor of normalized crop boxes, one (y1, x1, y2, x2) row per box
//      indices - int tensor with the image (batch) index each box is taken from
//      cropSize - int tensor with the requested crop sizes
//      method - interpolation method (0 - bilinear, 1 - nearest neighbor)
//      extrapolationVal - value written for samples that fall outside the source image
//      crops - output (4D tensor - [numBoxes, cropHeight, cropWidth, channels])
//
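// Worked example of the box mapping used below (illustration only): for a
// normalized box (y1, x1, y2, x2) = (0.0, 0.0, 0.5, 0.5) on a 100 x 100 image
// and a 10 x 10 crop, heightScale = (0.5 - 0.0) * 99 / 9 = 5.5, so crop row y
// samples source row y1 * 99 + y * 5.5; row 0 reads source row 0 and row 9
// reads row 49.5, i.e. the crop covers exactly the requested half of the image.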
template <typename T, typename Z, typename I>
static void cropAndResizeFunctor_(nd4j::LaunchContext* context, NDArray const *images, NDArray const *boxes, NDArray const *indices,
NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
const int batchSize = images->sizeAt(0);
const int imageHeight = images->sizeAt(1);
const int imageWidth = images->sizeAt(2);
const int numBoxes = crops->sizeAt(0);
const int cropHeight = crops->sizeAt(1);
const int cropWidth = crops->sizeAt(2);
const int depth = crops->sizeAt(3);
auto stream = context->getCudaStream();
T const* imagesBuf = reinterpret_cast<T const*>(images->getSpecialBuffer());
Z const* boxesBuf = reinterpret_cast<Z const*>(boxes->getSpecialBuffer());
I const* indexBuf = reinterpret_cast<I const*>(indices->getSpecialBuffer());
I const* cropSizes = reinterpret_cast<I const*>(cropSize->getSpecialBuffer());
T* outBuf = reinterpret_cast<T*>(crops->specialBuffer());
NDArray::prepareSpecialUse({crops}, {images, boxes, indices, cropSize});
cropAndResizeKernel<T,Z,I><<<batchSize, math::nd4j_max(imageHeight * imageWidth, cropHeight * cropWidth), 512, *stream>>>(imagesBuf, images->getSpecialShapeInfo(), boxesBuf, boxes->getSpecialShapeInfo(), indexBuf, indices->getSpecialShapeInfo(),
cropSizes, cropSize->getSpecialShapeInfo(), method, extrapolationVal, outBuf, crops->specialShapeInfo(), numBoxes, cropHeight, cropWidth, batchSize, imageHeight, imageWidth, depth);
NDArray::registerSpecialUse({crops}, {images, boxes, indices, cropSize});
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void cropAndResizeFunctor(nd4j::LaunchContext * context, NDArray const *images, NDArray const *boxes, NDArray const *indices, NDArray const *cropSize, int method, double extrapolationVal, NDArray *crops) {
BUILD_TRIPLE_SELECTOR(images->dataType(), boxes->dataType(), indices->dataType(), cropAndResizeFunctor_,
(context, images, boxes, indices, cropSize, method, extrapolationVal, crops), NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
//
}
BUILD_TRIPLE_TEMPLATE(template void cropAndResizeFunctor_,
(nd4j::LaunchContext * context, NDArray const* images, NDArray const* boxes, NDArray const* indices, NDArray const* cropSize, int method, double extrapolationVal, NDArray* crops),
NUMERIC_TYPES, FLOAT_TYPES, INTEGER_TYPES);
}
}
} |
8eb631f08e09bc9f0f3dc05d1655ed067a8c1dd0.hip | // !!! This is a file automatically generated by hipify!!!
/**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
float *gvalue, *goldval, *gnewval;
int size = (MAXPOINTS+2) * sizeof(float); /* size in bytes of each float buffer */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
__global__ void gpu_init_old_val(float *a, float *b, float *c, int n){
int j=blockIdx.x*blockDim.x+threadIdx.x;
int m=gridDim.x*blockDim.x;
for(int k=j; k<n; k+=m){
a[k] = b[k];
}
__syncthreads();
}
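/**********************************************************************
 *      Update one string point per GPU thread across all time steps.
 *      Note (added for clarity): the constant 0.09 equals tau * tau
 *      for the dtime = 0.3, c = 1.0, dx = 1.0 used in do_math below,
 *      and the neighbouring-point terms are omitted in this simplified
 *      scheme, so threads never need to exchange data.
 *********************************************************************/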
__global__ void gpu_update_point(float *a, float *b, float *c, int point, int nsteps){
int p=blockIdx.x*blockDim.x+threadIdx.x;
float aval = a[p];
float bval = b[p];
float cval;
if (p < point) {
for (int k = 0;k<nsteps;k++){
if ((p== 0) || (p == point - 1))
cval = 0.0;
else
cval = (2.0 * bval) - aval + (0.09 * (-2.0)*bval);
aval = bval;
bval = cval;
__syncthreads();
}
}
b[p] = bval;
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
void do_math(int i)
{
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 0; j < tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
hipMemcpy(gvalue, values, size, hipMemcpyHostToDevice);
hipMemcpy(goldval, oldval, size, hipMemcpyHostToDevice);
hipMemcpy(gnewval, newval, size, hipMemcpyHostToDevice);
/* Initialize old values array */
hipLaunchKernelGGL(( gpu_init_old_val), dim3(30),dim3(512), 0, 0, goldval, gvalue, gnewval, tpoints);
printf("Updating all points for all time steps...\n");
/* Update values for each time step */
/* Update points along line for this time step */
hipLaunchKernelGGL(( gpu_update_point), dim3((tpoints/512 + 1)),dim3(512), 0, 0, goldval, gvalue, gnewval, tpoints, nsteps);
hipMemcpy(values, gvalue, size, hipMemcpyDeviceToHost);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 0; i < tpoints; i++) {
printf("%6.4f ", values[i]);
if (i%10 == 9)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
// set GPU memory
hipMalloc((void**)&gvalue, size);
hipMalloc((void**)&goldval, size);
hipMalloc((void**)&gnewval, size);
printf("Initializing points on the line...\n");
init_line();
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
}
| 8eb631f08e09bc9f0f3dc05d1655ed067a8c1dd0.cu |
/**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
float *gvalue, *goldval, *gnewval;
int size = (MAXPOINTS+2) * sizeof(float); /* size in bytes of each float buffer */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
__global__ void gpu_init_old_val(float *a, float *b, float *c, int n){
int j=blockIdx.x*blockDim.x+threadIdx.x;
int m=gridDim.x*blockDim.x;
for(int k=j; k<n; k+=m){
a[k] = b[k];
}
__syncthreads();
}
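/**********************************************************************
 *      Update one string point per GPU thread across all time steps.
 *      Note (added for clarity): the constant 0.09 equals tau * tau
 *      for the dtime = 0.3, c = 1.0, dx = 1.0 used in do_math below,
 *      and the neighbouring-point terms are omitted in this simplified
 *      scheme, so threads never need to exchange data.
 *********************************************************************/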
__global__ void gpu_update_point(float *a, float *b, float *c, int point, int nsteps){
int p=blockIdx.x*blockDim.x+threadIdx.x;
float aval = a[p];
float bval = b[p];
float cval;
if (p < point) {
for (int k = 0;k<nsteps;k++){
if ((p== 0) || (p == point - 1))
cval = 0.0;
else
cval = (2.0 * bval) - aval + (0.09 * (-2.0)*bval);
aval = bval;
bval = cval;
__syncthreads();
}
}
b[p] = bval;
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
void do_math(int i)
{
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 0; j < tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
cudaMemcpy(gvalue, values, size, cudaMemcpyHostToDevice);
cudaMemcpy(goldval, oldval, size, cudaMemcpyHostToDevice);
cudaMemcpy(gnewval, newval, size, cudaMemcpyHostToDevice);
/* Initialize old values array */
gpu_init_old_val<<<30,512>>>(goldval, gvalue, gnewval, tpoints);
printf("Updating all points for all time steps...\n");
/* Update values for each time step */
/* Update points along line for this time step */
gpu_update_point<<<(tpoints/512 + 1),512>>>(goldval, gvalue, gnewval, tpoints, nsteps);
cudaMemcpy(values, gvalue, size, cudaMemcpyDeviceToHost);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 0; i < tpoints; i++) {
printf("%6.4f ", values[i]);
if (i%10 == 9)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
int main(int argc, char *argv[])
{
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
// set GPU memory
cudaMalloc((void**)&gvalue, size);
cudaMalloc((void**)&goldval, size);
cudaMalloc((void**)&gnewval, size);
printf("Initializing points on the line...\n");
init_line();
printf("Printing final results...\n");
printfinal();
printf("\nDone.\n\n");
return 0;
}
|
cd1dc426f33cb1de66616fc7dec19c9c2c7ed543.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/gapi.hpp>
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <hip/device_functions.h>
//**********************************************************************************************
//**********************************************************************************************
extern void median_filter_wrapper(const cv::Mat& input, cv::Mat& output);
extern void bilateral_filter_wrapper(const cv::Mat& input, cv::Mat& output);
int main()
{
// Read input file (image)
std::string imagePath = "W9Tbeo4kBkY.jpg";
cv::Mat input = cv::imread(imagePath, 0);
if (input.empty()) {
std::cout << "Could not load image. Check location and try again." << std::endl;
std::cin.get();
return -1;
}
double running_sum = 0.0;
int attempts = 10;
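// Note (added for clarity): the first of the timed runs below is excluded from
// running_sum, so each reported average covers attempts - 1 runs and one-time
// start-up costs (e.g. GPU context creation on the first call) are left out.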
cv::Size resize_size;
resize_size.width = 960;
resize_size.height = 800;
cv::resize(input, input, resize_size);
cv::Mat output_gpu(input.rows, input.cols, CV_8UC1);
cv::Mat output_cpu(input.rows, input.cols, CV_8UC1);
// --------------- MEDIAN FILTER ---------------
for (int ctr = 0; ctr < attempts; ctr++) {
clock_t gpu_s = clock();
median_filter_wrapper(input, output_gpu);
clock_t gpu_e = clock();
if (ctr > 0)
running_sum = running_sum + (double(gpu_e - gpu_s) * 1000) / CLOCKS_PER_SEC;
}
std::cout << "GPU Accelerated Median Filter took " << running_sum / (attempts - 1) << " ms.\n";
running_sum = 0.0;
for (int ctr = 0; ctr < attempts; ctr++) {
clock_t cpu_s = clock();
cv::medianBlur(input, output_cpu, 9);
clock_t cpu_e = clock();
if (ctr > 0)
running_sum = running_sum + (double(cpu_e - cpu_s) * 1000) / CLOCKS_PER_SEC;
}
std::cout << "CPU Accelerated Median Filter took " << running_sum / (attempts - 1) << " ms.\n";
cv::imshow("(MF) Output Image - GPU", output_gpu);
cv::imwrite("gpu_median_result.png", output_gpu);
cv::imshow("(MF) Output Image - CPU", output_cpu);
cv::imwrite("cpu_median_result.png", output_cpu);
// ------------- BILATERAL FILTER --------------
running_sum = 0.0;
for (int ctr = 0; ctr < attempts; ctr++) {
clock_t gpu_bs = clock();
bilateral_filter_wrapper(input, output_gpu);
clock_t gpu_be = clock();
if (ctr > 0)
running_sum = running_sum + (double(gpu_be - gpu_bs) * 1000) / CLOCKS_PER_SEC;
}
std::cout << "GPU Accelerated Bilateral Filter took " << running_sum / (attempts - 1) << " ms.\n";
running_sum = 0.0;
for (int ctr = 0; ctr < attempts; ctr++) {
clock_t cpu_bs = clock();
cv::bilateralFilter(input, output_cpu, 9, 50, 50);
clock_t cpu_be = clock();
if (ctr > 0)
running_sum = running_sum + (double(cpu_be - cpu_bs) * 1000) / CLOCKS_PER_SEC;
}
std::cout << "CPU Accelerated Bilateral Filter took " << running_sum / (attempts - 1) << " ms.\n";
cv::imshow("(BF) Output Image - GPU", output_gpu);
cv::imwrite("gpu_bilateral_result.png", output_gpu);
cv::imshow("(BF) Output Image - CPU", output_cpu);
cv::imwrite("cpu_bilateral_result.png", output_cpu);
cv::waitKey();
return 0;
}
| cd1dc426f33cb1de66616fc7dec19c9c2c7ed543.cu | #include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/gapi.hpp>
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <device_functions.h>
//**********************************************************************************************
//**********************************************************************************************
extern void median_filter_wrapper(const cv::Mat& input, cv::Mat& output);
extern void bilateral_filter_wrapper(const cv::Mat& input, cv::Mat& output);
int main()
{
// Read input file (image)
std::string imagePath = "W9Tbeo4kBkY.jpg";
cv::Mat input = cv::imread(imagePath, 0);
if (input.empty()) {
std::cout << "Could not load image. Check location and try again." << std::endl;
std::cin.get();
return -1;
}
double running_sum = 0.0;
int attempts = 10;
cv::Size resize_size;
resize_size.width = 960;
resize_size.height = 800;
cv::resize(input, input, resize_size);
cv::Mat output_gpu(input.rows, input.cols, CV_8UC1);
cv::Mat output_cpu(input.rows, input.cols, CV_8UC1);
// --------------- MEDIAN FILTER ---------------
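    // each timing loop below skips its first iteration (ctr == 0) to exclude GPU warm-up / context-creation overhead, hence the average over (attempts - 1)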
for (int ctr = 0; ctr < attempts; ctr++) {
clock_t gpu_s = clock();
median_filter_wrapper(input, output_gpu);
clock_t gpu_e = clock();
if (ctr > 0)
running_sum = running_sum + (double(gpu_e - gpu_s) * 1000) / CLOCKS_PER_SEC;
}
std::cout << "GPU Accelerated Median Filter took " << running_sum / (attempts - 1) << " ms.\n";
running_sum = 0.0;
for (int ctr = 0; ctr < attempts; ctr++) {
clock_t cpu_s = clock();
cv::medianBlur(input, output_cpu, 9);
clock_t cpu_e = clock();
if (ctr > 0)
running_sum = running_sum + (double(cpu_e - cpu_s) * 1000) / CLOCKS_PER_SEC;
}
std::cout << "CPU Accelerated Median Filter took " << running_sum / (attempts - 1) << " ms.\n";
cv::imshow("(MF) Output Image - GPU", output_gpu);
cv::imwrite("gpu_median_result.png", output_gpu);
cv::imshow("(MF) Output Image - CPU", output_cpu);
cv::imwrite("cpu_median_result.png", output_cpu);
// ------------- BILATERAL FILTER --------------
running_sum = 0.0;
for (int ctr = 0; ctr < attempts; ctr++) {
clock_t gpu_bs = clock();
bilateral_filter_wrapper(input, output_gpu);
clock_t gpu_be = clock();
if (ctr > 0)
running_sum = running_sum + (double(gpu_be - gpu_bs) * 1000) / CLOCKS_PER_SEC;
}
std::cout << "GPU Accelerated Bilateral Filter took " << running_sum / (attempts - 1) << " ms.\n";
running_sum = 0.0;
for (int ctr = 0; ctr < attempts; ctr++) {
clock_t cpu_bs = clock();
cv::bilateralFilter(input, output_cpu, 9, 50, 50);
clock_t cpu_be = clock();
if (ctr > 0)
running_sum = running_sum + (double(cpu_be - cpu_bs) * 1000) / CLOCKS_PER_SEC;
}
std::cout << "CPU Accelerated Bilateral Filter took " << running_sum / (attempts - 1) << " ms.\n";
cv::imshow("(BF) Output Image - GPU", output_gpu);
cv::imwrite("gpu_bilateral_result.png", output_gpu);
cv::imshow("(BF) Output Image - CPU", output_cpu);
cv::imwrite("cpu_bilateral_result.png", output_cpu);
cv::waitKey();
return 0;
}
|
bbdad9c26625719db4d4a484cde16215416e5796.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// A shared memory matrix multiplication program
#include "stdio.h"
#include "stdlib.h"
// SIZE must be evenly divisible by TILE_WIDTH in this program
#define SIZE 512
#define TILE_WIDTH 16
// functions called from within a kernel use the __device__ qualifier
__device__ float * GetSubMatrix(float * large_matrix, int row, int col)
{
// this returns the address of the first element in a sub-matrix
// when using this sub-matrix you have to jump ahead SIZE elements per row
float * subMatrix = &large_matrix[ row * SIZE * TILE_WIDTH + col * TILE_WIDTH ];
return subMatrix;
}
// matrix multiplication kernel definition
__global__ void MatrixMulKernel(float * A,float * B,float * C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
    // Here we call a __device__ helper function from within the kernel.
    // Device functions are invoked like ordinary functions, so no grid or
    // block configuration (the <<< >>> symbols) is needed for the call.
float * Csub = GetSubMatrix(C, blockRow, blockCol);
    // Each thread computes one element of Csub
// by accumulating results into Csub
float Cvalue = 0.0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Iterate through n sub_matrices, where n depends on the size of the tile and matrix
int sub_matrices_per_row = gridDim.x;
int i;
for ( i = 0; i < sub_matrices_per_row; i++ )
{
// Get sub-matrices
// Block threads will work on these sub-matrices
float * Asub = GetSubMatrix(A, blockRow, i);
float * Bsub = GetSubMatrix(B, i, blockCol);
// Shared memory used to store Asub and Bsub
__shared__ float As[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
// Load Asub and Bsub from device memory into shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[ row*SIZE + col ];
Bs[row][col] = Bsub[ row*SIZE + col ];
// Sync threads to make sure sub-matrices are completely loaded into shared memory
// Remember, this only syncs threads within a block
__syncthreads();
int j;
// Multiply Asub and Bsub together, using fast shared memory for data access
for ( j = 0; j < TILE_WIDTH ; j++ )
Cvalue += As[row][j] * Bs[j][col];
// Sync threads to ensure the preceding computation is done before loading
// two new sub-matrices of A and B in the next iteration
__syncthreads();
}
Csub[ row*SIZE + col ] = Cvalue;
}
int main(int argc, char ** argv)
{
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float h_A[SIZE*SIZE],h_B[SIZE*SIZE],h_C[SIZE*SIZE];
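    // NOTE: these three SIZE*SIZE float arrays (about 3 MB total for SIZE = 512) live on the stack; a larger SIZE may overflow the default stack limit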
float * d_A, * d_B, * d_C;
// initialize host matrices with arbitrary data
int i;
for (i=0;i<SIZE*SIZE;i++) {
h_A[i] = (float)i;
h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
h_C[i] = 0.0;
}
// allocate space on device
size_t size = SIZE*SIZE*sizeof(float);
hipMalloc(&d_A,size);
hipMalloc(&d_B,size);
hipMalloc(&d_C,size);
//copy data to device
hipMemcpy(d_A,h_A,size,hipMemcpyHostToDevice);
hipMemcpy(d_B,h_B,size,hipMemcpyHostToDevice);
hipMemcpy(d_C,h_C,size,hipMemcpyHostToDevice);
dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH); // 2d block
int blocks = ( SIZE + TILE_WIDTH - 1 ) / TILE_WIDTH;
dim3 blocksPerGrid(blocks,blocks); // 2d grid
hipEventRecord(start);
// invoke the kernel here
hipLaunchKernelGGL(( MatrixMulKernel), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_A,d_B,d_C);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("kernel time (ms) : %7.5f\n",milliseconds);
// copy results back to host
hipMemcpy(h_C,d_C,size,hipMemcpyDeviceToHost);
// output results
for (i=0;i<SIZE*SIZE;i++) {
printf("i: %d h_C[i]: %f\n",i,h_C[i]);
}
// Free up device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
} | bbdad9c26625719db4d4a484cde16215416e5796.cu | // A shared memory matrix multiplication program
#include "stdio.h"
#include "stdlib.h"
// SIZE must be evenly divisible by TILE_WIDTH in this program
#define SIZE 512
#define TILE_WIDTH 16
// functions called from within a kernel use the __device__ qualifier
__device__ float * GetSubMatrix(float * large_matrix, int row, int col)
{
// this returns the address of the first element in a sub-matrix
// when using this sub-matrix you have to jump ahead SIZE elements per row
float * subMatrix = &large_matrix[ row * SIZE * TILE_WIDTH + col * TILE_WIDTH ];
return subMatrix;
}
// matrix multiplication kernel definition
__global__ void MatrixMulKernel(float * A,float * B,float * C)
{
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
    // Here we call a __device__ helper function from within the kernel.
    // Device functions are invoked like ordinary functions, so no grid or
    // block configuration (the <<< >>> symbols) is needed for the call.
float * Csub = GetSubMatrix(C, blockRow, blockCol);
    // Each thread computes one element of Csub
// by accumulating results into Csub
float Cvalue = 0.0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Iterate through n sub_matrices, where n depends on the size of the tile and matrix
int sub_matrices_per_row = gridDim.x;
int i;
for ( i = 0; i < sub_matrices_per_row; i++ )
{
// Get sub-matrices
// Block threads will work on these sub-matrices
float * Asub = GetSubMatrix(A, blockRow, i);
float * Bsub = GetSubMatrix(B, i, blockCol);
// Shared memory used to store Asub and Bsub
__shared__ float As[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
// Load Asub and Bsub from device memory into shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = Asub[ row*SIZE + col ];
Bs[row][col] = Bsub[ row*SIZE + col ];
// Sync threads to make sure sub-matrices are completely loaded into shared memory
// Remember, this only syncs threads within a block
__syncthreads();
int j;
// Multiply Asub and Bsub together, using fast shared memory for data access
for ( j = 0; j < TILE_WIDTH ; j++ )
Cvalue += As[row][j] * Bs[j][col];
// Sync threads to ensure the preceding computation is done before loading
// two new sub-matrices of A and B in the next iteration
__syncthreads();
}
Csub[ row*SIZE + col ] = Cvalue;
}
int main(int argc, char ** argv)
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float h_A[SIZE*SIZE],h_B[SIZE*SIZE],h_C[SIZE*SIZE];
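    // NOTE: these three SIZE*SIZE float arrays (about 3 MB total for SIZE = 512) live on the stack; a larger SIZE may overflow the default stack limit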
float * d_A, * d_B, * d_C;
// initialize host matrices with arbitrary data
int i;
for (i=0;i<SIZE*SIZE;i++) {
h_A[i] = (float)i;
h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
h_C[i] = 0.0;
}
// allocate space on device
size_t size = SIZE*SIZE*sizeof(float);
cudaMalloc(&d_A,size);
cudaMalloc(&d_B,size);
cudaMalloc(&d_C,size);
//copy data to device
cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_C,h_C,size,cudaMemcpyHostToDevice);
dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH); // 2d block
int blocks = ( SIZE + TILE_WIDTH - 1 ) / TILE_WIDTH;
dim3 blocksPerGrid(blocks,blocks); // 2d grid
cudaEventRecord(start);
// invoke the kernel here
MatrixMulKernel<<< blocksPerGrid, threadsPerBlock >>>(d_A,d_B,d_C);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel time (ms) : %7.5f\n",milliseconds);
// copy results back to host
cudaMemcpy(h_C,d_C,size,cudaMemcpyDeviceToHost);
// output results
for (i=0;i<SIZE*SIZE;i++) {
printf("i: %d h_C[i]: %f\n",i,h_C[i]);
}
// Free up device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
} |
f973a2d9cf6b585f5d168ede25370354508c2095.hip | // !!! This is a file automatically generated by hipify!!!
/*%****************************************************************************80
% Code:
% ncclAllGather.cu
%
% Purpose:
% Implements a simple collective operation ALLGATHER using NCCL (ncclAllGather).
%
% Modified:
% Jan 09 2019 10:57
%
% Author:
% Murilo Boratto [muriloboratto 'at' gmail.com]
%
% How to Compile:
% nvcc ncclAllGather.cu -o ncclAllGather -lnccl
%
% How to Execute:
% ./ncclAllGather
%
% Comments:
%
% 1) For ncclAllGather, in place operations are done when the per-rank pointer is located at the rank offset
% of the global buffer. More precisely, these calls are considered in place:
%
% ncclAllGather(data+rank*sendcount, data, sendcount, datatype, comm, stream);
%
% 2) Simple Testbed with size problem = 4 on environment with 4 GPUs.
%
%****************************************************************************80*/
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "nccl.h"
__global__ void Dev_print(float *x) {
int i = threadIdx.x;
printf("%1.2f\t", x[i]);
}/*Dev_print*/
void print_vector(float *in, int n){
for(int i=0; i < n; i++)
if(in[i])
printf("%1.2f\t", in[i]);
}/*print_vector*/
int main(int argc, char* argv[]){
/*Variables*/
int size = 4;
int nGPUs = 4;
int sendcount = 1;
int DeviceList[4] = {0, 1, 2, 3}; /* (GPUs Id) Testbed on environment with 4 GPUs*/
/*Initializing NCCL with Multiples Devices per Thread*/
ncclComm_t* comms = (ncclComm_t*) malloc(sizeof(ncclComm_t) * nGPUs);
hipStream_t* s = (hipStream_t*)malloc(sizeof(hipStream_t)* nGPUs);
ncclCommInitAll(comms, nGPUs, DeviceList);
/*Allocating and initializing device buffers*/
float** sendbuff = (float**) malloc(nGPUs * sizeof(float*));
float** recvbuff = (float**) malloc(nGPUs * sizeof(float*));
/*Host vectors*/
float host_x0[4] = { 10, 0, 0, 0};
float host_x1[4] = { 0, 20, 0, 0};
float host_x2[4] = { 0, 0, 30, 0};
float host_x3[4] = { 0, 0, 0, 40};
print_vector(host_x0, size);
print_vector(host_x1, size);
print_vector(host_x2, size);
print_vector(host_x3, size);
for (int i = 0; i < nGPUs; ++i) {
hipSetDevice(i);
hipMalloc(&sendbuff[i], size * sizeof(float));
hipMalloc(&recvbuff[i], size * sizeof(float));
switch(i) { /*Copy from host to devices*/
case 0 : hipMemcpy(sendbuff[i] , host_x0, size * sizeof(float), hipMemcpyHostToDevice); break;
case 1 : hipMemcpy(sendbuff[i] , host_x1, size * sizeof(float), hipMemcpyHostToDevice); break;
case 2 : hipMemcpy(sendbuff[i] , host_x2, size * sizeof(float), hipMemcpyHostToDevice); break;
case 3 : hipMemcpy(sendbuff[i] , host_x3, size * sizeof(float), hipMemcpyHostToDevice); break;
}
hipStreamCreate(s+i);
}
ncclGroupStart();
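    // ncclGroupStart()/ncclGroupEnd() batch the per-GPU collective calls issued from this single thread so NCCL can launch them together and avoid deadlock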
for(int g = 0; g < nGPUs; g++) {
hipSetDevice(g);
ncclAllGather(sendbuff[g] + g, recvbuff[g], sendcount, ncclFloat, comms[g], s[g]); /*All Gathering the data on GPUs*/
}
ncclGroupEnd();
for(int g = 0; g < nGPUs; g++) {
hipSetDevice(g);
printf("\nThis is device %d\n", g);
hipLaunchKernelGGL(( Dev_print) , dim3(1), dim3(size) , 0, 0, recvbuff[g]); /*Call the CUDA Kernel: Print vector on GPUs*/
hipDeviceSynchronize();
}
printf("\n");
for (int i = 0; i < nGPUs; ++i) { /*Synchronizing CUDA Streams*/
hipSetDevice(i);
hipStreamSynchronize(s[i]);
}
for (int i = 0; i < nGPUs; ++i) { /*Destroy CUDA Streams*/
hipSetDevice(i);
hipFree(sendbuff[i]);
hipFree(recvbuff[i]);
}
for(int i = 0; i < nGPUs; ++i) /*Finalizing NCCL*/
ncclCommDestroy(comms[i]);
/*Freeing memory*/
hipFree(sendbuff);
hipFree(recvbuff);
return 0;
}/*main*/
| f973a2d9cf6b585f5d168ede25370354508c2095.cu | /*%****************************************************************************80
% Code:
% ncclAllGather.cu
%
% Purpose:
% Implements a simple collective operation ALLGATHER using NCCL (ncclAllGather).
%
% Modified:
% Jan 09 2019 10:57
%
% Author:
% Murilo Boratto [muriloboratto 'at' gmail.com]
%
% How to Compile:
% nvcc ncclAllGather.cu -o ncclAllGather -lnccl
%
% How to Execute:
% ./ncclAllGather
%
% Comments:
%
% 1) For ncclAllGather, in place operations are done when the per-rank pointer is located at the rank offset
% of the global buffer. More precisely, these calls are considered in place:
%
% ncclAllGather(data+rank*sendcount, data, sendcount, datatype, comm, stream);
%
% 2) Simple Testbed with size problem = 4 on environment with 4 GPUs.
%
%****************************************************************************80*/
#include <stdio.h>
#include "cuda_runtime.h"
#include "nccl.h"
__global__ void Dev_print(float *x) {
int i = threadIdx.x;
printf("%1.2f\t", x[i]);
}/*Dev_print*/
void print_vector(float *in, int n){
for(int i=0; i < n; i++)
if(in[i])
printf("%1.2f\t", in[i]);
}/*print_vector*/
int main(int argc, char* argv[]){
/*Variables*/
int size = 4;
int nGPUs = 4;
int sendcount = 1;
int DeviceList[4] = {0, 1, 2, 3}; /* (GPUs Id) Testbed on environment with 4 GPUs*/
/*Initializing NCCL with Multiples Devices per Thread*/
ncclComm_t* comms = (ncclComm_t*) malloc(sizeof(ncclComm_t) * nGPUs);
cudaStream_t* s = (cudaStream_t*)malloc(sizeof(cudaStream_t)* nGPUs);
ncclCommInitAll(comms, nGPUs, DeviceList);
/*Allocating and initializing device buffers*/
float** sendbuff = (float**) malloc(nGPUs * sizeof(float*));
float** recvbuff = (float**) malloc(nGPUs * sizeof(float*));
/*Host vectors*/
float host_x0[4] = { 10, 0, 0, 0};
float host_x1[4] = { 0, 20, 0, 0};
float host_x2[4] = { 0, 0, 30, 0};
float host_x3[4] = { 0, 0, 0, 40};
print_vector(host_x0, size);
print_vector(host_x1, size);
print_vector(host_x2, size);
print_vector(host_x3, size);
for (int i = 0; i < nGPUs; ++i) {
cudaSetDevice(i);
cudaMalloc(&sendbuff[i], size * sizeof(float));
cudaMalloc(&recvbuff[i], size * sizeof(float));
switch(i) { /*Copy from host to devices*/
case 0 : cudaMemcpy(sendbuff[i] , host_x0, size * sizeof(float), cudaMemcpyHostToDevice); break;
case 1 : cudaMemcpy(sendbuff[i] , host_x1, size * sizeof(float), cudaMemcpyHostToDevice); break;
case 2 : cudaMemcpy(sendbuff[i] , host_x2, size * sizeof(float), cudaMemcpyHostToDevice); break;
case 3 : cudaMemcpy(sendbuff[i] , host_x3, size * sizeof(float), cudaMemcpyHostToDevice); break;
}
cudaStreamCreate(s+i);
}
ncclGroupStart();
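    // ncclGroupStart()/ncclGroupEnd() batch the per-GPU collective calls issued from this single thread so NCCL can launch them together and avoid deadlock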
for(int g = 0; g < nGPUs; g++) {
cudaSetDevice(g);
ncclAllGather(sendbuff[g] + g, recvbuff[g], sendcount, ncclFloat, comms[g], s[g]); /*All Gathering the data on GPUs*/
}
ncclGroupEnd();
for(int g = 0; g < nGPUs; g++) {
cudaSetDevice(g);
printf("\nThis is device %d\n", g);
Dev_print <<< 1, size >>> (recvbuff[g]); /*Call the CUDA Kernel: Print vector on GPUs*/
cudaDeviceSynchronize();
}
printf("\n");
for (int i = 0; i < nGPUs; ++i) { /*Synchronizing CUDA Streams*/
cudaSetDevice(i);
cudaStreamSynchronize(s[i]);
}
for (int i = 0; i < nGPUs; ++i) { /*Destroy CUDA Streams*/
cudaSetDevice(i);
cudaFree(sendbuff[i]);
cudaFree(recvbuff[i]);
}
for(int i = 0; i < nGPUs; ++i) /*Finalizing NCCL*/
ncclCommDestroy(comms[i]);
/*Freeing memory*/
cudaFree(sendbuff);
cudaFree(recvbuff);
return 0;
}/*main*/
|
7de82544a16c3cb10bc57bc43e2604ccee271cc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include "Raytracing.h"
#include "Sphere.h"
#include "AleaTools.h"
using std::cout;
using std::endl;
#define USE_SM false
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void raytracing(uchar4* ptrDevPixels, Sphere* ptrDevSphere, uint nbSphere, uint w, uint h, float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
 |*       Constructor       *|
\*-------------------------*/
Raytracing::Raytracing(const Grid& grid, uint w, uint h, float dt, uint nbSphere) : Animable_I<uchar4>(grid, w, h, "RayTracing_Cuda_RGBA_uchar4")
{
this->nbSphere = nbSphere;
this->tabSphere = new Sphere[nbSphere];
// Dimension
this->w = w;
this->h = h;
this->dt = dt;
this->t = 0;
AleaTools aleaTools = AleaTools();
float bord = 200;
for(int i=0;i<nbSphere;i++)
{
float rayon = aleaTools.uniformeAB(20, this->w / 10);
float x = aleaTools.uniformeAB(bord, this->h - bord);
float y = aleaTools.uniformeAB(bord, this->w - bord);
float z = aleaTools.uniformeAB(10, 2 * this->w);
float hue = aleaTools.uniforme01();
tabSphere[i] = Sphere(x,y,z,rayon,hue);
}
}
Raytracing::~Raytracing()
{
delete[] tabSphere;
}
/*-------------------------*\
 |*         Method          *|
\*-------------------------*/
/**
* Override
 * Called periodically by the API
 *
 * Note: domaineMath is not used here because this view is not zoomable
*/
void Raytracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
Sphere* ptrDevSphere=NULL;
size_t size = this->nbSphere*sizeof(Sphere);
HANDLE_ERROR(hipMalloc(&ptrDevSphere,size));
HANDLE_ERROR(hipMemcpy(ptrDevSphere, this->tabSphere,size,hipMemcpyHostToDevice));
#if USE_SM
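    // when USE_SM is enabled, the sphere buffer size is passed as the dynamic shared-memory byte count of the launch, presumably so the kernel can stage the spheres in shared memory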
hipLaunchKernelGGL(( raytracing), dim3(dg), dim3(db), size, 0, ptrDevPixels, ptrDevSphere, this->nbSphere, w, h, this->t);
#else
hipLaunchKernelGGL(( raytracing), dim3(dg), dim3(db), 0, 0, ptrDevPixels,ptrDevSphere,this->nbSphere, w, h, this->t);
#endif
    Device::lastCudaError("raytracing rgba uchar4 (after)"); // optional, for debug only, remove for release
HANDLE_ERROR(hipFree(ptrDevSphere));
}
/**
* Override
 * Called periodically by the API
*/
void Raytracing::animationStep()
{
this->t += this->dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 7de82544a16c3cb10bc57bc43e2604ccee271cc8.cu | #include <iostream>
#include <assert.h>
#include "Device.h"
#include "Raytracing.h"
#include "Sphere.h"
#include "AleaTools.h"
using std::cout;
using std::endl;
#define USE_SM false
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void raytracing(uchar4* ptrDevPixels, Sphere* ptrDevSphere, uint nbSphere, uint w, uint h, float t);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
 |*       Constructor       *|
\*-------------------------*/
Raytracing::Raytracing(const Grid& grid, uint w, uint h, float dt, uint nbSphere) : Animable_I<uchar4>(grid, w, h, "RayTracing_Cuda_RGBA_uchar4")
{
this->nbSphere = nbSphere;
this->tabSphere = new Sphere[nbSphere];
// Dimension
this->w = w;
this->h = h;
this->dt = dt;
this->t = 0;
AleaTools aleaTools = AleaTools();
float bord = 200;
for(int i=0;i<nbSphere;i++)
{
float rayon = aleaTools.uniformeAB(20, this->w / 10);
float x = aleaTools.uniformeAB(bord, this->h - bord);
float y = aleaTools.uniformeAB(bord, this->w - bord);
float z = aleaTools.uniformeAB(10, 2 * this->w);
float hue = aleaTools.uniforme01();
tabSphere[i] = Sphere(x,y,z,rayon,hue);
}
}
Raytracing::~Raytracing()
{
delete[] tabSphere;
}
/*-------------------------*\
 |*         Method          *|
\*-------------------------*/
/**
* Override
 * Called periodically by the API
 *
 * Note: domaineMath is not used here because this view is not zoomable
*/
void Raytracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
Sphere* ptrDevSphere=NULL;
size_t size = this->nbSphere*sizeof(Sphere);
HANDLE_ERROR(cudaMalloc(&ptrDevSphere,size));
HANDLE_ERROR(cudaMemcpy(ptrDevSphere, this->tabSphere,size,cudaMemcpyHostToDevice));
#if USE_SM
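    // when USE_SM is enabled, the sphere buffer size is passed as the dynamic shared-memory byte count of the launch, presumably so the kernel can stage the spheres in shared memory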
raytracing<<<dg, db, size>>>(ptrDevPixels, ptrDevSphere, this->nbSphere, w, h, this->t);
#else
raytracing<<<dg, db>>>(ptrDevPixels,ptrDevSphere,this->nbSphere, w, h, this->t);
#endif
    Device::lastCudaError("raytracing rgba uchar4 (after)"); // optional, for debug only, remove for release
HANDLE_ERROR(cudaFree(ptrDevSphere));
}
/**
* Override
 * Called periodically by the API
*/
void Raytracing::animationStep()
{
this->t += this->dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
0a8ede12505199b8975afcf5863433383f3dbf37.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <complex.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hip/hip_complex.h>
#include <Integration.hpp>
#ifndef NX
#define NX 128
#endif
#ifndef NY
#define NY 128
#endif
#ifndef NZ
#define NZ 128
#endif
#define NXYZ (NX*NY*NZ)
inline __complex__ double gaussian3D(double x, double y,double z, double sigma)
{
return cexp( -1.*(x*x + y*y + z*z)/( 2.*sigma*sigma) );
//return 1. + I*0.;
}
inline void fill_array(__complex__ double *psi, double *params)
{
#pragma omp parallel for num_threads(8)
for (int ix = 0; ix < NX; ix++)
for (int iy = 0; iy < NY; iy++)
for (int iz = 0; iz < NZ; iz++)
{
psi[iz + NZ*(iy + NY*ix)] = gaussian3D((double) ix-NX/2,(double) iy-NY/2,(double) iz-NZ/2,params[0]) + I*0.;
}
}
template<typename T>
inline void save_file(const char* filename, void* data, size_t size)
{
FILE* file = fopen(filename,"wb");
fwrite(data,(size_t) size * sizeof(T),1,file);
fclose(file);
}
int main(int argc, char* argv[])
{
__complex__ double *h_psi; // used as a buffer on host side, be careful of 'magic' with pointer casting
hipDoubleComplex *d_psi;
cuErrCheck( hipHostMalloc((void**) &h_psi,(size_t) NXYZ * sizeof(__complex__ double)) );
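    // hipHostMalloc allocates pinned (page-locked) host memory, which speeds up the host<->device copies below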
cuErrCheck( hipMalloc((void**) &d_psi,(size_t) NXYZ * sizeof(hipDoubleComplex)) );
// initialize Integration module
Integration<NX,NY,NZ>* integ = new Integration<NX,NY,NZ>();
// get analytical result
double sigma = 5.0; // a_ho, charge
fill_array(h_psi,&sigma);
save_file<__complex__ double>("orginal.bin",(void*) h_psi, NX*NY*NZ );
cuErrCheck( hipMemcpy(d_psi, h_psi, (size_t) NX*NY*NZ * sizeof(hipDoubleComplex), hipMemcpyHostToDevice) );
double norm = 0.0;
norm = integ->normalize_cublas(d_psi);
printf("psi(0,0): %lf\n",creal(h_psi[NZ/2 + NZ*(NY/2 + NY*NX/2)]));
printf("norm^2= %lf\t\t(sigma*sigma*pi)^(3/2): %lf\n",norm*norm, pow( sigma*sigma*M_PI, 1.5 ) ); // norm of square of gaussian!
// for sigma = 10 -> 15749.609945722419
printf("%lf\n",integ->get_norm_cublas(d_psi));
cuErrCheck( hipMemcpy(h_psi, d_psi, (size_t) NX*NY*NZ * sizeof(hipDoubleComplex), hipMemcpyDeviceToHost) );
printf("psi(0,0): %lf\t%lf\n",creal(h_psi[NZ/2 + NZ*(NY/2 + NY*NX/2)]), 1./norm);
delete integ;
hipFree(d_psi);
hipHostFree(h_psi);
return EXIT_SUCCESS;
} | 0a8ede12505199b8975afcf5863433383f3dbf37.cu | #include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/functional.h>
#include <thrust/iterator/counting_iterator.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <complex.h>
#include <stdint.h>
#include <cuda.h>
#include <cufft.h>
#include <cuComplex.h>
#include <Integration.hpp>
#ifndef NX
#define NX 128
#endif
#ifndef NY
#define NY 128
#endif
#ifndef NZ
#define NZ 128
#endif
#define NXYZ (NX*NY*NZ)
inline __complex__ double gaussian3D(double x, double y,double z, double sigma)
{
return cexp( -1.*(x*x + y*y + z*z)/( 2.*sigma*sigma) );
//return 1. + I*0.;
}
inline void fill_array(__complex__ double *psi, double *params)
{
#pragma omp parallel for num_threads(8)
for (int ix = 0; ix < NX; ix++)
for (int iy = 0; iy < NY; iy++)
for (int iz = 0; iz < NZ; iz++)
{
psi[iz + NZ*(iy + NY*ix)] = gaussian3D((double) ix-NX/2,(double) iy-NY/2,(double) iz-NZ/2,params[0]) + I*0.;
}
}
template<typename T>
inline void save_file(const char* filename, void* data, size_t size)
{
FILE* file = fopen(filename,"wb");
fwrite(data,(size_t) size * sizeof(T),1,file);
fclose(file);
}
int main(int argc, char* argv[])
{
__complex__ double *h_psi; // used as a buffer on host side, be careful of 'magic' with pointer casting
cuDoubleComplex *d_psi;
cuErrCheck( cudaMallocHost((void**) &h_psi,(size_t) NXYZ * sizeof(__complex__ double)) );
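    // cudaMallocHost allocates pinned (page-locked) host memory, which speeds up the host<->device copies below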
cuErrCheck( cudaMalloc((void**) &d_psi,(size_t) NXYZ * sizeof(cuDoubleComplex)) );
// initialize Integration module
Integration<NX,NY,NZ>* integ = new Integration<NX,NY,NZ>();
// get analytical result
double sigma = 5.0; // a_ho, charge
fill_array(h_psi,&sigma);
save_file<__complex__ double>("orginal.bin",(void*) h_psi, NX*NY*NZ );
cuErrCheck( cudaMemcpy(d_psi, h_psi, (size_t) NX*NY*NZ * sizeof(cuDoubleComplex), cudaMemcpyHostToDevice) );
double norm = 0.0;
norm = integ->normalize_cublas(d_psi);
printf("psi(0,0): %lf\n",creal(h_psi[NZ/2 + NZ*(NY/2 + NY*NX/2)]));
printf("norm^2= %lf\t\t(sigma*sigma*pi)^(3/2): %lf\n",norm*norm, pow( sigma*sigma*M_PI, 1.5 ) ); // norm of square of gaussian!
// for sigma = 10 -> 15749.609945722419
printf("%lf\n",integ->get_norm_cublas(d_psi));
cuErrCheck( cudaMemcpy(h_psi, d_psi, (size_t) NX*NY*NZ * sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost) );
printf("psi(0,0): %lf\t%lf\n",creal(h_psi[NZ/2 + NZ*(NY/2 + NY*NX/2)]), 1./norm);
delete integ;
cudaFree(d_psi);
cudaFreeHost(h_psi);
return EXIT_SUCCESS;
} |
633368ff053a4531d371c325df22d159dd28182b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include "../common/common.h"
__global__ void convertToGray(uchar3* color, unsigned char* gray) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
gray[idx] = (unsigned char)(0.299f*(float)color[idx].x
+ 0.587f * (float)color[idx].y
+ 0.114f * (float)color[idx].z);
}
int main(int argc, char* const argv[]) {
// read image
cv::Mat img_orig;
img_orig = cv::imread("../../data/cat.jpg", 1);
// check data
if (!img_orig.data)
{
printf("No image data \n");
return -1;
}
int width = img_orig.cols;
int height = img_orig.rows;
cv::Mat host_img_out(height, width, CV_8UC1);
cv::Mat cvcuda_img_out(height, width, CV_8UC1);
cv::Mat gpu_img_out(height, width, CV_8UC1);
// CPU
    // execution time measuring on the CPU
double cpu_start, cpu_end;
cpu_start = seconds();
cv::cvtColor(img_orig, host_img_out, cv::COLOR_BGR2GRAY);
cv::imwrite("./images/q2_grayscale_cpu.jpg", host_img_out);
cpu_end = seconds();
std::cout << "CPU time: " << cpu_end - cpu_start << std::endl;
// OpenCV CUDA
cv::cuda::GpuMat dst, src;
src.upload(img_orig);
double cvcuda_start, cvcuda_end;
cvcuda_start = seconds();
cv::cuda::cvtColor(src, dst, cv::COLOR_BGR2GRAY);
cvcuda_end = seconds();
std::cout << "OpenCV CUDA time: " << cvcuda_end - cvcuda_start << std::endl;
dst.download(cvcuda_img_out);
// save kernel result
cv::imwrite("./images/q2_grayscale_cvcuda.jpg", cvcuda_img_out);
// GPU
// host array
uchar3* host_img_color = new uchar3 [width * height];
unsigned char* host_img_gray = new unsigned char [width * height];
// host image to 1 array
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
host_img_color[x + y * width]
= make_uchar3(img_orig.at<cv::Vec3b>(y, x)[2], img_orig.at<cv::Vec3b>(y, x)[1], img_orig.at<cv::Vec3b>(y, x)[0]);
}
}
// malloc device global memory
const int n_bytes = width * height;
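    // note: despite its name, n_bytes holds the pixel count; byte sizes are obtained by multiplying with sizeof(...) in the allocations and copies below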
uchar3 *device_img_color;
unsigned char* device_img_gray;
CHECK(hipMalloc((void **)&device_img_color, sizeof(uchar3) * n_bytes));
CHECK(hipMalloc((void **)&device_img_gray, sizeof(unsigned char) * n_bytes));
// transfer data from host to device
CHECK(hipMemcpy(device_img_color, host_img_color, sizeof(uchar3) * n_bytes, hipMemcpyHostToDevice));
dim3 block(32);
dim3 grid((width * height + block.x - 1) / block.x);
    // execution time measuring on the GPU
double gpu_start, gpu_end;
gpu_start = seconds();
hipLaunchKernelGGL(( convertToGray), dim3(grid), dim3(block), 0, 0, device_img_color, device_img_gray);
CHECK(hipDeviceSynchronize());
gpu_end = seconds();
std::cout << "GPU time: " << gpu_end - gpu_start << std::endl;
// copy kernel result back to host side
CHECK(hipMemcpy(host_img_gray, device_img_gray, sizeof(unsigned char) * n_bytes, hipMemcpyDeviceToHost));
// Results
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
gpu_img_out.at<unsigned char>(y, x) = host_img_gray[x + y * width];
}
}
// save kernel result
cv::imwrite("./images/q2_grayscale_gpu.jpg", gpu_img_out);
// free device global memory
CHECK(hipFree(device_img_color));
CHECK(hipFree(device_img_gray));
return 0;
} | 633368ff053a4531d371c325df22d159dd28182b.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include "../common/common.h"
__global__ void convertToGray(uchar3* color, unsigned char* gray) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
gray[idx] = (unsigned char)(0.299f*(float)color[idx].x
+ 0.587f * (float)color[idx].y
+ 0.114f * (float)color[idx].z);
}
int main(int argc, char* const argv[]) {
// read image
cv::Mat img_orig;
img_orig = cv::imread("../../data/cat.jpg", 1);
// check data
if (!img_orig.data)
{
printf("No image data \n");
return -1;
}
int width = img_orig.cols;
int height = img_orig.rows;
cv::Mat host_img_out(height, width, CV_8UC1);
cv::Mat cvcuda_img_out(height, width, CV_8UC1);
cv::Mat gpu_img_out(height, width, CV_8UC1);
// CPU
// execution time mesuring in CPU
double cpu_start, cpu_end;
cpu_start = seconds();
cv::cvtColor(img_orig, host_img_out, cv::COLOR_BGR2GRAY);
cv::imwrite("./images/q2_grayscale_cpu.jpg", host_img_out);
cpu_end = seconds();
std::cout << "CPU time: " << cpu_end - cpu_start << std::endl;
// OpenCV CUDA
cv::cuda::GpuMat dst, src;
src.upload(img_orig);
double cvcuda_start, cvcuda_end;
cvcuda_start = seconds();
cv::cuda::cvtColor(src, dst, cv::COLOR_BGR2GRAY);
cvcuda_end = seconds();
std::cout << "OpenCV CUDA time: " << cvcuda_end - cvcuda_start << std::endl;
dst.download(cvcuda_img_out);
// save kernel result
cv::imwrite("./images/q2_grayscale_cvcuda.jpg", cvcuda_img_out);
// GPU
// host array
uchar3* host_img_color = new uchar3 [width * height];
unsigned char* host_img_gray = new unsigned char [width * height];
// host image to 1 array
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
host_img_color[x + y * width]
= make_uchar3(img_orig.at<cv::Vec3b>(y, x)[2], img_orig.at<cv::Vec3b>(y, x)[1], img_orig.at<cv::Vec3b>(y, x)[0]);
}
}
// malloc device global memory
const int n_bytes = width * height;
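    // note: despite its name, n_bytes holds the pixel count; byte sizes are obtained by multiplying with sizeof(...) in the allocations and copies below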
uchar3 *device_img_color;
unsigned char* device_img_gray;
CHECK(cudaMalloc((void **)&device_img_color, sizeof(uchar3) * n_bytes));
CHECK(cudaMalloc((void **)&device_img_gray, sizeof(unsigned char) * n_bytes));
// transfer data from host to device
CHECK(cudaMemcpy(device_img_color, host_img_color, sizeof(uchar3) * n_bytes, cudaMemcpyHostToDevice));
dim3 block(32);
dim3 grid((width * height + block.x - 1) / block.x);
    // execution time measuring on the GPU
double gpu_start, gpu_end;
gpu_start = seconds();
convertToGray<<<grid, block>>>(device_img_color, device_img_gray);
CHECK(cudaDeviceSynchronize());
gpu_end = seconds();
std::cout << "GPU time: " << gpu_end - gpu_start << std::endl;
// copy kernel result back to host side
CHECK(cudaMemcpy(host_img_gray, device_img_gray, sizeof(unsigned char) * n_bytes, cudaMemcpyDeviceToHost));
// Results
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
gpu_img_out.at<unsigned char>(y, x) = host_img_gray[x + y * width];
}
}
// save kernel result
cv::imwrite("./images/q2_grayscale_gpu.jpg", gpu_img_out);
// free device global memory
CHECK(cudaFree(device_img_color));
CHECK(cudaFree(device_img_gray));
return 0;
} |
87328a6fdb8da7c857fda14c99b22fc1719bd0cb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp,NULL);
return ( (double)tp.tv_sec + (double)tp.tv_usec * 1e-6 );
}
#define CHECK(call){ \
const hipError_t error = call; \
if( error != hipSuccess ){ \
printf("Error: %s:%d\n", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
void checkResult(float* hostRef, float* gpuRef, const int N){
double epsilon = 1.0E-8;
int match = 1;
for( int idx = 0; idx != N; ++idx ){
if(abs(gpuRef[idx] - hostRef[idx]) > epsilon){
match = 0;
printf("Arrays don't match.\n");
printf("gpu: %5.2f host: %5.2f at current %d\n", gpuRef[idx], hostRef[idx], idx);
break;
}
}
if(match){ printf("Arrays match.\n"); }
return;
}
void initializeData(float* ptr, const int size){
time_t t;
srand( (unsigned) time(&t) );
for(int idx = 0; idx != size; ++idx ){
ptr[idx] = (float)(rand() & 0xFF) / 10.0f;
}
}
void sumArraysOnHost(float* A, float* B, float* C, const int N){
for(int idx = 0; idx != N; ++idx)
C[idx] = A[idx] + B[idx];
}
__global__ void sumArraysOnDevice(float* A, float* B, float* C){
int idx = threadIdx.x + blockIdx.x * blockDim.x; // assuming 1D ?
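    // note: no bounds check is done here, so the launch must cover exactly nElem elements (block size dividing 1<<24); otherwise the last block writes out of range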
C[idx] = A[idx] + B[idx];
}
int main(int argc, char** argv){
if(argc != 2){
printf("Invalid arguments\n");
exit(2);
}
printf("Starting...\n");
double iStart, iElapse;
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("using device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
int nElem = 1<<24;
printf("Vector size: %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
float* h_A = (float *) malloc(nBytes);
float* h_B = (float *) malloc(nBytes);
float* hostRef = (float *) malloc(nBytes);
float* gpuRef = (float *) malloc(nBytes);
initializeData(h_A, nElem);
initializeData(h_B, nElem);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((float**) &d_A, nBytes));
CHECK(hipMalloc((float**) &d_B, nBytes));
CHECK(hipMalloc((float**) &d_C, nBytes));
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
dim3 block(atoi(argv[1]));
dim3 grid((nElem + block.x -1) / block.x);
iStart = cpuSecond();
hipLaunchKernelGGL(( sumArraysOnDevice), dim3(grid), dim3(block) , 0, 0, d_A, d_B, d_C);
CHECK(hipDeviceSynchronize());
iElapse = cpuSecond() - iStart;
printf("sumArraysOnDevice() <<< %d, %d >>> time: %5.6f sec\n", grid.x, block.x, iElapse);
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost ));
iStart = cpuSecond();
sumArraysOnHost(h_A, h_B, hostRef, nElem);
iElapse = cpuSecond() - iStart;
printf("sumArraysOnHost(): time: %5.6f sec\n", iElapse);
checkResult(hostRef, gpuRef, nElem);
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
return(0);
}
| 87328a6fdb8da7c857fda14c99b22fc1719bd0cb.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <cuda_runtime.h>
#include <sys/time.h>
double cpuSecond(){
struct timeval tp;
gettimeofday(&tp,NULL);
return ( (double)tp.tv_sec + (double)tp.tv_usec * 1e-6 );
}
#define CHECK(call){ \
const cudaError_t error = call; \
if( error != cudaSuccess ){ \
printf("Error: %s:%d\n", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
void checkResult(float* hostRef, float* gpuRef, const int N){
double epsilon = 1.0E-8;
int match = 1;
for( int idx = 0; idx != N; ++idx ){
if(abs(gpuRef[idx] - hostRef[idx]) > epsilon){
match = 0;
printf("Arrays don't match.\n");
printf("gpu: %5.2f host: %5.2f at current %d\n", gpuRef[idx], hostRef[idx], idx);
break;
}
}
if(match){ printf("Arrays match.\n"); }
return;
}
void initializeData(float* ptr, const int size){
time_t t;
srand( (unsigned) time(&t) );
for(int idx = 0; idx != size; ++idx ){
ptr[idx] = (float)(rand() & 0xFF) / 10.0f;
}
}
void sumArraysOnHost(float* A, float* B, float* C, const int N){
for(int idx = 0; idx != N; ++idx)
C[idx] = A[idx] + B[idx];
}
__global__ void sumArraysOnDevice(float* A, float* B, float* C){
int idx = threadIdx.x + blockIdx.x * blockDim.x; // assuming 1D ?
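    // note: no bounds check is done here, so the launch must cover exactly nElem elements (block size dividing 1<<24); otherwise the last block writes out of range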
C[idx] = A[idx] + B[idx];
}
int main(int argc, char** argv){
if(argc != 2){
printf("Invalid arguments\n");
exit(2);
}
printf("Starting...\n");
double iStart, iElapse;
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("using device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
int nElem = 1<<24;
printf("Vector size: %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
float* h_A = (float *) malloc(nBytes);
float* h_B = (float *) malloc(nBytes);
float* hostRef = (float *) malloc(nBytes);
float* gpuRef = (float *) malloc(nBytes);
initializeData(h_A, nElem);
initializeData(h_B, nElem);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((float**) &d_A, nBytes));
CHECK(cudaMalloc((float**) &d_B, nBytes));
CHECK(cudaMalloc((float**) &d_C, nBytes));
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
dim3 block(atoi(argv[1]));
dim3 grid((nElem + block.x -1) / block.x);
iStart = cpuSecond();
sumArraysOnDevice<<< grid, block >>>(d_A, d_B, d_C);
CHECK(cudaDeviceSynchronize());
iElapse = cpuSecond() - iStart;
printf("sumArraysOnDevice() <<< %d, %d >>> time: %5.6f sec\n", grid.x, block.x, iElapse);
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost ));
iStart = cpuSecond();
sumArraysOnHost(h_A, h_B, hostRef, nElem);
iElapse = cpuSecond() - iStart;
printf("sumArraysOnHost(): time: %5.6f sec\n", iElapse);
checkResult(hostRef, gpuRef, nElem);
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
return(0);
}
|
14d2e14de304c44dfacc342b0ac33d51498d1601.hip | // !!! This is a file automatically generated by hipify!!!
#include "include/gpu/gpu_cublas.h"
#ifdef USE_GPU
#ifdef USE_CUBLAS
template<typename T>
void GpuCublas<T>::gemm(bool transA, bool transB, int m, int n, int k, \
T* a, int lda, T* b, int ldb, T* c, int ldc, T alpha, T beta){
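    // note: transA and transB are accepted but not forwarded; every call below uses non-transposed operands (HIPBLAS_OP_N)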
hipblasHandle_t cublasHandle;
cublasErrCheck(hipblasCreate(&cublasHandle));
// warm up
for(int i = 0; i < Base::_warm_up; ++i){
#ifdef USE_TENSOR_CORE
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
m, n, k, &alpha,
a, HIP_R_32F, lda,
b, HIP_R_32F, ldb, &beta,
c, HIP_R_32F, ldc,
HIP_R_32F, HIPBLAS_GEMM_DEFAULT));
#else
cublasErrCheck(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
m, n, k, &alpha,
a, lda, b, ldb, &beta, c, ldc));
#endif
}
Timer<NV> t;
for(int i = 0; i < Base::_iter_num; ++i){
t.start();
#ifdef USE_TENSOR_CORE
cublasErrCheck(hipblasGemmEx(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
m, n, k, &alpha,
a, HIP_R_32F, lda,
b, HIP_R_32F, ldb, &beta,
c, HIP_R_32F, ldc,
HIP_R_32F, HIPBLAS_GEMM_DEFAULT));
#else
cublasErrCheck(hipblasSgemm(cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N,
m, n, k, &alpha,
a, lda, b, ldb, &beta, c, ldc));
#endif
t.end();
}
Base::_elapsed = t.getAverageTimeMs();
}
template<typename T>
void GpuCublas<T>::operator()(bool transA, bool transB, int m, int n, int k, \
T* a, int lda, T* b, int ldb, T* c, int ldc, T alpha, T beta){
T* d_a = nullptr;
T* d_b = nullptr;
T* d_c = nullptr;
int size_a = lda * k;
int size_b = ldb * n;
int size_c = ldc * n;
cudaErrCheck(hipMalloc((void**)&d_a, size_a * sizeof(T)));
cudaErrCheck(hipMalloc((void**)&d_b, size_b * sizeof(T)));
cudaErrCheck(hipMalloc((void**)&d_c, size_c * sizeof(T)));
cudaErrCheck(hipMemcpy(d_a, a, size_a * sizeof(T), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(d_b, b, size_b * sizeof(T), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(d_c, c, size_c * sizeof(T), hipMemcpyHostToDevice));
gemm(transA, transB, m, n, k, d_a, lda, d_b, ldb, d_c, ldc, alpha, beta);
cudaErrCheck(hipMemcpy(c, d_c, size_c * sizeof(T), hipMemcpyDeviceToHost));
cudaErrCheck(hipFree(d_a));
cudaErrCheck(hipFree(d_b));
cudaErrCheck(hipFree(d_c));
}
// template instantiation declarations
template class GpuCublas<float>;
// register GPU_CUBLAS to GEMM Repo;
REGISTER_GEMM(GPU_CUBLAS, GpuCublas);
#endif //USE_CUBLAS
#endif //USE_GPU | 14d2e14de304c44dfacc342b0ac33d51498d1601.cu | #include "include/gpu/gpu_cublas.h"
#ifdef USE_GPU
#ifdef USE_CUBLAS
template<typename T>
void GpuCublas<T>::gemm(bool transA, bool transB, int m, int n, int k, \
T* a, int lda, T* b, int ldb, T* c, int ldc, T alpha, T beta){
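    // note: transA and transB are accepted but not forwarded; every call below uses non-transposed operands (CUBLAS_OP_N)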
cublasHandle_t cublasHandle;
cublasErrCheck(cublasCreate(&cublasHandle));
// warm up
for(int i = 0; i < Base::_warm_up; ++i){
#ifdef USE_TENSOR_CORE
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
m, n, k, &alpha,
a, CUDA_R_32F, lda,
b, CUDA_R_32F, ldb, &beta,
c, CUDA_R_32F, ldc,
CUDA_R_32F, CUBLAS_GEMM_DEFAULT));
#else
cublasErrCheck(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
m, n, k, &alpha,
a, lda, b, ldb, &beta, c, ldc));
#endif
}
Timer<NV> t;
for(int i = 0; i < Base::_iter_num; ++i){
t.start();
#ifdef USE_TENSOR_CORE
cublasErrCheck(cublasGemmEx(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
m, n, k, &alpha,
a, CUDA_R_32F, lda,
b, CUDA_R_32F, ldb, &beta,
c, CUDA_R_32F, ldc,
CUDA_R_32F, CUBLAS_GEMM_DEFAULT));
#else
cublasErrCheck(cublasSgemm(cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N,
m, n, k, &alpha,
a, lda, b, ldb, &beta, c, ldc));
#endif
t.end();
}
Base::_elapsed = t.getAverageTimeMs();
}
template<typename T>
void GpuCublas<T>::operator()(bool transA, bool transB, int m, int n, int k, \
T* a, int lda, T* b, int ldb, T* c, int ldc, T alpha, T beta){
T* d_a = nullptr;
T* d_b = nullptr;
T* d_c = nullptr;
int size_a = lda * k;
int size_b = ldb * n;
int size_c = ldc * n;
cudaErrCheck(cudaMalloc((void**)&d_a, size_a * sizeof(T)));
cudaErrCheck(cudaMalloc((void**)&d_b, size_b * sizeof(T)));
cudaErrCheck(cudaMalloc((void**)&d_c, size_c * sizeof(T)));
cudaErrCheck(cudaMemcpy(d_a, a, size_a * sizeof(T), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(d_b, b, size_b * sizeof(T), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(d_c, c, size_c * sizeof(T), cudaMemcpyHostToDevice));
gemm(transA, transB, m, n, k, d_a, lda, d_b, ldb, d_c, ldc, alpha, beta);
cudaErrCheck(cudaMemcpy(c, d_c, size_c * sizeof(T), cudaMemcpyDeviceToHost));
cudaErrCheck(cudaFree(d_a));
cudaErrCheck(cudaFree(d_b));
cudaErrCheck(cudaFree(d_c));
}
// template instantiation declarations
template class GpuCublas<float>;
// register GPU_CUBLAS to GEMM Repo;
REGISTER_GEMM(GPU_CUBLAS, GpuCublas);
#endif //USE_CUBLAS
#endif //USE_GPU |
cuFFT.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <Python.h>
#include <numpy/arrayobject.h>
#include <hipfft.h>
#include <rocblas.h>
// Define macro functions
#define M_CONC(A, B) M_CONC_(A, B)
#define M_CONC_(A, B) A##B
#define STRINGIFY(x) STRINGIFY_(x)
#define STRINGIFY_(x) #x
// Select precision
#ifdef singleprecisioncomplex
#define MODULE_LABEL singleprecisioncomplex
typedef float2 PREC_TYPE;
typedef float SCALE_TYPE;
typedef hipfftComplex PREC_CUTYPE;
#define cublasscal(...) hipblasCsscal(__VA_ARGS__);
#define cufftExec(...) hipfftExecC2C(__VA_ARGS__);
#define NPY_CPREC NPY_CFLOAT
#define CUFFT_PLAN HIPFFT_C2C
#endif
#ifdef doubleprecisioncomplex
#define MODULE_LABEL doubleprecisioncomplex
typedef double2 PREC_TYPE;
typedef double SCALE_TYPE;
typedef hipfftDoubleComplex PREC_CUTYPE;
#define cublasscal(...) hipblasZdscal(__VA_ARGS__);
#define cufftExec(...) hipfftExecZ2Z(__VA_ARGS__);
#define NPY_CPREC NPY_CDOUBLE
#define CUFFT_PLAN HIPFFT_Z2Z
#endif
// Init function names
#define MODULE_NAME STRINGIFY(MODULE_LABEL)
#define INIT_FUNCTION M_CONC(init, MODULE_LABEL)
#define INIT_FUNCTION3 M_CONC(PyInit_, MODULE_LABEL)
static PyObject* fft(PyObject* self, PyObject *arg, PyObject *keywords);
static PyObject* ifft(PyObject* self, PyObject *arg, PyObject *keywords);
///////////////////////////////////
// Python Interface
static char function_docstring_fft[] =
"fft(signal)\nFast Fourier Transform implemented in CUDA\n using cuFFT\n ";
static char function_docstring_ifft[] =
"ifft(signal)\nInverse Fast Fourier Transform implemented in CUDA\n using cuFFT\n ";
static PyMethodDef extension_funcs[] =
{
{"fft", (PyCFunction) fft, METH_VARARGS|METH_KEYWORDS, function_docstring_fft},
{"ifft", (PyCFunction) ifft, METH_VARARGS|METH_KEYWORDS, function_docstring_ifft},
{NULL, NULL, 0, NULL}
};
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_gpu_fft",
"This a module that contains a python interface to call cuFFT and cuiFFT",
-1,
extension_funcs,
NULL,
NULL,
NULL,
NULL,
};
#endif
static PyObject *
moduleinit(void)
{
PyObject *m;
#if PY_MAJOR_VERSION >= 3
m = PyModule_Create(&moduledef);
#else
m = Py_InitModule3(MODULE_NAME,
extension_funcs, "cuFFT module");
#endif
return m;
}
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC
INIT_FUNCTION(void)
{
import_array();
moduleinit();
}
#else
PyMODINIT_FUNC
INIT_FUNCTION3(void)
{
import_array();
return moduleinit();
}
#endif
///////////////////////////////////////////////
// //
// Fast Fourier Transform //
// //
///////////////////////////////////////////////
static PyObject* fft(PyObject* self, PyObject *arg, PyObject *keywords)
{
// Interface with Python
PyObject *h_signal_obj;
static char *kwlist[] = {"input_data", NULL};
if (!PyArg_ParseTupleAndKeywords(arg, keywords, "O", kwlist, &h_signal_obj)) return NULL;
PyObject *h_signal_array = PyArray_FROM_OTF(h_signal_obj, NPY_CPREC, NPY_IN_ARRAY);
if (h_signal_array == NULL ) {
Py_XDECREF(h_signal_array);
return NULL;
}
PREC_TYPE *h_signal = (PREC_TYPE *)PyArray_DATA(h_signal_array);
int signal_size = (int)PyArray_DIM(h_signal_array, 0);
PyArrayObject *return_object;
int dims[1] = {signal_size};
return_object = (PyArrayObject *) PyArray_FromDims(1,dims,NPY_CPREC);
PREC_TYPE *return_data = (PREC_TYPE *)PyArray_DATA(return_object);
int mem_size = sizeof(PREC_TYPE) * signal_size;
// Allocate device memory for signal in the device
PREC_TYPE* d_signal;
hipMalloc((void**)&d_signal, mem_size);
// Copy host memory to device
hipMemcpy(d_signal, h_signal, mem_size,
hipMemcpyHostToDevice);
// CUFFT plan
hipfftHandle plan;
hipfftPlan1d(&plan, signal_size, CUFFT_PLAN, 1);
// Fourier transform using HIPFFT_FORWARD
cufftExec(plan, (PREC_CUTYPE *)d_signal, (PREC_CUTYPE *)d_signal, HIPFFT_FORWARD);
// Copy device memory to host
hipMemcpy(return_data, d_signal, mem_size,
hipMemcpyDeviceToHost);
// cleanup memory
hipfftDestroy(plan);
hipFree(d_signal);
Py_DECREF(h_signal_array);
return(PyArray_Return(return_object));
}
////////////////////////////////////////////////
// //
// Inverse Fast Fourier Transform //
// //
////////////////////////////////////////////////
static PyObject* ifft(PyObject* self, PyObject *arg, PyObject *keywords)
{
// Interface with Python
PyObject *h_signal_obj;
static char *kwlist[] = {"input_data", NULL};
if (!PyArg_ParseTupleAndKeywords(arg, keywords, "O", kwlist, &h_signal_obj)) return NULL;
PyObject *h_signal_array = PyArray_FROM_OTF(h_signal_obj, NPY_CPREC, NPY_IN_ARRAY);
if (h_signal_array == NULL ) {
Py_XDECREF(h_signal_array);
return NULL;
}
PREC_TYPE *h_signal = (PREC_TYPE *)PyArray_DATA(h_signal_array);
int signal_size = (int)PyArray_DIM(h_signal_array, 0);
//Create new numpy array for storing result
PyArrayObject *return_object;
int dims[1]={signal_size};
return_object = (PyArrayObject *) PyArray_FromDims(1,dims ,NPY_CPREC);
PREC_TYPE *return_data = (PREC_TYPE *)PyArray_DATA(return_object);
int mem_size = sizeof(PREC_TYPE) * signal_size;
// Allocate device memory for signal in the device
PREC_TYPE* d_signal;
hipMalloc((void**)&d_signal, mem_size);
// Copy host memory to device
hipMemcpy(d_signal, h_signal, mem_size,
hipMemcpyHostToDevice);
// CUFFT plan
hipfftHandle plan;
hipfftPlan1d(&plan, signal_size, CUFFT_PLAN, 1);
// Create a handle for CUBLAS
hipblasHandle_t handle;
hipblasCreate(&handle);
// Inverse Fourier transform using HIPFFT_BACKWARD
cufftExec(plan, (PREC_CUTYPE *)d_signal, (PREC_CUTYPE *)d_signal, HIPFFT_BACKWARD);
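    // the inverse transform is unnormalized, so the result is scaled by 1/signal_size below to recover ifft(fft(x)) == x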
SCALE_TYPE alpha = 1.0 / signal_size;
cublasscal(handle, signal_size,
&alpha,
d_signal, 1);
// Copy device memory to host
hipMemcpy(return_data, d_signal, mem_size,
hipMemcpyDeviceToHost);
// cleanup memory
hipblasDestroy(handle);
hipfftDestroy(plan);
hipFree(d_signal);
Py_DECREF(h_signal_array);
return(PyArray_Return(return_object));
}
| cuFFT.cu | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <complex.h>
#include <Python.h>
#include <numpy/arrayobject.h>
#include <cufft.h>
#include <cublas_v2.h>
// Define macro functions
#define M_CONC(A, B) M_CONC_(A, B)
#define M_CONC_(A, B) A##B
#define STRINGIFY(x) STRINGIFY_(x)
#define STRINGIFY_(x) #x
// Select precision
#ifdef singleprecisioncomplex
#define MODULE_LABEL singleprecisioncomplex
typedef float2 PREC_TYPE;
typedef float SCALE_TYPE;
typedef cufftComplex PREC_CUTYPE;
#define cublasscal(...) cublasCsscal(__VA_ARGS__);
#define cufftExec(...) cufftExecC2C(__VA_ARGS__);
#define NPY_CPREC NPY_CFLOAT
#define CUFFT_PLAN CUFFT_C2C
#endif
#ifdef doubleprecisioncomplex
#define MODULE_LABEL doubleprecisioncomplex
typedef double2 PREC_TYPE;
typedef double SCALE_TYPE;
typedef cufftDoubleComplex PREC_CUTYPE;
#define cublasscal(...) cublasZdscal(__VA_ARGS__);
#define cufftExec(...) cufftExecZ2Z(__VA_ARGS__);
#define NPY_CPREC NPY_CDOUBLE
#define CUFFT_PLAN CUFFT_Z2Z
#endif
// Init function names
#define MODULE_NAME STRINGIFY(MODULE_LABEL)
#define INIT_FUNCTION M_CONC(init, MODULE_LABEL)
#define INIT_FUNCTION3 M_CONC(PyInit_, MODULE_LABEL)
static PyObject* fft(PyObject* self, PyObject *arg, PyObject *keywords);
static PyObject* ifft(PyObject* self, PyObject *arg, PyObject *keywords);
///////////////////////////////////
// Python Interface
static char function_docstring_fft[] =
"fft(signal)\nFast Fourier Transform implemented in CUDA\n using cuFFT\n ";
static char function_docstring_ifft[] =
"ifft(signal)\nInverse Fast Fourier Transform implemented in CUDA\n using cuFFT\n ";
static PyMethodDef extension_funcs[] =
{
{"fft", (PyCFunction) fft, METH_VARARGS|METH_KEYWORDS, function_docstring_fft},
{"ifft", (PyCFunction) ifft, METH_VARARGS|METH_KEYWORDS, function_docstring_ifft},
{NULL, NULL, 0, NULL}
};
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"_gpu_fft",
"This a module that contains a python interface to call cuFFT and cuiFFT",
-1,
extension_funcs,
NULL,
NULL,
NULL,
NULL,
};
#endif
static PyObject *
moduleinit(void)
{
PyObject *m;
#if PY_MAJOR_VERSION >= 3
m = PyModule_Create(&moduledef);
#else
m = Py_InitModule3(MODULE_NAME,
extension_funcs, "cuFFT module");
#endif
return m;
}
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC
INIT_FUNCTION(void)
{
import_array();
moduleinit();
}
#else
PyMODINIT_FUNC
INIT_FUNCTION3(void)
{
import_array();
return moduleinit();
}
#endif
///////////////////////////////////////////////
// //
// Fast Fourier Transform //
// //
///////////////////////////////////////////////
static PyObject* fft(PyObject* self, PyObject *arg, PyObject *keywords)
{
// Interface with Python
PyObject *h_signal_obj;
static char *kwlist[] = {"input_data", NULL};
if (!PyArg_ParseTupleAndKeywords(arg, keywords, "O", kwlist, &h_signal_obj)) return NULL;
PyObject *h_signal_array = PyArray_FROM_OTF(h_signal_obj, NPY_CPREC, NPY_IN_ARRAY);
if (h_signal_array == NULL ) {
Py_XDECREF(h_signal_array);
return NULL;
}
PREC_TYPE *h_signal = (PREC_TYPE *)PyArray_DATA(h_signal_array);
int signal_size = (int)PyArray_DIM(h_signal_array, 0);
PyArrayObject *return_object;
int dims[1] = {signal_size};
return_object = (PyArrayObject *) PyArray_FromDims(1,dims,NPY_CPREC);
PREC_TYPE *return_data = (PREC_TYPE *)PyArray_DATA(return_object);
int mem_size = sizeof(PREC_TYPE) * signal_size;
// Allocate device memory for signal in the device
PREC_TYPE* d_signal;
cudaMalloc((void**)&d_signal, mem_size);
// Copy host memory to device
cudaMemcpy(d_signal, h_signal, mem_size,
cudaMemcpyHostToDevice);
// CUFFT plan
cufftHandle plan;
cufftPlan1d(&plan, signal_size, CUFFT_PLAN, 1);
// Fourier transform using CUFFT_FORWARD
cufftExec(plan, (PREC_CUTYPE *)d_signal, (PREC_CUTYPE *)d_signal, CUFFT_FORWARD);
// Copy device memory to host
cudaMemcpy(return_data, d_signal, mem_size,
cudaMemcpyDeviceToHost);
// cleanup memory
cufftDestroy(plan);
cudaFree(d_signal);
Py_DECREF(h_signal_array);
return(PyArray_Return(return_object));
}
////////////////////////////////////////////////
// //
// Inverse Fast Fourier Transform //
// //
////////////////////////////////////////////////
static PyObject* ifft(PyObject* self, PyObject *arg, PyObject *keywords)
{
// Interface with Python
PyObject *h_signal_obj;
static char *kwlist[] = {"input_data", NULL};
if (!PyArg_ParseTupleAndKeywords(arg, keywords, "O", kwlist, &h_signal_obj)) return NULL;
PyObject *h_signal_array = PyArray_FROM_OTF(h_signal_obj, NPY_CPREC, NPY_IN_ARRAY);
if (h_signal_array == NULL ) {
Py_XDECREF(h_signal_array);
return NULL;
}
PREC_TYPE *h_signal = (PREC_TYPE *)PyArray_DATA(h_signal_array);
int signal_size = (int)PyArray_DIM(h_signal_array, 0);
//Create new numpy array for storing result
PyArrayObject *return_object;
int dims[1]={signal_size};
return_object = (PyArrayObject *) PyArray_FromDims(1,dims ,NPY_CPREC);
PREC_TYPE *return_data = (PREC_TYPE *)PyArray_DATA(return_object);
int mem_size = sizeof(PREC_TYPE) * signal_size;
// Allocate device memory for signal in the device
PREC_TYPE* d_signal;
cudaMalloc((void**)&d_signal, mem_size);
// Copy host memory to device
cudaMemcpy(d_signal, h_signal, mem_size,
cudaMemcpyHostToDevice);
// CUFFT plan
cufftHandle plan;
cufftPlan1d(&plan, signal_size, CUFFT_PLAN, 1);
// Create a handle for CUBLAS
cublasHandle_t handle;
cublasCreate(&handle);
// Inverse Fourier transform using CUFFT_INVERSE
cufftExec(plan, (PREC_CUTYPE *)d_signal, (PREC_CUTYPE *)d_signal, CUFFT_INVERSE);
SCALE_TYPE alpha = 1.0 / signal_size;
cublasscal(handle, signal_size,
&alpha,
d_signal, 1);
// Copy device memory to host
cudaMemcpy(return_data, d_signal, mem_size,
cudaMemcpyDeviceToHost);
// cleanup memory
cublasDestroy(handle);
cufftDestroy(plan);
cudaFree(d_signal);
Py_DECREF(h_signal_array);
return(PyArray_Return(return_object));
}
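/*
 Note on scaling: cuFFT transforms are unnormalized, so an inverse transform of fft(x)
 returns N*x. The cublas*scal call above rescales the inverse result by alpha = 1/N to
 recover the input. A tiny worked example for N = 4:
     x = [1, 2, 3, 4]  ->  fft(x) = [10, -2+2i, -2, -2-2i]
     unnormalized inverse of that spectrum = [4, 8, 12, 16]; after scaling by 1/4 -> [1, 2, 3, 4]
*/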
|
dbd691487ab4daf9e62c07f16d34c07ec58c08b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
 * Applies the sine function element-wise: C[i] = sin(A[i])
 * @param A the input array allocated on the GPU
 * @param C the output array allocated on the GPU, same length as A
 * @param size total number of elements of the matrix
 */
extern "C"
__global__ void matrix_sin(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = sin(A[index]);
}
} | dbd691487ab4daf9e62c07f16d34c07ec58c08b5.cu | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
 * Applies the sine function element-wise: C[i] = sin(A[i])
 * @param A the input array allocated on the GPU
 * @param C the output array allocated on the GPU, same length as A
 * @param size total number of elements of the matrix
 */
extern "C"
__global__ void matrix_sin(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = sin(A[index]);
}
} |
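/*
 A minimal host-side launch sketch for matrix_sin (not part of the original kernel file;
 d_A and d_C are assumed device buffers of `size` doubles, and 256 is an arbitrary block size):

     unsigned int threads = 256;
     unsigned int blocks = (size + threads - 1) / threads;   // ceil(size / threads)
     matrix_sin<<<blocks, threads>>>(d_A, d_C, size);
*/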
845ac23018b54b5fcf2f71218a74b37a8d42e041.hip | // !!! This is a file automatically generated by hipify!!!
// includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
//------------- Function to sum over the distribution functions (velocities)
__global__ void densidad_suma_doble_if(float * pdist,float * psum, int node) {
int ndist=9; // number of distribution functions
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x<node){ // parallelize over each node
if (y<ndist){ // parallelize over each component of the distribution-function vector
// several y-threads accumulate into the same psum[x], so the addition must be atomic
atomicAdd(&psum[x], pdist[(x*ndist+y)]);}
}
}
// node == x
// velocity == y
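/*
 A minimal 2D launch sketch (illustrative only): one thread per (node, distribution-function)
 pair, with an arbitrary 16x16 block shape and ndist fixed to 9 as in the kernel. d_pdist and
 d_psum are assumed device buffers.

     dim3 block(16, 16);
     dim3 grid((node + block.x - 1) / block.x, (9 + block.y - 1) / block.y);
     hipLaunchKernelGGL(densidad_suma_doble_if, grid, block, 0, 0, d_pdist, d_psum, node);
*/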
| 845ac23018b54b5fcf2f71218a74b37a8d42e041.cu | // includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
//------------- Function to sum over the distribution functions (velocities)
__global__ void densidad_suma_doble_if(float * pdist,float * psum, int node) {
int ndist=9; // number of distribution functions
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x<node){ // parallelize over each node
if (y<ndist){ // parallelize over each component of the distribution-function vector
// several y-threads accumulate into the same psum[x], so the addition must be atomic
atomicAdd(&psum[x], pdist[(x*ndist+y)]);}
}
}
// node == x
// velocity == y
|
ed03d5189f73897eb228047080b96ca9736d543a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define error 1e-6
#define BLOCK_SIZE 32
///////////////////////////////////////// UTILITIES ////////////////////////////////////////////////////////////////////////////////////////////
/*
**********************************************************************
function name: init_Array
description: create a matrix with random values
parameters:
&x pointer to a row x col Matrix
return: None
**********************************************************************
*/
void init_Array(float *x, int m, int n){
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
x[i * n + j] = rand() % 1000;
}
}
}
/*
**********************************************************************
function name: init_0_Array
description: create a matrix that contains a high number of zero values, but every row has at least one non-zero value
parameters:
&x pointer to a row x col Matrix
return: None
**********************************************************************
*/
void init_0_Array(float *x, int row, int col){
bool ultimo = false;
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
if(j == col - 1 && !ultimo){
ultimo = true;
}
float r = (float) rand() / RAND_MAX;
if(r <= 0.25){
x[i*col + j] = rand() % 1000;
ultimo = false;
} else{
if (!ultimo){
x[i*col + j] = 0;
} else {
x[i*col + j] = rand() % 1000;
ultimo = false;
}
}
}
}
}
/*
**********************************************************************
function name: count_Num
description: count the non-zero elements of a matrix
parameters:
&x pointer to a row x col Matrix
return: the total number of non-zero elements
**********************************************************************
*/
int count_Num(float *x, int row, int col){
int count = 0;
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
if(x[i*col + j] != 0){
count++;
}
}
}
return count;
}
/*
**********************************************************************
function name: sparse_matrix
description: convert a matrix into sparse CSR format
parameters:
&x pointer to a row x col matrix
&rows pointer to a rows + 1 vector: points to the first element in each row and assigns it the value of its index in value
&cols pointer to a size vector: the column to which each non-zero value belongs
&val pointer to a size vector: contains every non-zero value of the x matrix
return: None
**********************************************************************
*/
void sparse_matrix(float *x, int *rows, int *cols, float *val, int row, int col){
int antes = -1;
int count = 0;
int count_row = 0;
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
if(x[i*col + j] != 0){
if(i != antes){
rows[count_row] = count;
antes = i;
count_row++;
}
cols[count] = j;
val[count] = x[i*col + j];
count++;
}
}
}
}
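/*
 Worked CSR example (illustrative only): for the 3 x 4 matrix
         | 5 0 0 1 |
     x = | 0 8 0 0 |
         | 0 0 3 7 |
 sparse_matrix() produces
     val  = [5, 1, 8, 3, 7]   // the 5 non-zero entries, scanned row by row
     cols = [0, 3, 1, 2, 3]   // column index of each entry
     rows = [0, 2, 3]         // row i owns entries val[rows[i]] .. val[rows[i+1]-1]
 and the caller appends rows[3] = 5 (the total count), as sparse_preparation() does below.
*/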
/*
**********************************************************************
function name: cpuSecond()
description: returns the current wall-clock time
return: the time in seconds (with microsecond resolution)
**********************************************************************
*/
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
/*
**********************************************************************
function name: print_matrix
description: print a matrix compose by (float) elements
parameters:
&a pointer to a row x col Matrix
return: None
**********************************************************************
*/
void print_matrix(float *a, int row, int col){
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
printf("%f ",a[i * col + j]);
}
printf("\n");
}
printf("\n");
}
/*
**********************************************************************
function name: print_imatrix
description: print a matrix compose by (int) elements
parameters:
&a pointer to a row x col Matrix
return: None
**********************************************************************
*/
void print_imatrix(int *a, int row, int col){
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
printf("%d ",a[i * col + j]);
}
printf("\n");
}
printf("\n");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////// GPU KERNEL METHODS //////////////////////////////////////////////////////////////////
/*
*********************************************************************
function name: mmatrix
description: product of two matrices (not necessarily square) on the GPU
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE,(m + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
__global__ void mmatrix(float *a, float *b, float *c, int m, int n, int k){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0;
if(col < k && row < m){
for(int i = 0; i < n; i++){
sum += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
}
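/*
 Tiny worked example (illustrative only): with m = n = k = 2,
     A = | 1 2 |   B = | 5 6 |   =>   C = A * B = | 19 22 |
         | 3 4 |       | 7 8 |                    | 43 50 |
 each thread computes a single C[row][col] as the dot product of one row of A with one column of B.
*/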
/*
*********************************************************************
function name: gpuMatrixConv
description: implementation of the convolution operation
parameters:
&a GPU device pointer to a row1 X col1 matrix (A)
&b GPU device pointer to a row2 X col2 matrix (B)
&c GPU device output purpose pointer to a row3 X col3 matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((col3 + BLOCK_SIZE - 1) / BLOCK_SIZE,(row3 + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further speedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
__global__ void gpuMatrixConv(float *a, float *b, float *c, int row1, int col1, int row2, int col2, int row3, int col3)
{
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0;
if (row < row3 && col < col3) {
for (int i = 0; i < row2; i++) {
for (int j = 0; j < col2; j++) {
sum += a[(row + i) * col1 + col + j] * b[i * row2 + j];
}
}
c[row * col3 + col] = sum;
}
}
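/*
 Size relation used by this "valid" convolution (no padding, stride 1):
     row3 = row1 - row2 + 1,   col3 = col1 - col2 + 1
 e.g. a 6 x 8 input convolved with a 3 x 3 kernel yields a 4 x 6 output, and every
 output element is the sum of 3 * 3 = 9 products.
*/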
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((col + BLOCK_SIZE - 1) / BLOCK_SIZE, (row + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
__global__ void gpuMatrixTranpose(float *a, float *b, int rows, int cols){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < rows && col < cols){
int pos_a = row * cols + col;
int pos_b = col * rows + row;
b[pos_b] = a[pos_a];
}
}
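/*
 An optional shared-memory tiled variant (a sketch, not part of the original file): the naive
 kernel above reads `a` coalesced but writes `b` with a stride of `rows`, so staging a tile in
 shared memory lets both the global read and the global write be coalesced. The "+ 1" padding
 avoids shared-memory bank conflicts. It uses the same dimGrid/dimBlock as gpuMatrixTranpose.
*/
__global__ void gpuMatrixTranposeTiled(const float *a, float *b, int rows, int cols){
    __shared__ float tile[BLOCK_SIZE][BLOCK_SIZE + 1];
    unsigned int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    unsigned int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    if(row < rows && col < cols){
        tile[threadIdx.y][threadIdx.x] = a[row * cols + col];   // coalesced read of a
    }
    __syncthreads();
    // swap the block coordinates; threadIdx.x stays the fastest-varying index so the write is coalesced
    unsigned int t_col = blockIdx.y * BLOCK_SIZE + threadIdx.x;   // row index into a
    unsigned int t_row = blockIdx.x * BLOCK_SIZE + threadIdx.y;   // column index into a
    if(t_row < cols && t_col < rows){
        b[t_row * rows + t_col] = tile[threadIdx.x][threadIdx.y];
    }
}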
/*
*********************************************************************
function name: gpuMVSparse
description: implementation of the sparse (CSR) matrix * vector product
parameters:
    &values GPU device pointer to a size vector (values): the non-zero values of the original matrix
    &vector GPU device pointer to a col vector (x): to multiply the non-zero values
    &rows GPU device pointer to a rows + 1 vector (row): points to the first element in each row and assigns it the value of its index in value
    &cols GPU device pointer to a size vector (col): the column to which each non-zero value belongs
    &res GPU device output purpose pointer to a row vector (C)
    to store the result
Note:
    the kernel is one-dimensional (one thread per matrix row), so it should be launched as:
    unsigned int GRID = (row + BLOCK_SIZE - 1) / BLOCK_SIZE;
    with BLOCK_SIZE threads per block
    further speedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
__global__ void gpuMVSparse(float *values, float *vector, int *rows, int *cols,float *res, int row){
unsigned int Id = threadIdx.x + blockDim.x * blockIdx.x;
if(Id < row){
for(int k = rows[Id]; k < rows[Id+1]; k++){
res[Id] += values[k]*vector[cols[k]];
}
}
}
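/*
 A minimal 1D launch sketch (one thread per matrix row, BLOCK_SIZE threads per block);
 values_GPU, vector_GPU, rows_GPU, cols_GPU and res_GPU are assumed to be the device
 buffers prepared as in sparse_preparation() below:

     unsigned int grid = (row + BLOCK_SIZE - 1) / BLOCK_SIZE;   // ceil(row / BLOCK_SIZE)
     hipLaunchKernelGGL(gpuMVSparse, dim3(grid), dim3(BLOCK_SIZE), 0, 0,
                        values_GPU, vector_GPU, rows_GPU, cols_GPU, res_GPU, row);
*/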
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////// CPU METHODS AND GPU PREPARATION ///////////////////////////////////////////////////////////
/*
**********************************************************************
function name: sparse_preparation
description: compare the CPU and GPU implementations of a matrix * vector multiplication in sparse format
            1. obtain the size of the matrix
            2. create the matrix
            3. count the non-zero values
            4. transform into CSR format
            5. do the operation
            6. compare the results
Optional: print the results
return: None
**********************************************************************
*/
void sparse_preparation(){
int col, row, val;
bool bien = true;
printf("\n");
printf("\n");
SP1: printf("Introduce the rows of A:\n");
fflush(stdout);
val = scanf("%d", &row);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto SP1;
}
SP2: printf("Introduce the columns of A:\n");
fflush(stdout);
val = scanf("%d", &col);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto SP2;
}
double start_GPU, stop_GPU;
double start_CPU, stop_CPU;
double diferencia_CPU, diferencia_GPU;
float *A = (float *)malloc(row * col * sizeof(float));
float *vector = (float *)malloc(col * sizeof(float));
init_Array(vector, 1, col);
init_0_Array(A, row, col);
int size = count_Num(A, row, col);
float *values = (float *)malloc(size * sizeof(float));
int *rows = (int *)malloc((row + 1) * sizeof(int));
int *cols = (int *)malloc(size * sizeof(int));
float *res = (float *)malloc(row * sizeof(float));
float *res_F = (float *)malloc(row * sizeof(float));
sparse_matrix(A, rows, cols, values, row, col);
rows[row] = size;
int *rows_GPU;
int *cols_GPU;
float *values_GPU;
float *res_GPU;
float *vector_GPU;
hipMalloc(&rows_GPU, (row + 1) * sizeof(int));
hipMalloc(&cols_GPU, size * sizeof(int));
hipMalloc(&values_GPU, size * sizeof(float));
hipMalloc(&res_GPU, row * sizeof(float));
hipMalloc(&vector_GPU, col * sizeof(float));
hipMemset(res_GPU, 0, row * sizeof(float));
hipMemcpy(rows_GPU, rows, (row +1) * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cols_GPU, cols, size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(values_GPU, values, size * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(vector_GPU, vector, col * sizeof(float), hipMemcpyHostToDevice);
//Start CPU Part//
start_CPU = cpuSecond();
for(int k = 0; k < row; k++){
res[k] = 0;
}
for(int i = 0; i < row; i++){
for(int k = rows[i]; k < rows[i + 1]; k++){
res[i] += values[k]*vector[cols[k]];
}
}
stop_CPU = cpuSecond();
diferencia_CPU = stop_CPU - start_CPU;
//Stop Cpu Part //
unsigned int GRID = (row + BLOCK_SIZE - 1) / BLOCK_SIZE; // one thread per row; parentheses are required for the ceiling division
//Init GPU part//
start_GPU = cpuSecond();
hipLaunchKernelGGL(( gpuMVSparse), dim3(GRID), dim3(BLOCK_SIZE), 0, 0, values_GPU, vector_GPU, rows_GPU, cols_GPU, res_GPU, row);
hipDeviceSynchronize();
hipMemcpy(res_F, res_GPU, row * sizeof(float), hipMemcpyDeviceToHost);
stop_GPU = cpuSecond();
diferencia_GPU = stop_GPU - start_GPU;
//Stop GPU part//
//Start Checking //
for(int j = 0; j < row; j++){
if(fabs(res_F[j] - res[j]) >= error ){
bien = false;
printf("Error en: %f %f\n", res_F[j], res[j]);
}
}
if(bien){
printf("Comparing the output for each implementation.. Correct!\n");
}else {
printf("Comparing the output for each implementation.. Incorrect!\n");
}
char d;
printf("Do you want to print the matrix:\n");
printf("YES: y or NO: n\n");
fflush(stdout);
scanf(" %c", &d);
if(d == 'y'){
print_matrix(A,row,col);
print_matrix(values, 1, size);
print_imatrix(rows, 1, (col + 1));
print_imatrix(cols, 1 ,size);
print_matrix(res,row,1);
print_matrix(res_F,row,1);
fflush(stdout);
}
printf("Duration of the CPU: %f\n", diferencia_CPU);
printf("Duration of the GPU: %f\n", diferencia_GPU);
// host buffers were allocated with malloc, so release them with free (not delete[])
free(A);
free(vector);
free(cols);
free(rows);
free(res);
free(res_F);
free(values);
hipFree(values_GPU);
hipFree(cols_GPU);
hipFree(rows_GPU);
hipFree(res_GPU);
hipFree(vector_GPU);
}
/*
**********************************************************************
function name: transpose_preparation
description: compare the CPU and GPU implementation of the transpose operation
1. obtain the size of the matrix
2 create the matrix
3 Do the operation
4 compare the results
Optional: print the results
return: None
**********************************************************************
*/
void tranpose_preparation(){
int col, row, val;
bool bien = true;
printf("\n");
printf("\n");
TR1: printf("Introduce the rows of A:\n");
fflush(stdout);
val = scanf("%d", &row);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto TR1;
}
TR2: printf("Introduce the columns of A:\n");
fflush(stdout);
val = scanf("%d", &col);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto TR2;
}
double start_GPU, stop_GPU;
double start_CPU, stop_CPU;
double diferencia_CPU, diferencia_GPU;
float *A = (float *)malloc(row * col * sizeof(float));
float *res = (float *)malloc(row * col * sizeof(float));
float *res_F = (float *)malloc(row * col * sizeof(float));
float *A_GPU;
float *res_GPU;
hipMalloc(&A_GPU, row * col * sizeof(float));
hipMalloc(&res_GPU, row * col * sizeof(float));
init_Array(A, row, col);
hipMemcpy(A_GPU, A, row * col * sizeof(float), hipMemcpyHostToDevice);
//Start CPU Part//
start_CPU = cpuSecond();
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
int pos_a = i * col + j;
int pos_res = j * row + i;
res[pos_res] = A[pos_a];
}
}
stop_CPU = cpuSecond();
diferencia_CPU = stop_CPU - start_CPU;
//Stop Cpu Part //
unsigned int grid_rows = (row + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_colm = (col + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_colm, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//Init GPU part//
start_GPU = cpuSecond();
hipLaunchKernelGGL(( gpuMatrixTranpose), dim3(dimGrid), dim3(dimBlock), 0, 0, A_GPU, res_GPU, row, col);
hipDeviceSynchronize();
hipMemcpy(res_F, res_GPU, row * col * sizeof(float), hipMemcpyDeviceToHost);
stop_GPU = cpuSecond();
diferencia_GPU = stop_GPU - start_GPU;
//Stop GPU part//
//Start Checking //
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
if(fabs(res_F[i * col + j] - res[i*col + j]) >= error ){
bien = false;
printf("Error en: %f %f\n", res_F[i * col + j], res[i * col + j]);
}
}
}
if(bien){
printf("Comparing the output for each implementation.. Correct!\n");
}else {
printf("Comparing the output for each implementation.. Incorrect!\n");
}
char d;
printf("Do you want to print the differents matrix:\n");
printf("YES: y or NO: n\n");
fflush(stdout);
scanf(" %c", &d);
if(d == 'y'){
print_matrix(A,row,col);
print_matrix(res,col,row);
print_matrix(res_F,col,row);
fflush(stdout);
}
printf("Duration of the CPU: %f\n", diferencia_CPU);
printf("Duration of the GPU: %f\n", diferencia_GPU);
// A, res and res_F were allocated with malloc, so plain free() is the matching call
free(A);
free(res);
free(res_F);
hipFree(A_GPU);
hipFree(res_GPU);
}
/*
**********************************************************************
function name: conv_preparation
description: compare the CPU and GPU implementations of the convolution operation
            1. obtain the size of the matrix A and the square matrix B
2 create the matrix A and B with random values
3 Do the operation
4 compare the results
Optional: print the results
return: None
**********************************************************************
*/
void conv_preparation(){
int col1, row1, col2, row2, col3, row3, val;
bool bien = true;
INTRO: printf("\n");
printf("\n");
CO1: printf("Introduce the rows of A:\n");
fflush(stdout);
val = scanf("%d", &row1);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto CO1;
}
CO2: printf("Introduce the colums of A:\n");
fflush(stdout);
val = scanf("%d", &col1);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto CO2;
}
CO3: printf("Introduce the rows and colums of B:\n");
fflush(stdout);
val = scanf("%d", &col2);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto CO3;
}
row2 = col2;
if(row2 >= row1 || col2 >= col1 ) {
printf("You must introduce again the numbers, the matrix (rows and colums) A have to be higher than B\n");
goto INTRO;
}
col3 = col1 - col2 + 1;
row3 = row1 - row2 + 1;
double start_GPU, stop_GPU;
double start_CPU, stop_CPU;
double diferencia_CPU, diferencia_GPU;
float *A = (float *)malloc(row1 * col1 * sizeof(float));
float *B = (float *)malloc(row2 * col2 * sizeof(float));
float *res = (float *)malloc(row3 * col3 * sizeof(float));
float *res_F = (float *)malloc(row3 * col3 * sizeof(float));
float *A_GPU;
float *B_GPU;
float *res_GPU;
hipMalloc(&A_GPU, row1 * col1 * sizeof(float));
hipMalloc(&B_GPU, row2 * col2 * sizeof(float));
hipMalloc(&res_GPU, row3 * col3 * sizeof(float));
init_Array(A, row1, col1);
init_Array(B, row2, col2);
hipMemcpy(A_GPU, A, row1 * col1 * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_GPU, B, row2 * col2 * sizeof(float), hipMemcpyHostToDevice);
//Start CPU Part//
start_CPU = cpuSecond();
int i, j ,k, z;
float sum = 0.0;
for(i = 0; i < row3; i++){
for(z = 0; z < col3; z++){
sum = 0.0;
for(j = 0; j < row2; j++){
for(k = 0; k < col2; k++){
sum += A[(i + j) * col1 + z + k] * B[j * row2 + k];
}
}
res[i * col3 + z] = sum;
}
}
stop_CPU = cpuSecond();
diferencia_CPU = stop_CPU - start_CPU;
//Stop Cpu Part //
unsigned int grid_rows = (row3 + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_colm = (col3 + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_colm, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//Init GPU part//
start_GPU = cpuSecond();
hipLaunchKernelGGL(( gpuMatrixConv), dim3(dimGrid), dim3(dimBlock), 0, 0, A_GPU, B_GPU, res_GPU, row1, col1, row2, col2, row3, col3);
hipDeviceSynchronize();
hipMemcpy(res_F, res_GPU, row3 * col3 * sizeof(float), hipMemcpyDeviceToHost);
stop_GPU = cpuSecond();
diferencia_GPU = stop_GPU - start_GPU;
//Stop GPU part//
//Start Checking //
for(int i = 0; i < row3; i++){
for(int j = 0; j < col3; j++){
if(fabs(res_F[i * col3 + j] - res[i*col3 + j]) >= error ){
bien = false;
printf("Error: %f %f\n", res_F[i * col3 + j], res[i * col3 + j]);
}
}
}
if(bien){
printf("Comparing the output for each implementation.. Correct!\n");
}else {
printf("Comparing the output for each implementation.. Incorrect!\n");
}
char d;
printf("Do you want to print the matrix:\n");
printf("YES: y or NO: n\n");
fflush(stdout);
scanf(" %c", &d);
if(d == 'y'){
print_matrix(A,row1,col1);
print_matrix(B,row2, col2);
print_matrix(res,row3,col3);
print_matrix(res_F,row3,col3);
fflush(stdout);
}
printf("Duration of the CPU: %f\n", diferencia_CPU);
printf("Duration of the GPU: %f\n", diferencia_GPU);
// A, B, res and res_F were allocated with malloc, so plain free() is the matching call
free(A);
free(B);
free(res);
free(res_F);
hipFree(A_GPU);
hipFree(B_GPU);
hipFree(res_GPU);
}
/*
**********************************************************************
function name: matrix_preparation
description: compare the CPU and GPU implementation of the matrix multiplication
1. obtain the size of the matrix A and the matrix B
2 create the matrix A and B with random values
3 Do the operation
4 compare the results
Optional: print the results
return: None
**********************************************************************
*/
void matrix_preparation(){
int m, n, k, val;
printf("\n");
printf("\n");
MA1: printf("Introduce the rows of A:\n");
fflush(stdout);
val = scanf("%d", &m);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto MA1;
}
MA2: printf("Introduce the columns of A:\n");
fflush(stdout);
val = scanf("%d", &n);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto MA2;
}
MA3: printf("Introduce The columns of B:\n");
fflush(stdout);
val = scanf("%d", &k);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto MA3;
}
bool bien = true;
double start_GPU, stop_GPU;
double start_CPU, stop_CPU;
double diferencia_CPU, diferencia_GPU;
float *A = (float *)malloc(m * n * sizeof(float));
float *B = (float *)malloc(n * k * sizeof(float));
float *res = (float *)malloc(m * k * sizeof(float));
float *res_F = (float *)malloc(m * k * sizeof(float));
float *A_GPU;
float *B_GPU;
float *res_GPU;
hipMalloc(&A_GPU, m * n * sizeof(float));
hipMalloc(&B_GPU, n * k * sizeof(float));
hipMalloc(&res_GPU, m * k * sizeof(float));
init_Array(A, m, n);
init_Array(B, n, k);
hipMemcpy(A_GPU, A, m * n * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(B_GPU, B, n * k * sizeof(float), hipMemcpyHostToDevice);
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_colm = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_colm, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//init CPU part//
start_CPU = cpuSecond();
for(int i = 0; i < m; i++){
for(int j = 0; j < k; j++){
float cont = 0.0;
for(int z = 0; z < n; z++){
cont += A[i * n + z] * B[z * k + j];
}
res[i * k + j] = cont;
}
}
stop_CPU = cpuSecond();
diferencia_CPU = stop_CPU - start_CPU;
//init GPU Part//
start_GPU = cpuSecond();
hipLaunchKernelGGL(( mmatrix), dim3(dimGrid), dim3(dimBlock), 0, 0, A_GPU, B_GPU, res_GPU, m, n, k);
hipDeviceSynchronize();
hipMemcpy(res_F, res_GPU, m * k * sizeof(float), hipMemcpyDeviceToHost);
stop_GPU = cpuSecond();
diferencia_GPU = stop_GPU - start_GPU;
//check if it is correct//
for(int i = 0; i < m; i++){
for(int j = 0; j < k; j++){
if(fabs(res_F[i * k + j] - res[i*k + j]) >= error ){
bien = false;
break;
}
}
if(!bien){break;}
}
if(bien){
printf("Comparing the output for each implementation.. Correct!\n");
}else {
printf("Comparing the output for each implementation.. Incorrect!\n");
}
char d;
printf("Do you want to print the matrix:\n");
printf("YES: y or NO: n\n");
fflush(stdout);
scanf(" %c", &d);
if(d == 'y'){
print_matrix(A,m,n);
print_matrix(B,n, k);
print_matrix(res,m,k);
print_matrix(res_F,m,k);
fflush(stdout);
}
printf("Duration of the CPU: %f\n", diferencia_CPU);
printf("Duration of the GPU: %f\n", diferencia_GPU);
// A, B, res and res_F were allocated with malloc, so plain free() is the matching call
free(A);
free(B);
free(res);
free(res_F);
hipFree(A_GPU);
hipFree(B_GPU);
hipFree(res_GPU);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////// MAIN //////////////////////////////////////////////////////////////////////////////
int main( int argc, char *argv[]){
int op, val;
bool salir = false;
START: printf("\n");
printf("\n");
printf("******************************************\n");
printf("* Select the operation you want to do: *\n");
printf("* *\n");
printf("* 1. Matrix Multiplication *\n");
printf("* 2. Matrix Convection *\n");
printf("* 3. Matrix Transpose *\n");
printf("* 4. Matrix-Vector Sparse *\n");
printf("* 5. Exit *\n");
printf("* *\n");
printf("******************************************\n");
fflush(stdout);
printf("Introduce the number of the operation:\n");
fflush(stdout);
val = scanf("%d", &op);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid option, please do it again.\n");
goto START;
}else{
switch(op){
case 1:
matrix_preparation();
fflush(stdout);
break;
case 2:
conv_preparation();
fflush(stdout);
break;
case 3:
tranpose_preparation();
fflush(stdout);
break;
case 4:
sparse_preparation();
fflush(stdout);
break;
case 5:
salir = true;
printf("Successful Exit\n");
fflush(stdout);
break;
default:
printf("You dont select any option, please do it again\n");
fflush(stdout);
break;
}
if(!salir){goto START;}
}
return 0;
} | ed03d5189f73897eb228047080b96ca9736d543a.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define error 1e-6
#define BLOCK_SIZE 32
///////////////////////////////////////// UTILITIES ////////////////////////////////////////////////////////////////////////////////////////////
/*
**********************************************************************
function name: init_Array
description: create a matrix with random values
parameters:
&x pointer to a row x col Matrix
return: None
**********************************************************************
*/
void init_Array(float *x, int m, int n){
for(int i = 0; i < m; i++){
for(int j = 0; j < n; j++){
x[i * n + j] = rand() % 1000;
}
}
}
/*
**********************************************************************
function name: init_0_Array
description: create a matrix that cointains a high number of zero values, but every row have at least one non-zero values
parameters:
&x pointer to a row x col Matrix
return: None
**********************************************************************
*/
void init_0_Array(float *x, int row, int col){
bool ultimo = false;
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
if(j == col - 1 && !ultimo){
ultimo = true;
}
float r = (float) rand() / RAND_MAX;
if(r <= 0.25){
x[i*col + j] = rand() % 1000;
ultimo = false;
} else{
if (!ultimo){
x[i*col + j] = 0;
} else {
x[i*col + j] = rand() % 1000;
ultimo = false;
}
}
}
}
}
/*
**********************************************************************
function name: count_NUM
description: count the non-zero elements of a matrix
parameters:
&x pointer to a row x col Matrix
return: the total number of non-zero elements
**********************************************************************
*/
int count_Num(float *x, int row, int col){
int count = 0;
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
if(x[i*col + j] != 0){
count++;
}
}
}
return count;
}
/*
**********************************************************************
function name: sparse_matrix
description: convert a matric in a sparse matrix format in CSR
parameters:
&x pointer to a row x col matrix
&rows pointer to a rows + 1 vector: points to the first element in each row and assigns it the value of its index in value
&cols pointer to a size vector: the column to which each non-zero value belongs
&val pointer to a size vector: contains every non-zero value of the x matrix
return: None
**********************************************************************
*/
void sparse_matrix(float *x, int *rows, int *cols, float *val, int row, int col){
int antes = -1;
int count = 0;
int count_row = 0;
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
if(x[i*col + j] != 0){
if(i != antes){
rows[count_row] = count;
antes = i;
count_row++;
}
cols[count] = j;
val[count] = x[i*col + j];
count++;
}
}
}
}
/*
**********************************************************************
function name: cpuSecond()
description: returns the current wall-clock time
return: the time in seconds (with microsecond resolution)
**********************************************************************
*/
double cpuSecond() {
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
/*
**********************************************************************
function name: print_matrix
description: print a matrix compose by (float) elements
parameters:
&a pointer to a row x col Matrix
return: None
**********************************************************************
*/
void print_matrix(float *a, int row, int col){
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
printf("%f ",a[i * col + j]);
}
printf("\n");
}
printf("\n");
}
/*
**********************************************************************
function name: print_imatrix
description: print a matrix compose by (int) elements
parameters:
&a pointer to a row x col Matrix
return: None
**********************************************************************
*/
void print_imatrix(int *a, int row, int col){
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
printf("%d ",a[i * col + j]);
}
printf("\n");
}
printf("\n");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////// GPU KERNEL METHODS //////////////////////////////////////////////////////////////////
/*
*********************************************************************
function name: mmatrix
description: dot product of two matrix (not only square) in GPU
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE,(m + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
__global__ void mmatrix(float *a, float *b, float *c, int m, int n, int k){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0;
if(col < k && row < m){
for(int i = 0; i < n; i++){
sum += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
}
/*
*********************************************************************
function name: gpuMatrixConv
description: implementation of the convultion operation
parameters:
&a GPU device pointer to a row1 X col1 matrix (A)
&b GPU device pointer to a row2 X col2 matrix (B)
&c GPU device output purpose pointer to a row3 X col3 matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((col3 + BLOCK_SIZE - 1) / BLOCK_SIZE,(row3 + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
__global__ void gpuMatrixConv(float *a, float *b, float *c, int row1, int col1, int row2, int col2, int row3, int col3)
{
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0;
if (row < row3 && col < col3) {
for (int i = 0; i < row2; i++) {
for (int j = 0; j < col2; j++) {
sum += a[(row + i) * col1 + col + j] * b[i * row2 + j];
}
}
c[row * col3 + col] = sum;
}
}
/*
*********************************************************************
function name: gpu_matrix_transpose
description: matrix transpose
parameters:
&mat_in GPU device pointer to a rows X cols matrix
&mat_out GPU device output purpose pointer to a cols X rows matrix
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((col + BLOCK_SIZE - 1) / BLOCK_SIZE, (row + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
*********************************************************************
*/
__global__ void gpuMatrixTranpose(float *a, float *b, int rows, int cols){
unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row < rows && col < cols){
int pos_a = row * cols + col;
int pos_b = col * rows + row;
b[pos_b] = a[pos_a];
}
}
/*
*********************************************************************
function name: gpuMatrixConv
description: implementation the matrix * vector in sparse format CSR
parameters:
&values GPU device pointer to a size vector (values): the non-zero values for the original matrix
&vector GPU device pointer to a col vector (x): to multiply the non-zero values
&rows GPU device pointer to a rows + 1 vector (row): points to the first element in each row and assigns it the value of its index in value
&cols GPU device pointer to a col size vector (col): the column to which each non-zero value belongs
&res GPU device output purpose pointer to a row vector (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((col3 + BLOCK_SIZE - 1) / BLOCK_SIZE,(row3 + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
*********************************************************************
*/
__global__ void gpuMVSparse(float *values, float *vector, int *rows, int *cols,float *res, int row){
unsigned int Id = threadIdx.x + blockDim.x * blockIdx.x;
if(Id < row){
for(int k = rows[Id]; k < rows[Id+1]; k++){
res[Id] += values[k]*vector[cols[k]];
}
}
}
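/*
 Worked example of one thread's loop (illustrative only): with
     values = [5, 1, 8, 3, 7], cols = [0, 3, 1, 2, 3], rows = [0, 2, 3, 5]
 and vector = [1, 2, 3, 4], the thread with Id = 0 accumulates
     res[0] = 5*vector[0] + 1*vector[3] = 5 + 4 = 9,
 Id = 1 gives 8*vector[1] = 16, and Id = 2 gives 3*vector[2] + 7*vector[3] = 37.
*/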
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////// CPU METHODS AND GPU PREPARATION ///////////////////////////////////////////////////////////
/*
**********************************************************************
function name: sparse_preparation
description: compare the CPU and GPU implementation of a matrix * vector multiplication in sparse format
1. obtain the size of the matrix
2. create the matrix
3. count the non-zero values
4. transform into CSR format
5. do the operation
6. compare the results
Optional: print the results
return: None
**********************************************************************
*/
void sparse_preparation(){
int col, row, val;
bool bien = true;
printf("\n");
printf("\n");
SP1: printf("Introduce the rows of A:\n");
fflush(stdout);
val = scanf("%d", &row);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto SP1;
}
SP2: printf("Introduce the columns of A:\n");
fflush(stdout);
val = scanf("%d", &col);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto SP2;
}
double start_GPU, stop_GPU;
double start_CPU, stop_CPU;
double diferencia_CPU, diferencia_GPU;
float *A = (float *)malloc(row * col * sizeof(float));
float *vector = (float *)malloc(col * sizeof(float));
init_Array(vector, 1, col);
init_0_Array(A, row, col);
int size = count_Num(A, row, col);
float *values = (float *)malloc(size * sizeof(float));
int *rows = (int *)malloc((row + 1) * sizeof(int));
int *cols = (int *)malloc(size * sizeof(int));
float *res = (float *)malloc(row * sizeof(float));
float *res_F = (float *)malloc(row * sizeof(float));
sparse_matrix(A, rows, cols, values, row, col);
rows[row] = size;
int *rows_GPU;
int *cols_GPU;
float *values_GPU;
float *res_GPU;
float *vector_GPU;
cudaMalloc(&rows_GPU, (row + 1) * sizeof(int));
cudaMalloc(&cols_GPU, size * sizeof(int));
cudaMalloc(&values_GPU, size * sizeof(float));
cudaMalloc(&res_GPU, row * sizeof(float));
cudaMalloc(&vector_GPU, col * sizeof(float));
cudaMemset(res_GPU, 0, row * sizeof(float));
cudaMemcpy(rows_GPU, rows, (row +1) * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cols_GPU, cols, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(values_GPU, values, size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(vector_GPU, vector, col * sizeof(float), cudaMemcpyHostToDevice);
//Start CPU Part//
start_CPU = cpuSecond();
for(int k = 0; k < row; k++){
res[k] = 0;
}
for(int i = 0; i < row; i++){
for(int k = rows[i]; k < rows[i + 1]; k++){
res[i] += values[k]*vector[cols[k]];
}
}
stop_CPU = cpuSecond();
diferencia_CPU = stop_CPU - start_CPU;
//Stop Cpu Part //
unsigned int GRID = (row + BLOCK_SIZE - 1) / BLOCK_SIZE; // one thread per row; parentheses are required for the ceiling division
//Init GPU part//
start_GPU = cpuSecond();
gpuMVSparse<<<GRID, BLOCK_SIZE>>>(values_GPU, vector_GPU, rows_GPU, cols_GPU, res_GPU, row);
cudaDeviceSynchronize();
cudaMemcpy(res_F, res_GPU, row * sizeof(float), cudaMemcpyDeviceToHost);
stop_GPU = cpuSecond();
diferencia_GPU = stop_GPU - start_GPU;
//Stop GPU part//
//Start Checking //
for(int j = 0; j < row; j++){
if(fabs(res_F[j] - res[j]) >= error ){
bien = false;
printf("Error en: %f %f\n", res_F[j], res[j]);
}
}
if(bien){
printf("Comparing the output for each implementation.. Correct!\n");
}else {
printf("Comparing the output for each implementation.. Incorrect!\n");
}
char d;
printf("Do you want to print the matrix:\n");
printf("YES: y or NO: n\n");
fflush(stdout);
scanf(" %c", &d);
if(d == 'y'){
print_matrix(A,row,col);
print_matrix(values, 1, size);
print_imatrix(rows, 1, (col + 1));
print_imatrix(cols, 1 ,size);
print_matrix(res,row,1);
print_matrix(res_F,row,1);
fflush(stdout);
}
printf("Duration of the CPU: %f\n", diferencia_CPU);
printf("Duration of the GPU: %f\n", diferencia_GPU);
delete[] A;
delete[] vector;
delete[] cols;
delete[] rows;
delete[] res;
delete[] res_F;
delete[] values;
cudaFree(values_GPU);
cudaFree(cols_GPU);
cudaFree(rows_GPU);
cudaFree(res_GPU);
cudaFree(vector_GPU);
}
/*
**********************************************************************
function name: transpose_preparation
description: compare the CPU and GPU implementation of the transpose operation
1º obtaint the size of the matrix
2º create the matrix
3º Do the operation
4º compare the results
Optional: print the results
return: None
**********************************************************************
*/
void tranpose_preparation(){
int col, row, val;
bool bien = true;
printf("\n");
printf("\n");
TR1: printf("Introduce the rows of A:\n");
fflush(stdout);
val = scanf("%d", &row);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto TR1;
}
TR2: printf("Introduce the columns of A:\n");
fflush(stdout);
val = scanf("%d", &col);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto TR2;
}
double start_GPU, stop_GPU;
double start_CPU, stop_CPU;
double diferencia_CPU, diferencia_GPU;
float *A = (float *)malloc(row * col * sizeof(float));
float *res = (float *)malloc(row * col * sizeof(float));
float *res_F = (float *)malloc(row * col * sizeof(float));
float *A_GPU;
float *res_GPU;
cudaMalloc(&A_GPU, row * col * sizeof(float));
cudaMalloc(&res_GPU, row * col * sizeof(float));
init_Array(A, row, col);
cudaMemcpy(A_GPU, A, row * col * sizeof(float), cudaMemcpyHostToDevice);
//Start CPU Part//
start_CPU = cpuSecond();
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
int pos_a = i * col + j;
int pos_res = j * row + i;
res[pos_res] = A[pos_a];
}
}
stop_CPU = cpuSecond();
diferencia_CPU = stop_CPU - start_CPU;
//Stop Cpu Part //
unsigned int grid_rows = (row + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_colm = (col + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_colm, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//Init GPU part//
start_GPU = cpuSecond();
gpuMatrixTranpose<<<dimGrid, dimBlock>>>(A_GPU, res_GPU, row, col);
cudaDeviceSynchronize();
cudaMemcpy(res_F, res_GPU, row * col * sizeof(float), cudaMemcpyDeviceToHost);
stop_GPU = cpuSecond();
diferencia_GPU = stop_GPU - start_GPU;
//Stop GPU part//
//Start Checking //
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
if(fabs(res_F[i * col + j] - res[i*col + j]) >= error ){
bien = false;
printf("Error en: %f %f\n", res_F[i * col + j], res[i * col + j]);
}
}
}
if(bien){
printf("Comparing the output for each implementation.. Correct!\n");
}else {
printf("Comparing the output for each implementation.. Incorrect!\n");
}
char d;
printf("Do you want to print the differents matrix:\n");
printf("YES: y or NO: n\n");
fflush(stdout);
scanf(" %c", &d);
if(d == 'y'){
print_matrix(A,row,col);
print_matrix(res,col,row);
print_matrix(res_F,col,row);
fflush(stdout);
}
printf("Duration of the CPU: %f\n", diferencia_CPU);
printf("Duration of the GPU: %f\n", diferencia_GPU);
cudaFreeHost(A);
cudaFreeHost(res);
cudaFreeHost(res_F);
cudaFree(A_GPU);
cudaFree(res_GPU);
}
/*
**********************************************************************
function name: conv_preparation
description: compare the CPU and GPU implementation of the convultion operation
1º obtaint the size of the matrix A and the square matrix B
2º create the matrix A and B with random values
3º Do the operation
4º compare the results
Optional: print the results
return: None
**********************************************************************
*/
void conv_preparation(){
int col1, row1, col2, row2, col3, row3, val;
bool bien = true;
INTRO: printf("\n");
printf("\n");
CO1: printf("Introduce the rows of A:\n");
fflush(stdout);
val = scanf("%d", &row1);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto CO1;
}
CO2: printf("Introduce the colums of A:\n");
fflush(stdout);
val = scanf("%d", &col1);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto CO2;
}
CO3: printf("Introduce the rows and colums of B:\n");
fflush(stdout);
val = scanf("%d", &col2);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto CO3;
}
row2 = col2;
if(row2 >= row1 || col2 >= col1 ) {
printf("You must introduce again the numbers, the matrix (rows and colums) A have to be higher than B\n");
goto INTRO;
}
col3 = col1 - col2 + 1;
row3 = row1 - row2 + 1;
double start_GPU, stop_GPU;
double start_CPU, stop_CPU;
double diferencia_CPU, diferencia_GPU;
float *A = (float *)malloc(row1 * col1 * sizeof(float));
float *B = (float *)malloc(row2 * col2 * sizeof(float));
float *res = (float *)malloc(row3 * col3 * sizeof(float));
float *res_F = (float *)malloc(row3 * col3 * sizeof(float));
float *A_GPU;
float *B_GPU;
float *res_GPU;
cudaMalloc(&A_GPU, row1 * col1 * sizeof(float));
cudaMalloc(&B_GPU, row2 * col2 * sizeof(float));
cudaMalloc(&res_GPU, row3 * col3 * sizeof(float));
init_Array(A, row1, col1);
init_Array(B, row2, col2);
cudaMemcpy(A_GPU, A, row1 * col1 * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_GPU, B, row2 * col2 * sizeof(float), cudaMemcpyHostToDevice);
//Start CPU Part//
start_CPU = cpuSecond();
int i, j ,k, z;
float sum = 0.0;
for(i = 0; i < row3; i++){
for(z = 0; z < col3; z++){
sum = 0.0;
for(j = 0; j < row2; j++){
for(k = 0; k < col2; k++){
sum += A[(i + j) * col1 + z + k] * B[j * row2 + k];
}
}
res[i * col3 + z] = sum;
}
}
stop_CPU = cpuSecond();
diferencia_CPU = stop_CPU - start_CPU;
//Stop Cpu Part //
unsigned int grid_rows = (row3 + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_colm = (col3 + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_colm, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//Init GPU part//
start_GPU = cpuSecond();
gpuMatrixConv<<<dimGrid, dimBlock>>>(A_GPU, B_GPU, res_GPU, row1, col1, row2, col2, row3, col3);
cudaDeviceSynchronize();
cudaMemcpy(res_F, res_GPU, row3 * col3 * sizeof(float), cudaMemcpyDeviceToHost);
stop_GPU = cpuSecond();
diferencia_GPU = stop_GPU - start_GPU;
//Stop GPU part//
//Start Checking //
for(int i = 0; i < row3; i++){
for(int j = 0; j < col3; j++){
if(fabs(res_F[i * col3 + j] - res[i*col3 + j]) >= error ){
bien = false;
printf("Error: %f %f\n", res_F[i * col3 + j], res[i * col3 + j]);
}
}
}
if(bien){
printf("Comparing the output for each implementation.. Correct!\n");
}else {
printf("Comparing the output for each implementation.. Incorrect!\n");
}
char d;
printf("Do you want to print the matrix:\n");
printf("YES: y or NO: n\n");
fflush(stdout);
scanf(" %c", &d);
if(d == 'y'){
print_matrix(A,row1,col1);
print_matrix(B,row2, col2);
print_matrix(res,row3,col3);
print_matrix(res_F,row3,col3);
fflush(stdout);
}
printf("Duration of the CPU: %f\n", diferencia_CPU);
printf("Duration of the GPU: %f\n", diferencia_GPU);
cudaFreeHost(A);
cudaFreeHost(B);
cudaFreeHost(res);
cudaFreeHost(res_F);
cudaFree(A_GPU);
cudaFree(B_GPU);
cudaFree(res_GPU);
}
/*
**********************************************************************
function name: matrix_preparation
description: compare the CPU and GPU implementation of the matrix multiplication
1º obtaint the size of the matrix A and the matrix B
2º create the matrix A and B with random values
3º Do the operation
4º compare the results
Optional: print the results
return: None
**********************************************************************
*/
void matrix_preparation(){
int m, n, k, val;
printf("\n");
printf("\n");
MA1: printf("Introduce the rows of A:\n");
fflush(stdout);
val = scanf("%d", &m);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto MA1;
}
MA2: printf("Introduce the columns of A:\n");
fflush(stdout);
val = scanf("%d", &n);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto MA2;
}
MA3: printf("Introduce The columns of B:\n");
fflush(stdout);
val = scanf("%d", &k);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid number, please do it again.\n");
printf("\n");
goto MA3;
}
bool bien = true;
double start_GPU, stop_GPU;
double start_CPU, stop_CPU;
double diferencia_CPU, diferencia_GPU;
float *A = (float *)malloc(m * n * sizeof(float));
float *B = (float *)malloc(n * k * sizeof(float));
float *res = (float *)malloc(m * k * sizeof(float));
float *res_F = (float *)malloc(m * k * sizeof(float));
float *A_GPU;
float *B_GPU;
float *res_GPU;
cudaMalloc(&A_GPU, m * n * sizeof(float));
cudaMalloc(&B_GPU, n * k * sizeof(float));
cudaMalloc(&res_GPU, m * k * sizeof(float));
init_Array(A, m, n);
init_Array(B, n, k);
cudaMemcpy(A_GPU, A, m * n * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(B_GPU, B, n * k * sizeof(float), cudaMemcpyHostToDevice);
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_colm = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_colm, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//init CPU part//
start_CPU = cpuSecond();
for(int i = 0; i < m; i++){
for(int j = 0; j < k; j++){
float cont = 0.0;
for(int z = 0; z < n; z++){
cont += A[i * n + z] * B[z * k + j];
}
res[i * k + j] = cont;
}
}
stop_CPU = cpuSecond();
diferencia_CPU = stop_CPU - start_CPU;
//init GPU Part//
start_GPU = cpuSecond();
mmatrix<<<dimGrid, dimBlock>>>(A_GPU, B_GPU, res_GPU, m, n, k);
cudaDeviceSynchronize();
cudaMemcpy(res_F, res_GPU, m * k * sizeof(float), cudaMemcpyDeviceToHost);
stop_GPU = cpuSecond();
diferencia_GPU = stop_GPU - start_GPU;
//check if it is correct//
for(int i = 0; i < m; i++){
for(int j = 0; j < k; j++){
if(fabs(res_F[i * k + j] - res[i*k + j]) >= error ){
bien = false;
break;
}
}
if(!bien){break;}
}
if(bien){
printf("Comparing the output for each implementation.. Correct!\n");
}else {
printf("Comparing the output for each implementation.. Incorrect!\n");
}
char d;
printf("Do you want to print the matrix:\n");
printf("YES: y or NO: n\n");
fflush(stdout);
scanf(" %c", &d);
if(d == 'y'){
print_matrix(A,m,n);
print_matrix(B,n, k);
print_matrix(res,m,k);
print_matrix(res_F,m,k);
fflush(stdout);
}
printf("Duration of the CPU: %f\n", diferencia_CPU);
printf("Duration of the GPU: %f\n", diferencia_GPU);
cudaFreeHost(A);
cudaFreeHost(B);
cudaFreeHost(res);
cudaFreeHost(res_F);
cudaFree(A_GPU);
cudaFree(B_GPU);
cudaFree(res_GPU);
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////// MAIN //////////////////////////////////////////////////////////////////////////////
int main( int argc, char *argv[]){
int op, val;
bool salir = false;
START: printf("\n");
printf("\n");
printf("******************************************\n");
printf("* Select the operation you want to do: *\n");
printf("* *\n");
printf("* 1. Matrix Multiplication *\n");
printf("* 2. Matrix Convection *\n");
printf("* 3. Matrix Transpose *\n");
printf("* 4. Matrix-Vector Sparse *\n");
printf("* 5. Exit *\n");
printf("* *\n");
printf("******************************************\n");
fflush(stdout);
printf("Introduce the number of the operation:\n");
fflush(stdout);
val = scanf("%d", &op);
if(val == 0) {
while ( (val = getchar()) != EOF && val != '\n' );
printf("You don't introduce a valid option, please do it again.\n");
goto START;
}else{
switch(op){
case 1:
matrix_preparation();
fflush(stdout);
break;
case 2:
conv_preparation();
fflush(stdout);
break;
case 3:
tranpose_preparation();
fflush(stdout);
break;
case 4:
sparse_preparation();
fflush(stdout);
break;
case 5:
salir = true;
printf("Successful Exit\n");
fflush(stdout);
break;
default:
printf("You dont select any option, please do it again\n");
fflush(stdout);
break;
}
if(!salir){goto START;}
}
return 0;
} |
3d33d7e3c63adf9428542849136092b58026f717.hip | // !!! This is a file automatically generated by hipify!!!
/*
* TrajectoryTest.cpp
*
* Created on: 21 gru 2019
* Author: pkua
*/
#include <catch2/catch.hpp>
#include <sstream>
#include "utils/CudaCheck.h"
#include "simulation/Trajectory.h"
TEST_CASE("Trajectory: adding") {
SECTION("empty") {
Trajectory trajectory;
REQUIRE(trajectory.getSize() == 0);
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 0);
}
SECTION("simple") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
trajectory.addPoint(Point{4, 5}, false);
REQUIRE(trajectory.getSize() == 3);
REQUIRE(trajectory[0] == Point{0, 1});
REQUIRE(trajectory[1] == Point{2, 3});
REQUIRE(trajectory[2] == Point{4, 5});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 0);
}
SECTION("one accepted") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, false);
REQUIRE(trajectory.getSize() == 3);
REQUIRE(trajectory[0] == Point{0, 1});
REQUIRE(trajectory[1] == Point{2, 3});
REQUIRE(trajectory[2] == Point{4, 5});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 1);
}
SECTION("first accepted should throw") {
Trajectory trajectory;
REQUIRE_THROWS(trajectory.addPoint(Point{2, 3}, true));
}
SECTION("clear") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, false);
trajectory.clear();
REQUIRE(trajectory.getSize() == 0);
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 0);
}
}
TEST_CASE("Trajectory: access") {
SECTION("first") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
trajectory.addPoint(Point{4, 5}, false);
REQUIRE(trajectory.getFirst() == Point{0, 1});
}
SECTION("last") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
trajectory.addPoint(Point{4, 5}, false);
REQUIRE(trajectory.getLast() == Point{4, 5});
}
}
TEST_CASE("Trajectory: append") {
SECTION("easy") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
trajectory.addPoint(Point{4, 5}, false);
Trajectory another;
another.addPoint(Point{4, 5}, false);
another.addPoint(Point{6, 7}, false);
trajectory.appendAnotherTrajectory(another);
REQUIRE(trajectory.getSize() == 4);
REQUIRE(trajectory[0] == Point{0, 1});
REQUIRE(trajectory[1] == Point{2, 3});
REQUIRE(trajectory[2] == Point{4, 5});
REQUIRE(trajectory[3] == Point{6, 7});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 0);
}
SECTION("accepted steps should add") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, false);
Trajectory another;
another.addPoint(Point{4, 5}, false);
another.addPoint(Point{6, 7}, true);
trajectory.appendAnotherTrajectory(another);
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 2);
}
SECTION("appending on empty") {
Trajectory another;
another.addPoint(Point{0, 1}, false);
another.addPoint(Point{2, 3}, true);
another.addPoint(Point{4, 5}, false);
Trajectory trajectory;
trajectory.appendAnotherTrajectory(another);
REQUIRE(trajectory.getSize() == 3);
REQUIRE(trajectory[0] == Point{0, 1});
REQUIRE(trajectory[1] == Point{2, 3});
REQUIRE(trajectory[2] == Point{4, 5});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 1);
}
SECTION("non-equal last and first steps should throw") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
Trajectory another;
another.addPoint(Point{4, 5}, false);
REQUIRE_THROWS(trajectory.appendAnotherTrajectory(another));
}
}
TEST_CASE("Trajectory: storing") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, false);
std::ostringstream out;
trajectory.store(out);
std::ostringstream outExpected;
outExpected << "0 1" << std::endl << "2 3" << std::endl << "4 5" << std::endl;
REQUIRE(out.str() == outExpected.str());
}
TEST_CASE("Trajectory: copy from GPU") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, true);
std::vector<Point> cpuVector = {{6, 7}, {8, 9}};
Point *gpuVector;
cudaCheck( hipMalloc(&gpuVector, sizeof(Point) * 2) );
cudaCheck( hipMemcpy(gpuVector, cpuVector.data(), sizeof(Point) * 2, hipMemcpyHostToDevice) );
trajectory.copyGPUData(gpuVector, 2, 1);
cudaCheck( hipFree(gpuVector) );
REQUIRE(trajectory.getSize() == 2);
REQUIRE(trajectory[0] == Point{6, 7});
REQUIRE(trajectory[1] == Point{8, 9});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 1);
}
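// Hedged note (assumption, for readability only): cudaCheck() comes from utils/CudaCheck.h, which is not shown here. A wrapper of the
// following shape is a common pattern and is assumed, not confirmed, to match the project's helper:
//
//     inline void cudaCheck(hipError_t status) {
//         if (status != hipSuccess)
//             throw std::runtime_error(hipGetErrorString(status));
//     }
//
// i.e. every runtime call in the test above is verified, and a failure surfaces as an exception instead of silently corrupting the
// comparison of the copied points.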
| 3d33d7e3c63adf9428542849136092b58026f717.cu | /*
* TrajectoryTest.cpp
*
 * Created on: 21 Dec 2019
* Author: pkua
*/
#include <catch2/catch.hpp>
#include <sstream>
#include "utils/CudaCheck.h"
#include "simulation/Trajectory.h"
TEST_CASE("Trajectory: adding") {
SECTION("empty") {
Trajectory trajectory;
REQUIRE(trajectory.getSize() == 0);
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 0);
}
SECTION("simple") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
trajectory.addPoint(Point{4, 5}, false);
REQUIRE(trajectory.getSize() == 3);
REQUIRE(trajectory[0] == Point{0, 1});
REQUIRE(trajectory[1] == Point{2, 3});
REQUIRE(trajectory[2] == Point{4, 5});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 0);
}
SECTION("one accepted") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, false);
REQUIRE(trajectory.getSize() == 3);
REQUIRE(trajectory[0] == Point{0, 1});
REQUIRE(trajectory[1] == Point{2, 3});
REQUIRE(trajectory[2] == Point{4, 5});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 1);
}
SECTION("first accepted should throw") {
Trajectory trajectory;
REQUIRE_THROWS(trajectory.addPoint(Point{2, 3}, true));
}
SECTION("clear") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, false);
trajectory.clear();
REQUIRE(trajectory.getSize() == 0);
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 0);
}
}
TEST_CASE("Trajectory: access") {
SECTION("first") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
trajectory.addPoint(Point{4, 5}, false);
REQUIRE(trajectory.getFirst() == Point{0, 1});
}
SECTION("last") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
trajectory.addPoint(Point{4, 5}, false);
REQUIRE(trajectory.getLast() == Point{4, 5});
}
}
TEST_CASE("Trajectory: append") {
SECTION("easy") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
trajectory.addPoint(Point{4, 5}, false);
Trajectory another;
another.addPoint(Point{4, 5}, false);
another.addPoint(Point{6, 7}, false);
trajectory.appendAnotherTrajectory(another);
REQUIRE(trajectory.getSize() == 4);
REQUIRE(trajectory[0] == Point{0, 1});
REQUIRE(trajectory[1] == Point{2, 3});
REQUIRE(trajectory[2] == Point{4, 5});
REQUIRE(trajectory[3] == Point{6, 7});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 0);
}
SECTION("accepted steps should add") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, false);
Trajectory another;
another.addPoint(Point{4, 5}, false);
another.addPoint(Point{6, 7}, true);
trajectory.appendAnotherTrajectory(another);
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 2);
}
SECTION("appending on empty") {
Trajectory another;
another.addPoint(Point{0, 1}, false);
another.addPoint(Point{2, 3}, true);
another.addPoint(Point{4, 5}, false);
Trajectory trajectory;
trajectory.appendAnotherTrajectory(another);
REQUIRE(trajectory.getSize() == 3);
REQUIRE(trajectory[0] == Point{0, 1});
REQUIRE(trajectory[1] == Point{2, 3});
REQUIRE(trajectory[2] == Point{4, 5});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 1);
}
SECTION("non-equal last and first steps should throw") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, false);
Trajectory another;
another.addPoint(Point{4, 5}, false);
REQUIRE_THROWS(trajectory.appendAnotherTrajectory(another));
}
}
TEST_CASE("Trajectory: storing") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, false);
std::ostringstream out;
trajectory.store(out);
std::ostringstream outExpected;
outExpected << "0 1" << std::endl << "2 3" << std::endl << "4 5" << std::endl;
REQUIRE(out.str() == outExpected.str());
}
TEST_CASE("Trajectory: copy from GPU") {
Trajectory trajectory;
trajectory.addPoint(Point{0, 1}, false);
trajectory.addPoint(Point{2, 3}, true);
trajectory.addPoint(Point{4, 5}, true);
std::vector<Point> cpuVector = {{6, 7}, {8, 9}};
Point *gpuVector;
cudaCheck( cudaMalloc(&gpuVector, sizeof(Point) * 2) );
cudaCheck( cudaMemcpy(gpuVector, cpuVector.data(), sizeof(Point) * 2, cudaMemcpyHostToDevice) );
trajectory.copyGPUData(gpuVector, 2, 1);
cudaCheck( cudaFree(gpuVector) );
REQUIRE(trajectory.getSize() == 2);
REQUIRE(trajectory[0] == Point{6, 7});
REQUIRE(trajectory[1] == Point{8, 9});
REQUIRE(trajectory.getNumberOfAcceptedSteps() == 1);
}
|
3fab72f161de0c513a4cd939601e6d6d0341ede8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "bhsparse_spmv_cuda.h"
#include "mmio.h"
#include <unordered_set>
#include<mpi.h>
//#include "mpi-ext.h" /* Needed for CUDA-aware check */
#define MAX_STRING_LENGTH 128
long strideCounts = 0;
char matName[MAX_STRING_LENGTH];
int testType = 0, mpi_rank, nRanks, MASTER = 0, sqrRank, row_rank, col_rank, firstRow, firstCol, total_sparsity = 0,
max_sparsity = 0, transactionByte = 128;
MPI_Comm commrow;
MPI_Comm commcol;
hipsparseStatus_t cusparse_spmv(hipsparseHandle_t handle, hipsparseMatDescr_t descr,
int m, int n, int nnz,
int *csrRowPtrA, int *csrColIdxA, double *csrValA,
double *x, double *y, double alpha, double beta)
{
return hipsparseDcsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, &alpha, descr, csrValA, csrRowPtrA, csrColIdxA, x, &beta, y);
}
hipsparseStatus_t cusparse_spmv(hipsparseHandle_t handle, hipsparseMatDescr_t descr,
int m, int n, int nnz,
int *csrRowPtrA, int *csrColIdxA, float *csrValA,
float *x, float *y, float alpha, float beta)
{
return hipsparseScsrmv(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, &alpha, descr, csrValA, csrRowPtrA, csrColIdxA, x, &beta, y);
}
template <unsigned int THREADS_PER_VECTOR>
void cusp_spmv(int m, int n, int nnz, int *svm_csrRowPtrA, int *svm_csrColIdxA, double *svm_csrValA, double *svm_x, double *svm_y)
{
const size_t THREADS_PER_BLOCK = 128;
const size_t VECTORS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_VECTOR;
const size_t NUM_BLOCKS = ceil((double)m / (double)VECTORS_PER_BLOCK);
hipLaunchKernelGGL(( spmv_csr_vector_kernel
<int, double, VECTORS_PER_BLOCK, THREADS_PER_VECTOR>)
, dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0,
m, svm_csrRowPtrA, svm_csrColIdxA, svm_csrValA, svm_x, svm_y);
}
template <unsigned int THREADS_PER_VECTOR>
void cusp_spmv(int m, int n, int nnz, int *svm_csrRowPtrA, int *svm_csrColIdxA, float *svm_csrValA, float *svm_x, float *svm_y)
{
const size_t THREADS_PER_BLOCK = 128;
const size_t VECTORS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_VECTOR;
const size_t NUM_BLOCKS = ceil((double)m / (double)VECTORS_PER_BLOCK);
hipLaunchKernelGGL(( spmv_csr_vector_kernel
<int, float, VECTORS_PER_BLOCK, THREADS_PER_VECTOR>)
, dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0,
m, svm_csrRowPtrA, svm_csrColIdxA, svm_csrValA, svm_x, svm_y);
}
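// Note on the hipified launches above: hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), sharedMemBytes, stream, args...) is the HIP
// spelling of the CUDA triple-chevron launch kernel<<<grid, block, sharedMemBytes, stream>>>(args...); here shared memory and stream are 0,
// i.e. the default stream. For reference, the CUDA form of the float overload (as in the original .cu source) is:
//
//     spmv_csr_vector_kernel<int, float, VECTORS_PER_BLOCK, THREADS_PER_VECTOR>
//         <<<NUM_BLOCKS, THREADS_PER_BLOCK>>>
//         (m, svm_csrRowPtrA, svm_csrColIdxA, svm_csrValA, svm_x, svm_y);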
void call_cusparse_ref(int m, int n, int nnz,
int *csrRowPtrA, int *csrColIdxA, value_type *csrValA,
value_type *x, value_type *y, value_type *y_ref)
{
// prepare shared virtual memory (unified memory)
#if USE_SVM_ALWAYS
cout << endl << "cuSPARSE is using shared virtual memory (unified memory).";
int *svm_csrRowPtrA;
int *svm_csrColIdxA;
value_type *svm_csrValA;
value_type *svm_x;
value_type *svm_y;
checkCudaErrors(hipMallocManaged(&svm_csrRowPtrA, (m+1) * sizeof(int)));
checkCudaErrors(hipMallocManaged(&svm_csrColIdxA, nnz * sizeof(int)));
checkCudaErrors(hipMallocManaged(&svm_csrValA, nnz * sizeof(value_type)));
memcpy(svm_csrRowPtrA, csrRowPtrA, (m+1) * sizeof(int));
memcpy(svm_csrColIdxA, csrColIdxA, nnz * sizeof(int));
memcpy(svm_csrValA, csrValA, nnz * sizeof(value_type));
checkCudaErrors(hipMallocManaged(&svm_x, n * sizeof(value_type)));
memcpy(svm_x, x, n * sizeof(value_type));
checkCudaErrors(hipMallocManaged(&svm_y, m * sizeof(value_type)));
memcpy(svm_y, y, m * sizeof(value_type));
// prepare device memory
#else
cout << endl << "cuSPARSE is using dedicated GPU memory.";
int *d_csrRowPtrA;
int *d_csrColIdxA;
value_type *d_csrValA;
value_type *d_x;
value_type *d_y;
checkCudaErrors(hipMalloc((void **)&d_csrRowPtrA, (m+1) * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_csrColIdxA, nnz * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_csrValA, nnz * sizeof(value_type)));
checkCudaErrors(hipMemcpy(d_csrRowPtrA, csrRowPtrA, (m+1) * sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_csrColIdxA, csrColIdxA, nnz * sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_csrValA, csrValA, nnz * sizeof(value_type), hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&d_x, n * sizeof(value_type)));
checkCudaErrors(hipMemcpy(d_x, x, n * sizeof(value_type), hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&d_y, m * sizeof(value_type)));
checkCudaErrors(hipMemcpy(d_y, y, m * sizeof(value_type), hipMemcpyHostToDevice));
#endif
double gb = (double)((m + 1 + nnz) * sizeof(int) + (2 * nnz + m) * sizeof(value_type));
double gflop = (double)(2 * nnz);
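    // The two figures above drive the reported metrics: gb is the bytes one SpMV must move (the m+1 row-pointer and nnz column-index ints,
    // plus nnz matrix values, nnz gathered x entries and m stores into y), and gflop is 2*nnz (one multiply and one add per stored nonzero).
    // Dividing each by the measured time in ms * 1.0e+6 below therefore yields GB/s and GFlops.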
// run cuSPARSE START
hipsparseHandle_t handle = 0;
hipsparseStatus_t status;
status = hipsparseCreate(&handle);
if (status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE initialization error\n");
//return -1;
}
hipsparseMatDescr_t descr = 0;
status = hipsparseCreateMatDescr(&descr);
if (status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE hipsparseCreateMatDescr error\n");
//return -2;
}
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
value_type alpha = 1.0;
value_type beta = 0.0;
checkCudaErrors(hipDeviceSynchronize());
bhsparse_timer cusparse_timer;
cusparse_timer.start();
for (int i = 0; i < NUM_RUN; i++)
{
#if USE_SVM_ALWAYS
status = cusparse_spmv(handle, descr, m, n, nnz, svm_csrRowPtrA, svm_csrColIdxA, svm_csrValA, svm_x, svm_y, alpha, beta);
#else
status = cusparse_spmv(handle, descr, m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y, alpha, beta);
#endif
}
checkCudaErrors(hipDeviceSynchronize());
if (status != HIPSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE cusparseS/Dcsrmv error\n");
//return -3;
}
double cusparseTime = cusparse_timer.stop() / NUM_RUN;
cout << endl << "Checking cuSPARSE SpMV Correctness ... ";
#if USE_SVM_ALWAYS == 0
value_type *y_cusparse_ref = (value_type *)malloc(m * sizeof(value_type));
checkCudaErrors(hipMemcpy(y_cusparse_ref, d_y, m * sizeof(value_type), hipMemcpyDeviceToHost));
#endif
int error_count = 0;
for (int i = 0; i < m; i++)
#if USE_SVM_ALWAYS
if (y_ref[i] != svm_y[i])
error_count++;
#else
if (y_ref[i] != y_cusparse_ref[i])
error_count++;
#endif
if (error_count)
cout << "NO PASS. Error count = " << error_count << " out of " << m << " entries.";
else
cout << "PASS!";
cout << endl;
cout << "cuSPARSE time = " << cusparseTime
<< " ms. Bandwidth = " << gb/(1.0e+6 * cusparseTime)
<< " GB/s. GFlops = " << gflop/(1.0e+6 * cusparseTime) << " GFlops." << endl << endl;
// run cuSPARSE STOP
char outputFile[100] = "Results/CSR_CUDA_2DSpMV.csv";
FILE *resultCSV;
FILE *checkFile;
if ((checkFile = fopen(outputFile, "r")) != NULL) {
// file exists
fclose(checkFile);
if (!(resultCSV = fopen(outputFile, "a"))) {
fprintf(stderr, "fopen: failed to open %s file\n", outputFile);
exit(EXIT_FAILURE);
}
} else {
if (!(resultCSV = fopen(outputFile, "w"))) {
fprintf(stderr, "fopen: failed to open file %s\n", outputFile);
exit(EXIT_FAILURE);
}
fprintf(resultCSV, "Name,M,N,AvgTime,TotalRun,NonZeroPerRow,NonZeroElements,Bandwidth,Flops,ValueType,Type,Strides,TransactionByte,WordSize\n");
}
fprintf(resultCSV, "%s,%d,%d,%10.6lf,%d,%lf,%d,%lf,%lf,%d,%s,%ld,%d,%d\n", matName, m, n, cusparseTime, NUM_RUN, (double) nnz / m,
nnz, gb / (1.0e+6 * cusparseTime), gflop / (1.0e+6 * cusparseTime), sizeof(value_type), "CUSPARSE", strideCounts,
TRANSACTION_BYTE, TRANSACTION_BYTE/ sizeof(value_type));
if (fclose(resultCSV) != 0) {
fprintf(stderr, "fopen: failed to open file %s\n", outputFile);
exit(EXIT_FAILURE);
}
#if USE_SVM_ALWAYS
checkCudaErrors(hipFree(svm_csrValA));
checkCudaErrors(hipFree(svm_csrRowPtrA));
checkCudaErrors(hipFree(svm_csrColIdxA));
checkCudaErrors(hipFree(svm_x));
checkCudaErrors(hipFree(svm_y));
#else
free(y_cusparse_ref);
checkCudaErrors(hipFree(d_csrRowPtrA));
checkCudaErrors(hipFree(d_csrColIdxA));
checkCudaErrors(hipFree(d_csrValA));
checkCudaErrors(hipFree(d_x));
checkCudaErrors(hipFree(d_y));
#endif
return;
}
void call_cusp_ref(int m, int n, int nnz, int *csrRowPtrA, int *csrColIdxA, value_type *csrValA, value_type *x
, value_type *y, value_type *y_ref)
{
int *d_csrRowPtrA;
int *d_csrColIdxA;
value_type *d_csrValA;
value_type *d_x;
value_type *d_y;
checkCudaErrors(hipMalloc((void **)&d_csrRowPtrA, (m+1) * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_csrColIdxA, nnz * sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_csrValA, nnz * sizeof(value_type)));
checkCudaErrors(hipMemcpy(d_csrRowPtrA, csrRowPtrA, (m+1) * sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_csrColIdxA, csrColIdxA, nnz * sizeof(int), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_csrValA, csrValA, nnz * sizeof(value_type), hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **)&d_x, n * sizeof(value_type)));
checkCudaErrors(hipMalloc((void **)&d_y, m * sizeof(value_type)));
checkCudaErrors(hipMemcpy(d_y, y, m * sizeof(value_type), hipMemcpyHostToDevice));
// cout << endl << "[" << mpi_rank << "] Checking CUSP SpMV Correctness ... ";
checkCudaErrors(hipMemcpy(d_x, x, n * sizeof(value_type), hipMemcpyHostToDevice));
cusp_spmv<32>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
value_type *y_cusp_ref = (value_type *)malloc(m * sizeof(value_type));
checkCudaErrors(hipMemcpy(y_cusp_ref, d_y, m * sizeof(value_type), hipMemcpyDeviceToHost));
checkCudaErrors(hipDeviceSynchronize());
int error_count = 0;
for (int i = 0; i < m; i++)
if (abs(y_ref[i] - y_cusp_ref[i]) > 0.01 * abs(y_ref[i])/*y_ref[i] != y_cusp_ref[i]*/)
error_count++;
if (error_count)
cout << "[" << mpi_rank << "] NO PASS. Error count = " << error_count << " out of " << m << " entries." << endl;
// else
// cout << "PASS!" << endl;
double gb = (double)((m + 1 + nnz) * sizeof(int) + (2 * nnz + m) * sizeof(value_type));
double gflop = (double)(2 * nnz);
// run CUSP START
const int nnz_per_row = nnz / m;
bhsparse_timer cusp_timer;
bhsparse_timer broadcast_timer;
bhsparse_timer mult_timer;
bhsparse_timer reduce_timer;
cusp_timer.start();
double b_time, r_time, m_time, avg_b_time = 0, avg_r_time = 0, avg_m_time = 0;
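    /* Each timed iteration below follows the same 2D SpMV pattern, differing only in the THREADS_PER_VECTOR template argument:
       (1) the diagonal process of each grid column broadcasts its slice of x over commcol,
       (2) the slice is copied to the device and the local CSR block is multiplied by it with the CUSP-style vector kernel,
       (3) the partial y is copied back and summed across commrow with MPI_Reduce into the diagonal process, which holds the result
           slice used as x in the next iteration. */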
if (nnz_per_row <= 2)
{
// cout<< "THREADS_PER_VECTOR = 2" << endl;
MPI_Barrier(MPI_COMM_WORLD);
for (int i = 0; i < NUM_RUN+SKIP; i++) {
// cout << "[" << mpi_rank << "] 2-iter= " << i+1 << " mat= " << matName << endl;
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(hipMemcpy(d_x, x, n * sizeof(value_type), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_y, y, m * sizeof(value_type), hipMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<2>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(hipMemcpy(y, d_y, m * sizeof(value_type), hipMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
else if (nnz_per_row <= 4)
{
MPI_Barrier(MPI_COMM_WORLD);
for (int i = 0; i < NUM_RUN+SKIP; i++) {
// cout << "[" << mpi_rank << "] 4-iter= " << i+1 << " mat= " << matName << endl;
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(hipMemcpy(d_x, x, n * sizeof(value_type), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_y, y, m * sizeof(value_type), hipMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<4>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(hipMemcpy(y, d_y, m * sizeof(value_type), hipMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
else if (nnz_per_row <= 8)
{
// cout<< "THREADS_PER_VECTOR = 8" << endl;
MPI_Barrier(MPI_COMM_WORLD);
for (int i = 0; i < NUM_RUN+SKIP; i++) {
// cout << "[" << mpi_rank << "] 8-iter= " << i+1 << " mat= " << matName << endl;
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(hipMemcpy(d_x, x, n * sizeof(value_type), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_y, y, m * sizeof(value_type), hipMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<8>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(hipMemcpy(y, d_y, m * sizeof(value_type), hipMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
else if (nnz_per_row <= 16)
{
// cout<< "[" << mpi_rank << "] THREADS_PER_VECTOR = 16" << endl;
MPI_Barrier(MPI_COMM_WORLD);
        for (int i = 0; i < NUM_RUN+SKIP; i++) {
/*if(i==19){
for (int j = 0; j < 10; ++j) {
cout<< "[" << mpi_rank << "] 16: " ;
cout << x[j] << " ";
}
cout<<endl;
}
cout << "[" << mpi_rank << "] 16-iter= " << i+1 << " mat= " << matName << endl;*/
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(hipMemcpy(d_x, x, n * sizeof(value_type), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_y, y, m * sizeof(value_type), hipMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<16>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(hipMemcpy(y, d_y, m * sizeof(value_type), hipMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
else
{
// cout<< "THREADS_PER_VECTOR = 32" << endl;
MPI_Barrier(MPI_COMM_WORLD);
for (int i = 0; i < NUM_RUN+SKIP; i++) {
/*if(i==20){
for (int j = 0; j < 10; ++j) {
cout<< "[" << mpi_rank << "] 32: ";
cout << x[j] << " ";
}
cout<<endl;
}
cout << "[" << mpi_rank << "] 32-iter= " << i+1 << " mat= " << matName << endl;*/
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(hipMemcpy(d_x, x, n * sizeof(value_type), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_y, y, m * sizeof(value_type), hipMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<32>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(hipMemcpy(y, d_y, m * sizeof(value_type), hipMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
MPI_Barrier(MPI_COMM_WORLD);
checkCudaErrors(hipDeviceSynchronize());
if(mpi_rank == MASTER)
cout<< "Run complete" << endl;
double cuspTime = cusp_timer.stop() / (NUM_RUN+SKIP);
int avg_nnz;
double avg_nnz_per_row, avgTime;
avg_b_time /= NUM_RUN;
avg_m_time /= NUM_RUN;
avg_r_time /= NUM_RUN;
MPI_Reduce(&nnz, &avg_nnz, 1, MPI_INT, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&nnz_per_row, &avg_nnz_per_row, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&avg_b_time, &b_time, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&avg_m_time, &m_time, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&avg_r_time, &r_time, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&cuspTime, &avgTime, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
avg_nnz /= nRanks;
avg_nnz_per_row /= nRanks;
b_time /= nRanks;
m_time /= nRanks;
r_time /= nRanks;
avgTime /= nRanks;
if(mpi_rank == MASTER) {
cout << "CUSP time = " << cuspTime
<< " ms. Bandwidth = " << gb / (1.0e+6 * cuspTime)
<< " GB/s. GFlops = " << gflop / (1.0e+6 * cuspTime) << " GFlops." << endl << endl;
// run CUSP STOP
char outputFile[100] = "Results/CSR_CUDA_2DSpMV.csv";
FILE *resultCSV;
FILE *checkFile;
if ((checkFile = fopen(outputFile, "r")) != NULL) {
// file exists
fclose(checkFile);
if (!(resultCSV = fopen(outputFile, "a"))) {
fprintf(stderr, "fopen: failed to open %s file\n", outputFile);
exit(EXIT_FAILURE);
}
} else {
if (!(resultCSV = fopen(outputFile, "w"))) {
fprintf(stderr, "fopen: failed to open file %s\n", outputFile);
exit(EXIT_FAILURE);
}
fprintf(resultCSV,
"Name,M,N,AvgTime,AvgBcastTime,AvgMultTime,AvgReduceTime,TotalRun,NonZeroPerRow,NonZeroElements,Bandwidth,Flops,ValueType,Type,Strides,TransactionByte,WordSize\n");
}
fprintf(resultCSV, "%s,%d,%d,%10.6lf,%10.6lf,%10.6lf,%10.6lf,%d,%lf,%d,%lf,%lf,%d,%s,%ld,%d,%d\n", matName, m,
n, avgTime, b_time, m_time, r_time, (NUM_RUN + SKIP), avg_nnz_per_row, avg_nnz, gb/(1.0e+6 * avgTime),
gflop/(1.0e+6 * avgTime), sizeof(value_type), "CUSP", strideCounts, TRANSACTION_BYTE,
TRANSACTION_BYTE/sizeof(value_type));
if (fclose(resultCSV) != 0) {
fprintf(stderr, "fopen: failed to open file %s\n", outputFile);
exit(EXIT_FAILURE);
}
}
checkCudaErrors(hipDeviceSynchronize());
free(y_cusp_ref);
checkCudaErrors(hipFree(d_csrRowPtrA));
checkCudaErrors(hipFree(d_csrColIdxA));
checkCudaErrors(hipFree(d_csrValA));
checkCudaErrors(hipFree(d_x));
checkCudaErrors(hipFree(d_y));
return;
}
void call_omp_ref(int m, int n, int nnz,
int *csrRowPtrA, int *csrColIdxA, value_type *csrValA,
value_type *x, value_type *y, value_type *y_ref)
{
#if USE_SVM_ALWAYS
cout << endl << "OpenMP is using shared virtual memory (unified memory).";
int *svm_csrRowPtrA;
int *svm_csrColIdxA;
value_type *svm_csrValA;
value_type *svm_x;
value_type *svm_y;
// prepare shared virtual memory (unified memory)
checkCudaErrors(hipMallocManaged(&svm_csrRowPtrA, (m+1) * sizeof(int)));
checkCudaErrors(hipMallocManaged(&svm_csrColIdxA, nnz * sizeof(int)));
checkCudaErrors(hipMallocManaged(&svm_csrValA, nnz * sizeof(value_type)));
memcpy(svm_csrRowPtrA, csrRowPtrA, (m+1) * sizeof(int));
memcpy(svm_csrColIdxA, csrColIdxA, nnz * sizeof(int));
memcpy(svm_csrValA, csrValA, nnz * sizeof(value_type));
checkCudaErrors(hipMallocManaged(&svm_x, n * sizeof(value_type)));
memcpy(svm_x, x, n * sizeof(value_type));
checkCudaErrors(hipMallocManaged(&svm_y, m * sizeof(value_type)));
memcpy(svm_y, y, m * sizeof(value_type));
#else
// cout << endl << "OpenMP is using dedicated HOST memory.";
value_type *y_omp_ref = (value_type *)malloc(m * sizeof(value_type));
#endif
double gb = (double)((m + 1 + nnz) * sizeof(int) + (2 * nnz + m) * sizeof(value_type));
double gflop = (double)(2 * nnz);
// run OpenMP START
// omp_set_num_threads(4);
// cout << endl << "OpenMP is using 4 threads.";
checkCudaErrors(hipDeviceSynchronize());
bhsparse_timer omp_timer;
omp_timer.start();
for (int iter = 0; iter < NUM_RUN; iter++)
{
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
value_type sum = 0;
#if USE_SVM_ALWAYS
for (int j = svm_csrRowPtrA[i]; j < svm_csrRowPtrA[i+1]; j++)
sum += svm_x[svm_csrColIdxA[j]] * svm_csrValA[j];
svm_y[i] = sum;
#else
for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++)
sum += x[csrColIdxA[j]] * csrValA[j];
y_omp_ref[i] = sum;
#endif
}
}
double ompTime = omp_timer.stop() / NUM_RUN;
cout << endl << "Checking OpenMP SpMV Correctness ... ";
int error_count = 0;
for (int i = 0; i < m; i++)
#if USE_SVM_ALWAYS
if (y_ref[i] != svm_y[i])
error_count++;
#else
if (y_ref[i] != y_omp_ref[i])
error_count++;
#endif
if (error_count)
cout << "NO PASS. Error count = " << error_count << " out of " << m << " entries." << endl;
// else
// cout << "PASS!" << endl;
if(mpi_rank == MASTER) {
cout << "OpenMP time = " << ompTime
<< " ms. Bandwidth = " << gb / (1.0e+6 * ompTime)
<< " GB/s. GFlops = " << gflop / (1.0e+6 * ompTime) << " GFlops." << endl << endl;
}
// run OpenMP STOP
#if USE_SVM_ALWAYS
checkCudaErrors(hipFree(svm_csrValA));
checkCudaErrors(hipFree(svm_csrRowPtrA));
checkCudaErrors(hipFree(svm_csrColIdxA));
checkCudaErrors(hipFree(svm_x));
checkCudaErrors(hipFree(svm_y));
#else
free(y_omp_ref);
#endif
return;
}
int call_bhsparse_small()
{
int err = 0;
int m, n, nnzA;
int *csrColIdxA;
int *csrRowPtrA;
value_type *csrValA;
m = 6;
n = 6;
nnzA = 15;
csrColIdxA = (int *)malloc(nnzA * sizeof(int));
csrRowPtrA = (int *)malloc((m+1) * sizeof(int));
csrValA = (value_type *)malloc(nnzA * sizeof(value_type));
int row_ptr[7] = {0, 3, 9, 11, 11, 12, 15};
int col_idx[15] = {0, 2, 5, 0, 1, 2, 3, 4, 5, 2, 4, 4, 2, 3, 4};
value_type val[15] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
memcpy(csrRowPtrA, row_ptr, (m+1) * sizeof(int));
memcpy(csrColIdxA, col_idx, nnzA * sizeof(int));
memcpy(csrValA, val, nnzA * sizeof(value_type));
cout << "row_ptr = [ ";
for (int i = 0; i < m + 1; i++)
cout << csrRowPtrA[i] << ", ";
cout << " ]" << endl;
cout << "col_idx = [ ";
for (int i = 0; i < nnzA; i++)
cout << csrColIdxA[i] << ", ";
cout << " ]" << endl;
cout << "value = [ ";
for (int i = 0; i < nnzA; i++)
cout << csrValA[i] << ", ";
cout << " ]" << endl << endl;
value_type *x = (value_type *)malloc(n * sizeof(value_type));
for (int i = 0; i < n; i++)
x[i] = 1.0;
value_type *y = (value_type *)malloc(m * sizeof(value_type));
value_type *y_ref = (value_type *)malloc(m * sizeof(value_type));
// compute cpu results
for (int i = 0; i < m; i++)
{
value_type sum = 0;
for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++)
sum += x[csrColIdxA[j]] * csrValA[j];
y_ref[i] = sum;
}
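    // With x = all ones, the reference is just the per-row sum of val[]:
    // y_ref = {1+2+3, 4+5+6+7+8+9, 10+11, 0, 12, 13+14+15} = {6, 39, 21, 0, 12, 42}.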
memset(y, 0, m * sizeof(value_type));
bhsparse_spmv_cuda *bhsparse = new bhsparse_spmv_cuda();
err = bhsparse->init_platform();
// cout << "Initializing CUDA platform ... ";
if (!err) {
// cout << "Done.";
}
else
cout << "\"Initializing CUDA platform ... Failed. Error code = " << err << endl;
// cout << endl;
err = bhsparse->prepare_mem(m, n, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y);
err = bhsparse->run_benchmark();
cout << endl;
// print y_ref
cout << "(CPU) y = ";
for (int i = 0; i < m; i++)
{
cout << y_ref[i] << ", ";
if ((i+1) % 16 == 0)
cout << endl;
}
cout << endl;
// print y
cout << "(GPU) y = ";
for (int i = 0; i < m; i++)
{
cout << y[i] << ", ";
if ((i+1) % 16 == 0)
cout << endl;
}
cout << endl;
// compare cpu and gpu results
cout << endl << "Checking bhSPARSE SpMV Correctness ... ";
int error_count = 0;
for (int i = 0; i < m; i++)
if (y_ref[i] != y[i])
{
error_count++;
cout << "ERROR ROW [ " << i << " ] " "cpu = " << y_ref[i] << ", gpu = " << y[i] << endl;
}
if (error_count)
cout << "NO PASS. Error count = " << error_count << " out of " << m << " entries.";
else
cout << "PASS!";
cout << endl;
free(y_ref);
err = bhsparse->free_platform();
err = bhsparse->free_mem();
return err;
}
int call_bhsparse(const char *datasetpath)
{
int err = 0;
// report precision of floating-point
    const char *precision;
if (sizeof(value_type) == 4)
{
precision = "32-bit Single Precision";
}
else if (sizeof(value_type) == 8)
{
precision = "64-bit Double Precision";
}
else
{
cout << "Wrong precision. Program exit!" << endl;
return 0;
}
// cout << "PRECISION = " << precision << endl;
// cout << "RUN SpMV " << NUM_RUN << " times" << endl;
int ret_code;
MM_typecode matcode;
FILE *f;
int m, n, nnzA, max_deg = 0;
int *csrRowPtrA;
int *csrColIdxA;
value_type *csrValA;
int nnzA_mtx_report;
int isInteger = 0, isReal = 0, isPattern = 0, isSymmetric = 0;
// load matrix
if ((f = fopen(datasetpath, "r")) == NULL)
return -1;
if (mm_read_banner(f, &matcode) != 0)
{
cout << "Could not process Matrix Market banner." << endl;
return -2;
}
if ( mm_is_complex( matcode ) )
{
cout <<"Sorry, data type 'COMPLEX' is not supported. " << endl;
return -3;
}
if ( mm_is_pattern( matcode ) ) { isPattern = 1; /*cout << "type = Pattern" << endl;*/ }
if ( mm_is_real ( matcode) ) { isReal = 1; /*cout << "type = real" << endl;*/ }
if ( mm_is_integer ( matcode ) ) { isInteger = 1; /*cout << "type = integer" << endl;*/ }
/* find out size of sparse matrix .... */
ret_code = mm_read_mtx_crd_size(f, &m, &n, &nnzA_mtx_report);
if (ret_code != 0)
return -4;
if ( mm_is_symmetric( matcode ) || mm_is_hermitian( matcode ) )
{
isSymmetric = 1;
//cout << "symmetric = true" << endl;
}
else
{
//cout << "symmetric = false" << endl;
}
firstRow = ceil(((double) n / sqrRank)) * (mpi_rank / sqrRank);
m = ceil(((double) n) / sqrRank);
firstCol = col_rank * m;
int *csrRowPtrA_counter = (int *)malloc((m+1) * sizeof(int));
memset(csrRowPtrA_counter, 0, (m+1) * sizeof(int));
int *csrRowIdxA_tmp = (int *)malloc(nnzA_mtx_report * sizeof(int));
int *csrColIdxA_tmp = (int *)malloc(nnzA_mtx_report * sizeof(int));
value_type *csrValA_tmp = (value_type *)malloc(nnzA_mtx_report * sizeof(value_type));
/* NOTE: when reading in doubles, ANSI C requires the use of the "l" */
/* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */
/* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15) */
for (int i = 0; i < nnzA_mtx_report; i++)
{
int idxi, idxj;
double fval;
int ival;
        if (isReal)
        {
            int count = fscanf(f, "%d %d %lg\n", &idxi, &idxj, &fval);
        }
else if (isInteger)
{
int count = fscanf(f, "%d %d %d\n", &idxi, &idxj, &ival);
fval = ival;
}
else if (isPattern)
{
int count = fscanf(f, "%d %d\n", &idxi, &idxj);
fval = 1.0;
}
// adjust from 1-based to 0-based
idxi--;
idxj--;
csrRowPtrA_counter[idxi - firstRow]++;
csrRowIdxA_tmp[i] = idxi - firstRow;
csrColIdxA_tmp[i] = idxj - firstCol;
csrValA_tmp[i] = fval;
if (csrRowPtrA_counter[idxi - firstRow] > max_deg) {
max_deg = csrRowPtrA_counter[idxi - firstRow];
}
}
if (f != stdin)
fclose(f);
if (isSymmetric)
{
for (int i = 0; i < nnzA_mtx_report; i++)
{
if (csrRowIdxA_tmp[i] != csrColIdxA_tmp[i])
csrRowPtrA_counter[csrColIdxA_tmp[i]]++;
}
}
// exclusive scan for csrRowPtrA_counter
int old_val, new_val;
old_val = csrRowPtrA_counter[0];
csrRowPtrA_counter[0] = 0;
for (int i = 1; i <= m; i++)
{
new_val = csrRowPtrA_counter[i];
csrRowPtrA_counter[i] = old_val + csrRowPtrA_counter[i-1];
old_val = new_val;
}
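    // Worked example of the exclusive scan above, using the 6x6 sample from call_bhsparse_small(): per-row counts {3, 6, 2, 0, 1, 3}
    // become the CSR row pointer {0, 3, 9, 11, 11, 12, 15}, and csrRowPtrA_counter[m] (here 15) is the local nnzA used below.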
nnzA = csrRowPtrA_counter[m];
csrRowPtrA = (int *)malloc((m+1) * sizeof(int));
memcpy(csrRowPtrA, csrRowPtrA_counter, (m+1) * sizeof(int));
memset(csrRowPtrA_counter, 0, (m+1) * sizeof(int));
csrColIdxA = (int *)malloc(nnzA * sizeof(int));
csrValA = (value_type *)malloc(nnzA * sizeof(value_type));
double gb = (double)((m + 1 + nnzA) * sizeof(int) + (2 * nnzA + m) * sizeof(value_type));
double gflop = (double)(2 * nnzA);
if (isSymmetric)
{
for (int i = 0; i < nnzA_mtx_report; i++)
{
if (csrRowIdxA_tmp[i] != csrColIdxA_tmp[i])
{
int offset = csrRowPtrA[csrRowIdxA_tmp[i]] + csrRowPtrA_counter[csrRowIdxA_tmp[i]];
csrColIdxA[offset] = csrColIdxA_tmp[i];
csrValA[offset] = csrValA_tmp[i];
csrRowPtrA_counter[csrRowIdxA_tmp[i]]++;
offset = csrRowPtrA[csrColIdxA_tmp[i]] + csrRowPtrA_counter[csrColIdxA_tmp[i]];
csrColIdxA[offset] = csrRowIdxA_tmp[i];
csrValA[offset] = csrValA_tmp[i];
csrRowPtrA_counter[csrColIdxA_tmp[i]]++;
}
else
{
int offset = csrRowPtrA[csrRowIdxA_tmp[i]] + csrRowPtrA_counter[csrRowIdxA_tmp[i]];
csrColIdxA[offset] = csrColIdxA_tmp[i];
csrValA[offset] = csrValA_tmp[i];
csrRowPtrA_counter[csrRowIdxA_tmp[i]]++;
}
}
}
else
{
for (int i = 0; i < nnzA_mtx_report; i++)
{
int offset = csrRowPtrA[csrRowIdxA_tmp[i]] + csrRowPtrA_counter[csrRowIdxA_tmp[i]];
csrColIdxA[offset] = csrColIdxA_tmp[i];
csrValA[offset] = csrValA_tmp[i];
csrRowPtrA_counter[csrRowIdxA_tmp[i]]++;
}
}
// free tmp space
free(csrColIdxA_tmp);
free(csrValA_tmp);
free(csrRowIdxA_tmp);
free(csrRowPtrA_counter);
if(mpi_rank == MASTER)
cout << " ( " << m << ", " << n << " ) nnz = " << nnzA << endl;
srand(time(NULL));
for (int i = 0; i < nnzA; i++)
{
csrValA[i] = 1.0/(value_type)m;
}
value_type *x = (value_type *)malloc(m * sizeof(value_type));
for (int i = 0; i < m; i++)
x[i] = 1.0;
value_type *y = (value_type *)malloc(m * sizeof(value_type));
value_type *y_ref = (value_type *)malloc(m * sizeof(value_type));
/***********Access Pattern Based on 128 Threads Per Block *********/
if(mpi_rank == MASTER)
cout << "M: " << m << " N: " << n << " nnzA: " << nnzA << " Max degree=" << max_deg << endl;
int wordSize = TRANSACTION_BYTE/ sizeof(value_type);
for (int row_i = 0; row_i < m; row_i += wordSize) {
for (int k = 0; k < max_deg; ++k) {
int failed = 0;
int row_check = (row_i + wordSize) > m ? m : (row_i + wordSize);
unordered_set<long> hashme;
for (int th = row_i; th < row_check; ++th) {
if (k < (csrRowPtrA[th + 1] - csrRowPtrA[th])) {
hashme.insert((long)(&x[csrColIdxA[csrRowPtrA[th] + k]])/TRANSACTION_BYTE);
failed = 1;
}
}
if (failed == 0) {
break;
}
strideCounts += hashme.size();
}
}
if(mpi_rank == MASTER)
cout << "Strides count: " << strideCounts << " Transaction Byte Size: " << TRANSACTION_BYTE << " Number of Transaction Word: " << wordSize << endl;
/*****************************************************************/
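    /* Note on the stride counting above (illustrative numbers, assuming 4-byte floats): with TRANSACTION_BYTE = 128 the word size is
       128 / 4 = 32, so each outer step inspects one 32-thread word of rows. For step k, hashme collects the distinct 128-byte segments
       of x touched by those rows' k-th column indices; fully coalesced indices add 1 transaction per step, fully scattered ones add up
       to 32, and strideCounts accumulates the total number of transactions the x gathers would need. */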
// compute cpu results
bhsparse_timer ref_timer;
ref_timer.start();
int ref_iter = 1;
for (int iter = 0; iter < ref_iter; iter++)
{
for (int i = 0; i < m; i++)
{
value_type sum = 0;
for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++)
sum += x[csrColIdxA[j]] * csrValA[j];
y_ref[i] = sum;
}
}
double ref_time = ref_timer.stop() / (double)ref_iter;
if(mpi_rank == MASTER) {
cout << "cpu sequential time = " << ref_time
<< " ms. Bandwidth = " << gb / (1.0e+6 * ref_time)
<< " GB/s. GFlops = " << gflop / (1.0e+6 * ref_time) << " GFlops." << endl << endl;
}
memset(y, 0, m * sizeof(value_type));
bhsparse_spmv_cuda *bhsparse = new bhsparse_spmv_cuda();
// cout<< " rank sent to set device = " << mpi_rank << endl;
err = bhsparse->init_platform(mpi_rank);
// test OpenMP, cuSPARSE and CUSP v0.4.0
call_cusp_ref(m, m, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y, y_ref);
// call_cusparse_ref(m, m, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y, y_ref);
// call_omp_ref(m, n, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y, y_ref);
// run bhSPARSE
/*err = bhsparse->prepare_mem(m, m, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y);
double time = 0.0;
err = bhsparse->run_benchmark();
bhsparse->get_y();
// compare ref and our results
cout << endl << "Checking bhSPARSE SpMV Correctness ... ";
int error_count = 0;
for (int i = 0; i < m; i++)
if (y_ref[i] != y[i])
{
error_count++;
// cout << "ROW [ " << i << " ] "
// << csrRowPtrA[i] << " - "
// << csrRowPtrA[i+1]
// << " warp = " << csrRowPtrA[i+1]/(31*256)
// << "\t cpu = " << y_ref[i]
// << ", \t gpu = " << y[i]
// << ", \t error = " << y_ref[i] - y[i]
// << endl;
}
if (error_count)
cout << "NO PASS. Error count = " << error_count << " out of " << m << " entries.";
else
{
cout << "PASS!";
bhsparse_timer spmv_timer;
spmv_timer.start();
for (int i = 0; i < NUM_RUN; i++)
{
err = bhsparse->run_benchmark();
}
time = spmv_timer.stop()/(double)NUM_RUN;
cout << endl << "bhSPARSE time = " << time
<< " ms. Bandwidth = " << gb/(1.0e+6 * time)
<< " GB/s. GFlops = " << gflop/(1.0e+6 * time) << " GFlops." << endl;
}
err = bhsparse->free_platform();
err = bhsparse->free_mem();*/
free(csrRowPtrA);
free(csrColIdxA);
free(csrValA);
free(x);
free(y);
free(y_ref);
return err;
}
int main(int argc, char ** argv)
{
/*
#if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
printf("This MPI library has CUDA-aware support.\n", MPIX_CUDA_AWARE_SUPPORT);
#elif defined(MPIX_CUDA_AWARE_SUPPORT) && !MPIX_CUDA_AWARE_SUPPORT
printf("This MPI library does not have CUDA-aware support.\n");
#else
printf("This MPI library cannot determine if there is CUDA-aware support.\n");
#endif *//* MPIX_CUDA_AWARE_SUPPORT *//*
printf("Run time check:\n");
#if defined(MPIX_CUDA_AWARE_SUPPORT)
if (1 == MPIX_Query_cuda_support()) {
printf("This MPI library has CUDA-aware support.\n");
} else {
printf("This MPI library does not have CUDA-aware support.\n");
}
#else *//* !defined(MPIX_CUDA_AWARE_SUPPORT) *//*
printf("This MPI library cannot determine if there is CUDA-aware support.\n");
#endif *//* MPIX_CUDA_AWARE_SUPPORT *//*
return 0;*/
int argi = 1;
char *input;
char filename[MAX_STRING_LENGTH];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nRanks);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
char processor_name[MPI_MAX_PROCESSOR_NAME];
int name_len;
MPI_Get_processor_name(processor_name, &name_len);
if(mpi_rank == MASTER)
std::cout<<"[" << mpi_rank << ": " << processor_name << "] GPU 2d SpMV, MPI rank " << mpi_rank << " of "<< nRanks
<< " starting...." << endl;
sqrRank = sqrt(nRanks);
row_rank = mpi_rank / sqrRank; //which col of proc am I
col_rank = mpi_rank % sqrRank; //which row of proc am I
//initialize communicators
MPI_Comm_split(MPI_COMM_WORLD, row_rank, mpi_rank, &commrow);
MPI_Comm_split(MPI_COMM_WORLD, col_rank, mpi_rank, &commcol);
if(argc > argi)
{
input = argv[argi];
argi++;
}
if (argc > argi){
testType = atoi(argv[argi]);
argi++;
}
int err = 0;
char *file[MAX_STRING_LENGTH];
char *only_mat_name[MAX_STRING_LENGTH];
char n[MAX_STRING_LENGTH] = "";
char * ptr = strtok(input, "/");
int i=0,j;
while(ptr != NULL)
{
file[i++] = ptr;
ptr = strtok(NULL, "/");
}
for(j=0; j<i-1; ++j){
strcat(n, file[j]);
strcat(n, "/");
}
ptr = strtok(file[i-1], ".");
sprintf(filename, "%s%s_%d.mtx", n, ptr, mpi_rank);
char *good_format = strtok(ptr, "_");
i=0;
while(good_format != NULL)
{
only_mat_name[i++] = good_format;
good_format = strtok(NULL, "_");
}
for(j=0; j<i-2; ++j){
strcat(matName, only_mat_name[j]);
if(j < i-3)
strcat(matName, "_");
}
if (strcmp(filename, "0") == 0)
err = call_bhsparse_small();
else
{
if(mpi_rank == MASTER)
cout << "--------------" << filename << "--------------" << endl;
err = call_bhsparse(filename);
}
MPI_Barrier(MPI_COMM_WORLD);
if(mpi_rank == MASTER)
cout << "------------------------------------------------------" << endl;
MPI_Finalize();
return err;
}
| 3fab72f161de0c513a4cd939601e6d6d0341ede8.cu | #include "common.h"
#include "bhsparse_spmv_cuda.h"
#include "mmio.h"
#include <unordered_set>
#include<mpi.h>
//#include "mpi-ext.h" /* Needed for CUDA-aware check */
#define MAX_STRING_LENGTH 128
long strideCounts = 0;
char matName[MAX_STRING_LENGTH];
int testType = 0, mpi_rank, nRanks, MASTER = 0, sqrRank, row_rank, col_rank, firstRow, firstCol, total_sparsity = 0,
max_sparsity = 0, transactionByte = 128;
MPI_Comm commrow;
MPI_Comm commcol;
cusparseStatus_t cusparse_spmv(cusparseHandle_t handle, cusparseMatDescr_t descr,
int m, int n, int nnz,
int *csrRowPtrA, int *csrColIdxA, double *csrValA,
double *x, double *y, double alpha, double beta)
{
return cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, &alpha, descr, csrValA, csrRowPtrA, csrColIdxA, x, &beta, y);
}
cusparseStatus_t cusparse_spmv(cusparseHandle_t handle, cusparseMatDescr_t descr,
int m, int n, int nnz,
int *csrRowPtrA, int *csrColIdxA, float *csrValA,
float *x, float *y, float alpha, float beta)
{
return cusparseScsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, n, nnz, &alpha, descr, csrValA, csrRowPtrA, csrColIdxA, x, &beta, y);
}
template <unsigned int THREADS_PER_VECTOR>
void cusp_spmv(int m, int n, int nnz, int *svm_csrRowPtrA, int *svm_csrColIdxA, double *svm_csrValA, double *svm_x, double *svm_y)
{
const size_t THREADS_PER_BLOCK = 128;
const size_t VECTORS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_VECTOR;
const size_t NUM_BLOCKS = ceil((double)m / (double)VECTORS_PER_BLOCK);
spmv_csr_vector_kernel
<int, double, VECTORS_PER_BLOCK, THREADS_PER_VECTOR>
<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>
(m, svm_csrRowPtrA, svm_csrColIdxA, svm_csrValA, svm_x, svm_y);
}
template <unsigned int THREADS_PER_VECTOR>
void cusp_spmv(int m, int n, int nnz, int *svm_csrRowPtrA, int *svm_csrColIdxA, float *svm_csrValA, float *svm_x, float *svm_y)
{
const size_t THREADS_PER_BLOCK = 128;
const size_t VECTORS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_VECTOR;
const size_t NUM_BLOCKS = ceil((double)m / (double)VECTORS_PER_BLOCK);
spmv_csr_vector_kernel
<int, float, VECTORS_PER_BLOCK, THREADS_PER_VECTOR>
<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>
(m, svm_csrRowPtrA, svm_csrColIdxA, svm_csrValA, svm_x, svm_y);
}
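// Launch-shape example for the wrappers above (illustrative numbers): with THREADS_PER_BLOCK = 128 and THREADS_PER_VECTOR = 32,
// VECTORS_PER_BLOCK = 4 rows are handled per block, so m = 1000 rows gives NUM_BLOCKS = ceil(1000 / 4) = 250; smaller
// THREADS_PER_VECTOR values cover more rows per block but have fewer threads cooperating on each row's nonzeros.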
void call_cusparse_ref(int m, int n, int nnz,
int *csrRowPtrA, int *csrColIdxA, value_type *csrValA,
value_type *x, value_type *y, value_type *y_ref)
{
// prepare shared virtual memory (unified memory)
#if USE_SVM_ALWAYS
cout << endl << "cuSPARSE is using shared virtual memory (unified memory).";
int *svm_csrRowPtrA;
int *svm_csrColIdxA;
value_type *svm_csrValA;
value_type *svm_x;
value_type *svm_y;
checkCudaErrors(cudaMallocManaged(&svm_csrRowPtrA, (m+1) * sizeof(int)));
checkCudaErrors(cudaMallocManaged(&svm_csrColIdxA, nnz * sizeof(int)));
checkCudaErrors(cudaMallocManaged(&svm_csrValA, nnz * sizeof(value_type)));
memcpy(svm_csrRowPtrA, csrRowPtrA, (m+1) * sizeof(int));
memcpy(svm_csrColIdxA, csrColIdxA, nnz * sizeof(int));
memcpy(svm_csrValA, csrValA, nnz * sizeof(value_type));
checkCudaErrors(cudaMallocManaged(&svm_x, n * sizeof(value_type)));
memcpy(svm_x, x, n * sizeof(value_type));
checkCudaErrors(cudaMallocManaged(&svm_y, m * sizeof(value_type)));
memcpy(svm_y, y, m * sizeof(value_type));
// prepare device memory
#else
cout << endl << "cuSPARSE is using dedicated GPU memory.";
int *d_csrRowPtrA;
int *d_csrColIdxA;
value_type *d_csrValA;
value_type *d_x;
value_type *d_y;
checkCudaErrors(cudaMalloc((void **)&d_csrRowPtrA, (m+1) * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_csrColIdxA, nnz * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_csrValA, nnz * sizeof(value_type)));
checkCudaErrors(cudaMemcpy(d_csrRowPtrA, csrRowPtrA, (m+1) * sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_csrColIdxA, csrColIdxA, nnz * sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_csrValA, csrValA, nnz * sizeof(value_type), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&d_x, n * sizeof(value_type)));
checkCudaErrors(cudaMemcpy(d_x, x, n * sizeof(value_type), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&d_y, m * sizeof(value_type)));
checkCudaErrors(cudaMemcpy(d_y, y, m * sizeof(value_type), cudaMemcpyHostToDevice));
#endif
double gb = (double)((m + 1 + nnz) * sizeof(int) + (2 * nnz + m) * sizeof(value_type));
double gflop = (double)(2 * nnz);
// run cuSPARSE START
cusparseHandle_t handle = 0;
cusparseStatus_t status;
status = cusparseCreate(&handle);
if (status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE initialization error\n");
//return -1;
}
cusparseMatDescr_t descr = 0;
status = cusparseCreateMatDescr(&descr);
if (status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE cusparseCreateMatDescr error\n");
//return -2;
}
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
value_type alpha = 1.0;
value_type beta = 0.0;
checkCudaErrors(cudaDeviceSynchronize());
bhsparse_timer cusparse_timer;
cusparse_timer.start();
for (int i = 0; i < NUM_RUN; i++)
{
#if USE_SVM_ALWAYS
status = cusparse_spmv(handle, descr, m, n, nnz, svm_csrRowPtrA, svm_csrColIdxA, svm_csrValA, svm_x, svm_y, alpha, beta);
#else
status = cusparse_spmv(handle, descr, m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y, alpha, beta);
#endif
}
checkCudaErrors(cudaDeviceSynchronize());
if (status != CUSPARSE_STATUS_SUCCESS)
{
printf("CUSPARSE cusparseS/Dcsrmv error\n");
//return -3;
}
double cusparseTime = cusparse_timer.stop() / NUM_RUN;
cout << endl << "Checking cuSPARSE SpMV Correctness ... ";
#if USE_SVM_ALWAYS == 0
value_type *y_cusparse_ref = (value_type *)malloc(m * sizeof(value_type));
checkCudaErrors(cudaMemcpy(y_cusparse_ref, d_y, m * sizeof(value_type), cudaMemcpyDeviceToHost));
#endif
int error_count = 0;
for (int i = 0; i < m; i++)
#if USE_SVM_ALWAYS
if (y_ref[i] != svm_y[i])
error_count++;
#else
if (y_ref[i] != y_cusparse_ref[i])
error_count++;
#endif
if (error_count)
cout << "NO PASS. Error count = " << error_count << " out of " << m << " entries.";
else
cout << "PASS!";
cout << endl;
cout << "cuSPARSE time = " << cusparseTime
<< " ms. Bandwidth = " << gb/(1.0e+6 * cusparseTime)
<< " GB/s. GFlops = " << gflop/(1.0e+6 * cusparseTime) << " GFlops." << endl << endl;
// run cuSPARSE STOP
char outputFile[100] = "Results/CSR_CUDA_2DSpMV.csv";
FILE *resultCSV;
FILE *checkFile;
if ((checkFile = fopen(outputFile, "r")) != NULL) {
// file exists
fclose(checkFile);
if (!(resultCSV = fopen(outputFile, "a"))) {
fprintf(stderr, "fopen: failed to open %s file\n", outputFile);
exit(EXIT_FAILURE);
}
} else {
if (!(resultCSV = fopen(outputFile, "w"))) {
fprintf(stderr, "fopen: failed to open file %s\n", outputFile);
exit(EXIT_FAILURE);
}
fprintf(resultCSV, "Name,M,N,AvgTime,TotalRun,NonZeroPerRow,NonZeroElements,Bandwidth,Flops,ValueType,Type,Strides,TransactionByte,WordSize\n");
}
fprintf(resultCSV, "%s,%d,%d,%10.6lf,%d,%lf,%d,%lf,%lf,%d,%s,%ld,%d,%d\n", matName, m, n, cusparseTime, NUM_RUN, (double) nnz / m,
nnz, gb / (1.0e+6 * cusparseTime), gflop / (1.0e+6 * cusparseTime), sizeof(value_type), "CUSPARSE", strideCounts,
TRANSACTION_BYTE, TRANSACTION_BYTE/ sizeof(value_type));
if (fclose(resultCSV) != 0) {
fprintf(stderr, "fopen: failed to open file %s\n", outputFile);
exit(EXIT_FAILURE);
}
#if USE_SVM_ALWAYS
checkCudaErrors(cudaFree(svm_csrValA));
checkCudaErrors(cudaFree(svm_csrRowPtrA));
checkCudaErrors(cudaFree(svm_csrColIdxA));
checkCudaErrors(cudaFree(svm_x));
checkCudaErrors(cudaFree(svm_y));
#else
free(y_cusparse_ref);
checkCudaErrors(cudaFree(d_csrRowPtrA));
checkCudaErrors(cudaFree(d_csrColIdxA));
checkCudaErrors(cudaFree(d_csrValA));
checkCudaErrors(cudaFree(d_x));
checkCudaErrors(cudaFree(d_y));
#endif
return;
}
void call_cusp_ref(int m, int n, int nnz, int *csrRowPtrA, int *csrColIdxA, value_type *csrValA, value_type *x
, value_type *y, value_type *y_ref)
{
int *d_csrRowPtrA;
int *d_csrColIdxA;
value_type *d_csrValA;
value_type *d_x;
value_type *d_y;
checkCudaErrors(cudaMalloc((void **)&d_csrRowPtrA, (m+1) * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_csrColIdxA, nnz * sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_csrValA, nnz * sizeof(value_type)));
checkCudaErrors(cudaMemcpy(d_csrRowPtrA, csrRowPtrA, (m+1) * sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_csrColIdxA, csrColIdxA, nnz * sizeof(int), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_csrValA, csrValA, nnz * sizeof(value_type), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **)&d_x, n * sizeof(value_type)));
checkCudaErrors(cudaMalloc((void **)&d_y, m * sizeof(value_type)));
checkCudaErrors(cudaMemcpy(d_y, y, m * sizeof(value_type), cudaMemcpyHostToDevice));
// cout << endl << "[" << mpi_rank << "] Checking CUSP SpMV Correctness ... ";
checkCudaErrors(cudaMemcpy(d_x, x, n * sizeof(value_type), cudaMemcpyHostToDevice));
cusp_spmv<32>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
value_type *y_cusp_ref = (value_type *)malloc(m * sizeof(value_type));
checkCudaErrors(cudaMemcpy(y_cusp_ref, d_y, m * sizeof(value_type), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaDeviceSynchronize());
int error_count = 0;
for (int i = 0; i < m; i++)
if (abs(y_ref[i] - y_cusp_ref[i]) > 0.01 * abs(y_ref[i])/*y_ref[i] != y_cusp_ref[i]*/)
error_count++;
if (error_count)
cout << "[" << mpi_rank << "] NO PASS. Error count = " << error_count << " out of " << m << " entries." << endl;
// else
// cout << "PASS!" << endl;
double gb = (double)((m + 1 + nnz) * sizeof(int) + (2 * nnz + m) * sizeof(value_type));
double gflop = (double)(2 * nnz);
// run CUSP START
const int nnz_per_row = nnz / m;
bhsparse_timer cusp_timer;
bhsparse_timer broadcast_timer;
bhsparse_timer mult_timer;
bhsparse_timer reduce_timer;
cusp_timer.start();
double b_time, r_time, m_time, avg_b_time = 0, avg_r_time = 0, avg_m_time = 0;
if (nnz_per_row <= 2)
{
// cout<< "THREADS_PER_VECTOR = 2" << endl;
MPI_Barrier(MPI_COMM_WORLD);
for (int i = 0; i < NUM_RUN+SKIP; i++) {
// cout << "[" << mpi_rank << "] 2-iter= " << i+1 << " mat= " << matName << endl;
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(cudaMemcpy(d_x, x, n * sizeof(value_type), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_y, y, m * sizeof(value_type), cudaMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<2>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(cudaMemcpy(y, d_y, m * sizeof(value_type), cudaMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
else if (nnz_per_row <= 4)
{
MPI_Barrier(MPI_COMM_WORLD);
for (int i = 0; i < NUM_RUN+SKIP; i++) {
// cout << "[" << mpi_rank << "] 4-iter= " << i+1 << " mat= " << matName << endl;
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(cudaMemcpy(d_x, x, n * sizeof(value_type), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_y, y, m * sizeof(value_type), cudaMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<4>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(cudaMemcpy(y, d_y, m * sizeof(value_type), cudaMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
else if (nnz_per_row <= 8)
{
// cout<< "THREADS_PER_VECTOR = 8" << endl;
MPI_Barrier(MPI_COMM_WORLD);
for (int i = 0; i < NUM_RUN+SKIP; i++) {
// cout << "[" << mpi_rank << "] 8-iter= " << i+1 << " mat= " << matName << endl;
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(cudaMemcpy(d_x, x, n * sizeof(value_type), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_y, y, m * sizeof(value_type), cudaMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<8>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(cudaMemcpy(y, d_y, m * sizeof(value_type), cudaMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
else if (nnz_per_row <= 16)
{
// cout<< "[" << mpi_rank << "] THREADS_PER_VECTOR = 16" << endl;
MPI_Barrier(MPI_COMM_WORLD);
        for (int i = 0; i < NUM_RUN+SKIP; i++) {
/*if(i==19){
for (int j = 0; j < 10; ++j) {
cout<< "[" << mpi_rank << "] 16: " ;
cout << x[j] << " ";
}
cout<<endl;
}
cout << "[" << mpi_rank << "] 16-iter= " << i+1 << " mat= " << matName << endl;*/
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(cudaMemcpy(d_x, x, n * sizeof(value_type), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_y, y, m * sizeof(value_type), cudaMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<16>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(cudaMemcpy(y, d_y, m * sizeof(value_type), cudaMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
else
{
// cout<< "THREADS_PER_VECTOR = 32" << endl;
MPI_Barrier(MPI_COMM_WORLD);
for (int i = 0; i < NUM_RUN+SKIP; i++) {
/*if(i==20){
for (int j = 0; j < 10; ++j) {
cout<< "[" << mpi_rank << "] 32: ";
cout << x[j] << " ";
}
cout<<endl;
}
cout << "[" << mpi_rank << "] 32-iter= " << i+1 << " mat= " << matName << endl;*/
broadcast_timer.start();
MPI_Bcast(x, m, MPI_FLOAT, col_rank, commcol); //col_rank is the one with the correct information
b_time = broadcast_timer.stop();
checkCudaErrors(cudaMemcpy(d_x, x, n * sizeof(value_type), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_y, y, m * sizeof(value_type), cudaMemcpyHostToDevice));
mult_timer.start();
cusp_spmv<32>(m, n, nnz, d_csrRowPtrA, d_csrColIdxA, d_csrValA, d_x, d_y);
m_time = mult_timer.stop();
checkCudaErrors(cudaMemcpy(y, d_y, m * sizeof(value_type), cudaMemcpyDeviceToHost));
reduce_timer.start();
MPI_Reduce(y, x, m, MPI_FLOAT, MPI_SUM, row_rank, commrow);
r_time = reduce_timer.stop();
if(i>=SKIP){
avg_b_time += b_time;
avg_m_time += m_time;
avg_r_time += r_time;
}
MPI_Barrier(commcol);
MPI_Barrier(commrow);
MPI_Barrier(MPI_COMM_WORLD);
}
}
MPI_Barrier(MPI_COMM_WORLD);
checkCudaErrors(cudaDeviceSynchronize());
if(mpi_rank == MASTER)
cout<< "Run complete" << endl;
double cuspTime = cusp_timer.stop() / (NUM_RUN+SKIP);
int avg_nnz;
double avg_nnz_per_row, avgTime;
avg_b_time /= NUM_RUN;
avg_m_time /= NUM_RUN;
avg_r_time /= NUM_RUN;
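    // Per-rank averages are summed onto MASTER with MPI_Reduce and then divided by nRanks
    // below to report global averages across the process grid.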
MPI_Reduce(&nnz, &avg_nnz, 1, MPI_INT, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&nnz_per_row, &avg_nnz_per_row, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&avg_b_time, &b_time, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&avg_m_time, &m_time, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&avg_r_time, &r_time, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
MPI_Reduce(&cuspTime, &avgTime, 1, MPI_DOUBLE, MPI_SUM, MASTER, MPI_COMM_WORLD);
avg_nnz /= nRanks;
avg_nnz_per_row /= nRanks;
b_time /= nRanks;
m_time /= nRanks;
r_time /= nRanks;
avgTime /= nRanks;
if(mpi_rank == MASTER) {
cout << "CUSP time = " << cuspTime
<< " ms. Bandwidth = " << gb / (1.0e+6 * cuspTime)
<< " GB/s. GFlops = " << gflop / (1.0e+6 * cuspTime) << " GFlops." << endl << endl;
// run CUSP STOP
char outputFile[100] = "Results/CSR_CUDA_2DSpMV.csv";
FILE *resultCSV;
FILE *checkFile;
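        // Append to the results CSV if it already exists; otherwise create it and write the header row.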
if ((checkFile = fopen(outputFile, "r")) != NULL) {
// file exists
fclose(checkFile);
if (!(resultCSV = fopen(outputFile, "a"))) {
fprintf(stderr, "fopen: failed to open %s file\n", outputFile);
exit(EXIT_FAILURE);
}
} else {
if (!(resultCSV = fopen(outputFile, "w"))) {
fprintf(stderr, "fopen: failed to open file %s\n", outputFile);
exit(EXIT_FAILURE);
}
fprintf(resultCSV,
"Name,M,N,AvgTime,AvgBcastTime,AvgMultTime,AvgReduceTime,TotalRun,NonZeroPerRow,NonZeroElements,Bandwidth,Flops,ValueType,Type,Strides,TransactionByte,WordSize\n");
}
fprintf(resultCSV, "%s,%d,%d,%10.6lf,%10.6lf,%10.6lf,%10.6lf,%d,%lf,%d,%lf,%lf,%d,%s,%ld,%d,%d\n", matName, m,
n, avgTime, b_time, m_time, r_time, (NUM_RUN + SKIP), avg_nnz_per_row, avg_nnz, gb/(1.0e+6 * avgTime),
gflop/(1.0e+6 * avgTime), sizeof(value_type), "CUSP", strideCounts, TRANSACTION_BYTE,
TRANSACTION_BYTE/sizeof(value_type));
if (fclose(resultCSV) != 0) {
fprintf(stderr, "fopen: failed to open file %s\n", outputFile);
exit(EXIT_FAILURE);
}
}
checkCudaErrors(cudaDeviceSynchronize());
free(y_cusp_ref);
checkCudaErrors(cudaFree(d_csrRowPtrA));
checkCudaErrors(cudaFree(d_csrColIdxA));
checkCudaErrors(cudaFree(d_csrValA));
checkCudaErrors(cudaFree(d_x));
checkCudaErrors(cudaFree(d_y));
return;
}
void call_omp_ref(int m, int n, int nnz,
int *csrRowPtrA, int *csrColIdxA, value_type *csrValA,
value_type *x, value_type *y, value_type *y_ref)
{
#if USE_SVM_ALWAYS
cout << endl << "OpenMP is using shared virtual memory (unified memory).";
int *svm_csrRowPtrA;
int *svm_csrColIdxA;
value_type *svm_csrValA;
value_type *svm_x;
value_type *svm_y;
// prepare shared virtual memory (unified memory)
checkCudaErrors(cudaMallocManaged(&svm_csrRowPtrA, (m+1) * sizeof(int)));
checkCudaErrors(cudaMallocManaged(&svm_csrColIdxA, nnz * sizeof(int)));
checkCudaErrors(cudaMallocManaged(&svm_csrValA, nnz * sizeof(value_type)));
memcpy(svm_csrRowPtrA, csrRowPtrA, (m+1) * sizeof(int));
memcpy(svm_csrColIdxA, csrColIdxA, nnz * sizeof(int));
memcpy(svm_csrValA, csrValA, nnz * sizeof(value_type));
checkCudaErrors(cudaMallocManaged(&svm_x, n * sizeof(value_type)));
memcpy(svm_x, x, n * sizeof(value_type));
checkCudaErrors(cudaMallocManaged(&svm_y, m * sizeof(value_type)));
memcpy(svm_y, y, m * sizeof(value_type));
#else
// cout << endl << "OpenMP is using dedicated HOST memory.";
value_type *y_omp_ref = (value_type *)malloc(m * sizeof(value_type));
#endif
double gb = (double)((m + 1 + nnz) * sizeof(int) + (2 * nnz + m) * sizeof(value_type));
double gflop = (double)(2 * nnz);
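    // Traffic model: row pointers and column indices as ints, plus values, gathered x entries and
    // the y vector as value_type; 2 flops (one multiply, one add) per nonzero.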
// run OpenMP START
// omp_set_num_threads(4);
// cout << endl << "OpenMP is using 4 threads.";
checkCudaErrors(cudaDeviceSynchronize());
bhsparse_timer omp_timer;
omp_timer.start();
for (int iter = 0; iter < NUM_RUN; iter++)
{
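        // Host reference CSR SpMV: each OpenMP thread computes complete rows of y = A * x.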
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
value_type sum = 0;
#if USE_SVM_ALWAYS
for (int j = svm_csrRowPtrA[i]; j < svm_csrRowPtrA[i+1]; j++)
sum += svm_x[svm_csrColIdxA[j]] * svm_csrValA[j];
svm_y[i] = sum;
#else
for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++)
sum += x[csrColIdxA[j]] * csrValA[j];
y_omp_ref[i] = sum;
#endif
}
}
double ompTime = omp_timer.stop() / NUM_RUN;
cout << endl << "Checking OpenMP SpMV Correctness ... ";
int error_count = 0;
for (int i = 0; i < m; i++)
#if USE_SVM_ALWAYS
if (y_ref[i] != svm_y[i])
error_count++;
#else
if (y_ref[i] != y_omp_ref[i])
error_count++;
#endif
if (error_count)
cout << "NO PASS. Error count = " << error_count << " out of " << m << " entries." << endl;
// else
// cout << "PASS!" << endl;
if(mpi_rank == MASTER) {
cout << "OpenMP time = " << ompTime
<< " ms. Bandwidth = " << gb / (1.0e+6 * ompTime)
<< " GB/s. GFlops = " << gflop / (1.0e+6 * ompTime) << " GFlops." << endl << endl;
}
// run OpenMP STOP
#if USE_SVM_ALWAYS
checkCudaErrors(cudaFree(svm_csrValA));
checkCudaErrors(cudaFree(svm_csrRowPtrA));
checkCudaErrors(cudaFree(svm_csrColIdxA));
checkCudaErrors(cudaFree(svm_x));
checkCudaErrors(cudaFree(svm_y));
#else
free(y_omp_ref);
#endif
return;
}
int call_bhsparse_small()
{
int err = 0;
int m, n, nnzA;
int *csrColIdxA;
int *csrRowPtrA;
value_type *csrValA;
m = 6;
n = 6;
nnzA = 15;
csrColIdxA = (int *)malloc(nnzA * sizeof(int));
csrRowPtrA = (int *)malloc((m+1) * sizeof(int));
csrValA = (value_type *)malloc(nnzA * sizeof(value_type));
int row_ptr[7] = {0, 3, 9, 11, 11, 12, 15};
int col_idx[15] = {0, 2, 5, 0, 1, 2, 3, 4, 5, 2, 4, 4, 2, 3, 4};
value_type val[15] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
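    // Hand-coded 6x6 CSR test matrix with 15 nonzeros; row 3 is empty (row_ptr[3] == row_ptr[4]).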
memcpy(csrRowPtrA, row_ptr, (m+1) * sizeof(int));
memcpy(csrColIdxA, col_idx, nnzA * sizeof(int));
memcpy(csrValA, val, nnzA * sizeof(value_type));
cout << "row_ptr = [ ";
for (int i = 0; i < m + 1; i++)
cout << csrRowPtrA[i] << ", ";
cout << " ]" << endl;
cout << "col_idx = [ ";
for (int i = 0; i < nnzA; i++)
cout << csrColIdxA[i] << ", ";
cout << " ]" << endl;
cout << "value = [ ";
for (int i = 0; i < nnzA; i++)
cout << csrValA[i] << ", ";
cout << " ]" << endl << endl;
value_type *x = (value_type *)malloc(n * sizeof(value_type));
for (int i = 0; i < n; i++)
x[i] = 1.0;
value_type *y = (value_type *)malloc(m * sizeof(value_type));
value_type *y_ref = (value_type *)malloc(m * sizeof(value_type));
// compute cpu results
for (int i = 0; i < m; i++)
{
value_type sum = 0;
for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++)
sum += x[csrColIdxA[j]] * csrValA[j];
y_ref[i] = sum;
}
memset(y, 0, m * sizeof(value_type));
bhsparse_spmv_cuda *bhsparse = new bhsparse_spmv_cuda();
err = bhsparse->init_platform();
// cout << "Initializing CUDA platform ... ";
if (!err) {
// cout << "Done.";
}
else
cout << "\"Initializing CUDA platform ... Failed. Error code = " << err << endl;
// cout << endl;
err = bhsparse->prepare_mem(m, n, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y);
err = bhsparse->run_benchmark();
cout << endl;
// print y_ref
cout << "(CPU) y = ";
for (int i = 0; i < m; i++)
{
cout << y_ref[i] << ", ";
if ((i+1) % 16 == 0)
cout << endl;
}
cout << endl;
// print y
cout << "(GPU) y = ";
for (int i = 0; i < m; i++)
{
cout << y[i] << ", ";
if ((i+1) % 16 == 0)
cout << endl;
}
cout << endl;
// compare cpu and gpu results
cout << endl << "Checking bhSPARSE SpMV Correctness ... ";
int error_count = 0;
for (int i = 0; i < m; i++)
if (y_ref[i] != y[i])
{
error_count++;
cout << "ERROR ROW [ " << i << " ] " "cpu = " << y_ref[i] << ", gpu = " << y[i] << endl;
}
if (error_count)
cout << "NO PASS. Error count = " << error_count << " out of " << m << " entries.";
else
cout << "PASS!";
cout << endl;
free(y_ref);
err = bhsparse->free_platform();
err = bhsparse->free_mem();
return err;
}
int call_bhsparse(const char *datasetpath)
{
int err = 0;
// report precision of floating-point
    const char *precision;
if (sizeof(value_type) == 4)
{
precision = "32-bit Single Precision";
}
else if (sizeof(value_type) == 8)
{
precision = "64-bit Double Precision";
}
else
{
cout << "Wrong precision. Program exit!" << endl;
return 0;
}
// cout << "PRECISION = " << precision << endl;
// cout << "RUN SpMV " << NUM_RUN << " times" << endl;
int ret_code;
MM_typecode matcode;
FILE *f;
int m, n, nnzA, max_deg = 0;
int *csrRowPtrA;
int *csrColIdxA;
value_type *csrValA;
int nnzA_mtx_report;
int isInteger = 0, isReal = 0, isPattern = 0, isSymmetric = 0;
// load matrix
if ((f = fopen(datasetpath, "r")) == NULL)
return -1;
if (mm_read_banner(f, &matcode) != 0)
{
cout << "Could not process Matrix Market banner." << endl;
return -2;
}
if ( mm_is_complex( matcode ) )
{
cout <<"Sorry, data type 'COMPLEX' is not supported. " << endl;
return -3;
}
if ( mm_is_pattern( matcode ) ) { isPattern = 1; /*cout << "type = Pattern" << endl;*/ }
if ( mm_is_real ( matcode) ) { isReal = 1; /*cout << "type = real" << endl;*/ }
if ( mm_is_integer ( matcode ) ) { isInteger = 1; /*cout << "type = integer" << endl;*/ }
/* find out size of sparse matrix .... */
ret_code = mm_read_mtx_crd_size(f, &m, &n, &nnzA_mtx_report);
if (ret_code != 0)
return -4;
if ( mm_is_symmetric( matcode ) || mm_is_hermitian( matcode ) )
{
isSymmetric = 1;
//cout << "symmetric = true" << endl;
}
else
{
//cout << "symmetric = false" << endl;
}
firstRow = ceil(((double) n / sqrRank)) * (mpi_rank / sqrRank);
m = ceil(((double) n) / sqrRank);
firstCol = col_rank * m;
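    // 2D partitioning: each rank owns an m-by-m tile with m = ceil(n / sqrt(#ranks));
    // firstRow/firstCol give the tile's global offset in the matrix.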
int *csrRowPtrA_counter = (int *)malloc((m+1) * sizeof(int));
memset(csrRowPtrA_counter, 0, (m+1) * sizeof(int));
int *csrRowIdxA_tmp = (int *)malloc(nnzA_mtx_report * sizeof(int));
int *csrColIdxA_tmp = (int *)malloc(nnzA_mtx_report * sizeof(int));
value_type *csrValA_tmp = (value_type *)malloc(nnzA_mtx_report * sizeof(value_type));
/* NOTE: when reading in doubles, ANSI C requires the use of the "l" */
/* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */
/* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15) */
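    // Read the Matrix Market triplets, shift them into this rank's local index space
    // (subtract firstRow/firstCol), and track the maximum row degree.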
for (int i = 0; i < nnzA_mtx_report; i++)
{
int idxi, idxj;
double fval;
int ival;
if (isReal)
int count = fscanf(f, "%d %d %lg\n", &idxi, &idxj, &fval);
else if (isInteger)
{
int count = fscanf(f, "%d %d %d\n", &idxi, &idxj, &ival);
fval = ival;
}
else if (isPattern)
{
int count = fscanf(f, "%d %d\n", &idxi, &idxj);
fval = 1.0;
}
// adjust from 1-based to 0-based
idxi--;
idxj--;
csrRowPtrA_counter[idxi - firstRow]++;
csrRowIdxA_tmp[i] = idxi - firstRow;
csrColIdxA_tmp[i] = idxj - firstCol;
csrValA_tmp[i] = fval;
if (csrRowPtrA_counter[idxi - firstRow] > max_deg) {
max_deg = csrRowPtrA_counter[idxi - firstRow];
}
}
if (f != stdin)
fclose(f);
if (isSymmetric)
{
for (int i = 0; i < nnzA_mtx_report; i++)
{
if (csrRowIdxA_tmp[i] != csrColIdxA_tmp[i])
csrRowPtrA_counter[csrColIdxA_tmp[i]]++;
}
}
// exclusive scan for csrRowPtrA_counter
int old_val, new_val;
old_val = csrRowPtrA_counter[0];
csrRowPtrA_counter[0] = 0;
for (int i = 1; i <= m; i++)
{
new_val = csrRowPtrA_counter[i];
csrRowPtrA_counter[i] = old_val + csrRowPtrA_counter[i-1];
old_val = new_val;
}
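    // csrRowPtrA_counter now holds the exclusive prefix sum, i.e. the CSR row pointer;
    // its last entry is the local nonzero count.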
nnzA = csrRowPtrA_counter[m];
csrRowPtrA = (int *)malloc((m+1) * sizeof(int));
memcpy(csrRowPtrA, csrRowPtrA_counter, (m+1) * sizeof(int));
memset(csrRowPtrA_counter, 0, (m+1) * sizeof(int));
csrColIdxA = (int *)malloc(nnzA * sizeof(int));
csrValA = (value_type *)malloc(nnzA * sizeof(value_type));
double gb = (double)((m + 1 + nnzA) * sizeof(int) + (2 * nnzA + m) * sizeof(value_type));
double gflop = (double)(2 * nnzA);
if (isSymmetric)
{
for (int i = 0; i < nnzA_mtx_report; i++)
{
if (csrRowIdxA_tmp[i] != csrColIdxA_tmp[i])
{
int offset = csrRowPtrA[csrRowIdxA_tmp[i]] + csrRowPtrA_counter[csrRowIdxA_tmp[i]];
csrColIdxA[offset] = csrColIdxA_tmp[i];
csrValA[offset] = csrValA_tmp[i];
csrRowPtrA_counter[csrRowIdxA_tmp[i]]++;
offset = csrRowPtrA[csrColIdxA_tmp[i]] + csrRowPtrA_counter[csrColIdxA_tmp[i]];
csrColIdxA[offset] = csrRowIdxA_tmp[i];
csrValA[offset] = csrValA_tmp[i];
csrRowPtrA_counter[csrColIdxA_tmp[i]]++;
}
else
{
int offset = csrRowPtrA[csrRowIdxA_tmp[i]] + csrRowPtrA_counter[csrRowIdxA_tmp[i]];
csrColIdxA[offset] = csrColIdxA_tmp[i];
csrValA[offset] = csrValA_tmp[i];
csrRowPtrA_counter[csrRowIdxA_tmp[i]]++;
}
}
}
else
{
for (int i = 0; i < nnzA_mtx_report; i++)
{
int offset = csrRowPtrA[csrRowIdxA_tmp[i]] + csrRowPtrA_counter[csrRowIdxA_tmp[i]];
csrColIdxA[offset] = csrColIdxA_tmp[i];
csrValA[offset] = csrValA_tmp[i];
csrRowPtrA_counter[csrRowIdxA_tmp[i]]++;
}
}
// free tmp space
free(csrColIdxA_tmp);
free(csrValA_tmp);
free(csrRowIdxA_tmp);
free(csrRowPtrA_counter);
if(mpi_rank == MASTER)
cout << " ( " << m << ", " << n << " ) nnz = " << nnzA << endl;
srand(time(NULL));
for (int i = 0; i < nnzA; i++)
{
csrValA[i] = 1.0/(value_type)m;
}
value_type *x = (value_type *)malloc(m * sizeof(value_type));
for (int i = 0; i < m; i++)
x[i] = 1.0;
value_type *y = (value_type *)malloc(m * sizeof(value_type));
value_type *y_ref = (value_type *)malloc(m * sizeof(value_type));
/***********Access Pattern Based on 128 Threads Per Block *********/
if(mpi_rank == MASTER)
cout << "M: " << m << " N: " << n << " nnzA: " << nnzA << " Max degree=" << max_deg << endl;
int wordSize = TRANSACTION_BYTE/ sizeof(value_type);
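    // Coalescing estimate: for each group of wordSize consecutive rows, count the distinct
    // TRANSACTION_BYTE-aligned segments of x touched at nonzero depth k; strideCounts sums them.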
for (int row_i = 0; row_i < m; row_i += wordSize) {
for (int k = 0; k < max_deg; ++k) {
int failed = 0;
int row_check = (row_i + wordSize) > m ? m : (row_i + wordSize);
unordered_set<long> hashme;
for (int th = row_i; th < row_check; ++th) {
if (k < (csrRowPtrA[th + 1] - csrRowPtrA[th])) {
hashme.insert((long)(&x[csrColIdxA[csrRowPtrA[th] + k]])/TRANSACTION_BYTE);
failed = 1;
}
}
if (failed == 0) {
break;
}
strideCounts += hashme.size();
}
}
if(mpi_rank == MASTER)
cout << "Strides count: " << strideCounts << " Transaction Byte Size: " << TRANSACTION_BYTE << " Number of Transaction Word: " << wordSize << endl;
/*****************************************************************/
// compute cpu results
bhsparse_timer ref_timer;
ref_timer.start();
int ref_iter = 1;
for (int iter = 0; iter < ref_iter; iter++)
{
for (int i = 0; i < m; i++)
{
value_type sum = 0;
for (int j = csrRowPtrA[i]; j < csrRowPtrA[i+1]; j++)
sum += x[csrColIdxA[j]] * csrValA[j];
y_ref[i] = sum;
}
}
double ref_time = ref_timer.stop() / (double)ref_iter;
if(mpi_rank == MASTER) {
cout << "cpu sequential time = " << ref_time
<< " ms. Bandwidth = " << gb / (1.0e+6 * ref_time)
<< " GB/s. GFlops = " << gflop / (1.0e+6 * ref_time) << " GFlops." << endl << endl;
}
memset(y, 0, m * sizeof(value_type));
bhsparse_spmv_cuda *bhsparse = new bhsparse_spmv_cuda();
// cout<< " rank sent to set device = " << mpi_rank << endl;
err = bhsparse->init_platform(mpi_rank);
// test OpenMP, cuSPARSE and CUSP v0.4.0
call_cusp_ref(m, m, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y, y_ref);
// call_cusparse_ref(m, m, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y, y_ref);
// call_omp_ref(m, n, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y, y_ref);
// run bhSPARSE
/*err = bhsparse->prepare_mem(m, m, nnzA, csrRowPtrA, csrColIdxA, csrValA, x, y);
double time = 0.0;
err = bhsparse->run_benchmark();
bhsparse->get_y();
// compare ref and our results
cout << endl << "Checking bhSPARSE SpMV Correctness ... ";
int error_count = 0;
for (int i = 0; i < m; i++)
if (y_ref[i] != y[i])
{
error_count++;
// cout << "ROW [ " << i << " ] "
// << csrRowPtrA[i] << " - "
// << csrRowPtrA[i+1]
// << " warp = " << csrRowPtrA[i+1]/(31*256)
// << "\t cpu = " << y_ref[i]
// << ", \t gpu = " << y[i]
// << ", \t error = " << y_ref[i] - y[i]
// << endl;
}
if (error_count)
cout << "NO PASS. Error count = " << error_count << " out of " << m << " entries.";
else
{
cout << "PASS!";
bhsparse_timer spmv_timer;
spmv_timer.start();
for (int i = 0; i < NUM_RUN; i++)
{
err = bhsparse->run_benchmark();
}
time = spmv_timer.stop()/(double)NUM_RUN;
cout << endl << "bhSPARSE time = " << time
<< " ms. Bandwidth = " << gb/(1.0e+6 * time)
<< " GB/s. GFlops = " << gflop/(1.0e+6 * time) << " GFlops." << endl;
}
err = bhsparse->free_platform();
err = bhsparse->free_mem();*/
free(csrRowPtrA);
free(csrColIdxA);
free(csrValA);
free(x);
free(y);
free(y_ref);
return err;
}
int main(int argc, char ** argv)
{
/*
#if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
printf("This MPI library has CUDA-aware support.\n", MPIX_CUDA_AWARE_SUPPORT);
#elif defined(MPIX_CUDA_AWARE_SUPPORT) && !MPIX_CUDA_AWARE_SUPPORT
printf("This MPI library does not have CUDA-aware support.\n");
#else
printf("This MPI library cannot determine if there is CUDA-aware support.\n");
#endif *//* MPIX_CUDA_AWARE_SUPPORT *//*
printf("Run time check:\n");
#if defined(MPIX_CUDA_AWARE_SUPPORT)
if (1 == MPIX_Query_cuda_support()) {
printf("This MPI library has CUDA-aware support.\n");
} else {
printf("This MPI library does not have CUDA-aware support.\n");
}
#else *//* !defined(MPIX_CUDA_AWARE_SUPPORT) *//*
printf("This MPI library cannot determine if there is CUDA-aware support.\n");
#endif *//* MPIX_CUDA_AWARE_SUPPORT *//*
return 0;*/
int argi = 1;
char *input;
char filename[MAX_STRING_LENGTH];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nRanks);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
char processor_name[MPI_MAX_PROCESSOR_NAME];
int name_len;
MPI_Get_processor_name(processor_name, &name_len);
if(mpi_rank == MASTER)
std::cout<<"[" << mpi_rank << ": " << processor_name << "] GPU 2d SpMV, MPI rank " << mpi_rank << " of "<< nRanks
<< " starting...." << endl;
sqrRank = sqrt(nRanks);
row_rank = mpi_rank / sqrRank; //which col of proc am I
col_rank = mpi_rank % sqrRank; //which row of proc am I
//initialize communicators
MPI_Comm_split(MPI_COMM_WORLD, row_rank, mpi_rank, &commrow);
MPI_Comm_split(MPI_COMM_WORLD, col_rank, mpi_rank, &commcol);
if(argc > argi)
{
input = argv[argi];
argi++;
}
if (argc > argi){
testType = atoi(argv[argi]);
argi++;
}
int err = 0;
char *file[MAX_STRING_LENGTH];
char *only_mat_name[MAX_STRING_LENGTH];
char n[MAX_STRING_LENGTH] = "";
char * ptr = strtok(input, "/");
int i=0,j;
while(ptr != NULL)
{
file[i++] = ptr;
ptr = strtok(NULL, "/");
}
for(j=0; j<i-1; ++j){
strcat(n, file[j]);
strcat(n, "/");
}
ptr = strtok(file[i-1], ".");
sprintf(filename, "%s%s_%d.mtx", n, ptr, mpi_rank);
char *good_format = strtok(ptr, "_");
i=0;
while(good_format != NULL)
{
only_mat_name[i++] = good_format;
good_format = strtok(NULL, "_");
}
for(j=0; j<i-2; ++j){
strcat(matName, only_mat_name[j]);
if(j < i-3)
strcat(matName, "_");
}
if (strcmp(filename, "0") == 0)
err = call_bhsparse_small();
else
{
if(mpi_rank == MASTER)
cout << "--------------" << filename << "--------------" << endl;
err = call_bhsparse(filename);
}
MPI_Barrier(MPI_COMM_WORLD);
if(mpi_rank == MASTER)
cout << "------------------------------------------------------" << endl;
MPI_Finalize();
return err;
}
|
dc03cbecd0bd59e0b2514edb505e187cad0c2088.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file density_overflow_cuda_kernel.cu
* @author Yibo Lin
* @date Jun 2018
* @brief Compute density overflow according to NTUPlace3 (https://doi.org/10.1109/TCAD.2008.923063).
* This is for fixed cells.
*/
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
//#include "utility/src/gemm.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void computeDensityMap(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
const int num_impacted_bins_x, const int num_impacted_bins_y,
T* density_map_tensor)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// rank-one update density map
if (i < num_nodes*num_impacted_bins_x*num_impacted_bins_y)
{
// density overflow function
auto computeDensityOverflowFunc = [](T x, T node_size, T bin_center, T bin_size){
return max(T(0.0), min(x+node_size, bin_center+bin_size/2) - max(x, bin_center-bin_size/2));
};
int node_id = i/(num_impacted_bins_x*num_impacted_bins_y);
int residual_index = i-node_id*num_impacted_bins_x*num_impacted_bins_y;
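        // One thread handles one (node, impacted bin) pair: recover the node id and its
        // local bin offset within the node's impacted window from the flat index.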
// x direction
int bin_index_xl = int((x_tensor[node_id]-xl)/bin_size_x);
bin_index_xl = max(bin_index_xl, 0);
int k = bin_index_xl+int(residual_index / num_impacted_bins_y);
if (k+1 > num_bins_x)
{
return;
}
// y direction
int bin_index_yl = int((y_tensor[node_id]-yl)/bin_size_y);
bin_index_yl = max(bin_index_yl, 0);
int h = bin_index_yl+(residual_index % num_impacted_bins_y);
if (h+1 > num_bins_y)
{
return;
}
T px = computeDensityOverflowFunc(x_tensor[node_id], node_size_x_tensor[node_id], bin_center_x_tensor[k], bin_size_x);
T py = computeDensityOverflowFunc(y_tensor[node_id], node_size_y_tensor[node_id], bin_center_y_tensor[h], bin_size_y);
// still area
atomicAdd(&density_map_tensor[k*num_bins_y+h], px*py);
}
}
template <typename T>
int computeDensityOverflowMapCudaLauncher(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const int num_impacted_bins_x, const int num_impacted_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor
)
{
int thread_count = 512;
int block_count = (num_nodes*num_impacted_bins_x*num_impacted_bins_y - 1 + thread_count) /thread_count;
hipLaunchKernelGGL(( computeDensityMap), dim3(block_count), dim3(thread_count), 0, 0,
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
bin_center_x_tensor, bin_center_y_tensor,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
num_impacted_bins_x, num_impacted_bins_y,
density_map_tensor);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputeDensityOverflowMapLauncher(\
const T* x_tensor, const T* y_tensor, \
const T* node_size_x_tensor, const T* node_size_y_tensor, \
const T* bin_center_x_tensor, const T* bin_center_y_tensor, \
const int num_nodes, \
const int num_bins_x, const int num_bins_y, \
const int num_impacted_bins_x, const int num_impacted_bins_y, \
const T xl, const T yl, const T xh, const T yh, \
const T bin_size_x, const T bin_size_y, \
T* density_map_tensor\
)\
{ \
return computeDensityOverflowMapCudaLauncher(\
x_tensor, y_tensor, \
node_size_x_tensor, node_size_y_tensor, \
bin_center_x_tensor, bin_center_y_tensor, \
num_nodes, \
num_bins_x, num_bins_y, \
num_impacted_bins_x, num_impacted_bins_y, \
xl, yl, xh, yh, \
bin_size_x, bin_size_y, \
density_map_tensor\
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
template <typename T>
__global__ void computeGaussianFilterWeights(
const int num_bins_x, const int num_bins_y,
const T sigma,
T* gaussian_filter_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_bins_x*num_bins_y)
{
int x = i/num_bins_y;
int y = i-x*num_bins_y;
T sigma_square = sigma*sigma;
        T x2_y2 = (x-num_bins_x/2)*(x-num_bins_x/2) + (y-num_bins_y/2)*(y-num_bins_y/2);
//gaussian_filter_tensor[i] = 1.0/(2*M_PI*sigma_square) * exp(-x2_y2/(2*sigma_square));
gaussian_filter_tensor[i] = 2 * exp(-x2_y2/(2*sigma_square));
}
}
template <typename T>
int computeGaussianFilterLauncher(
const int num_bins_x, const int num_bins_y,
const T sigma,
T* gaussian_filter_tensor
)
{
int thread_count = 512;
int block_count = (num_bins_x*num_bins_y - 1 + thread_count) / thread_count;
hipLaunchKernelGGL(( computeGaussianFilterWeights), dim3(block_count), dim3(thread_count), 0, 0,
num_bins_x, num_bins_y,
sigma,
gaussian_filter_tensor
);
return 0;
}
#define REGISTER_GAUSSIAN_FILTER_LAUNCHER(T) \
int instantiateComputeGaussianFilterLauncher(\
const int num_bins_x, const int num_bins_y, \
const T sigma, \
T* gaussian_filter_tensor\
)\
{ \
return computeGaussianFilterLauncher(\
num_bins_x, num_bins_y, \
sigma, \
gaussian_filter_tensor\
);\
}
REGISTER_GAUSSIAN_FILTER_LAUNCHER(float);
REGISTER_GAUSSIAN_FILTER_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| dc03cbecd0bd59e0b2514edb505e187cad0c2088.cu | /**
* @file density_overflow_cuda_kernel.cu
* @author Yibo Lin
* @date Jun 2018
* @brief Compute density overflow according to NTUPlace3 (https://doi.org/10.1109/TCAD.2008.923063).
* This is for fixed cells.
*/
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "cuda_runtime.h"
//#include "utility/src/gemm.h"
#include "utility/src/print.h"
#include "utility/src/Msg.h"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void computeDensityMap(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
const int num_impacted_bins_x, const int num_impacted_bins_y,
T* density_map_tensor)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// rank-one update density map
if (i < num_nodes*num_impacted_bins_x*num_impacted_bins_y)
{
// density overflow function
auto computeDensityOverflowFunc = [](T x, T node_size, T bin_center, T bin_size){
return max(T(0.0), min(x+node_size, bin_center+bin_size/2) - max(x, bin_center-bin_size/2));
};
int node_id = i/(num_impacted_bins_x*num_impacted_bins_y);
int residual_index = i-node_id*num_impacted_bins_x*num_impacted_bins_y;
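        // One thread handles one (node, impacted bin) pair: recover the node id and its
        // local bin offset within the node's impacted window from the flat index.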
// x direction
int bin_index_xl = int((x_tensor[node_id]-xl)/bin_size_x);
bin_index_xl = max(bin_index_xl, 0);
int k = bin_index_xl+int(residual_index / num_impacted_bins_y);
if (k+1 > num_bins_x)
{
return;
}
// y direction
int bin_index_yl = int((y_tensor[node_id]-yl)/bin_size_y);
bin_index_yl = max(bin_index_yl, 0);
int h = bin_index_yl+(residual_index % num_impacted_bins_y);
if (h+1 > num_bins_y)
{
return;
}
T px = computeDensityOverflowFunc(x_tensor[node_id], node_size_x_tensor[node_id], bin_center_x_tensor[k], bin_size_x);
T py = computeDensityOverflowFunc(y_tensor[node_id], node_size_y_tensor[node_id], bin_center_y_tensor[h], bin_size_y);
// still area
atomicAdd(&density_map_tensor[k*num_bins_y+h], px*py);
}
}
template <typename T>
int computeDensityOverflowMapCudaLauncher(
const T* x_tensor, const T* y_tensor,
const T* node_size_x_tensor, const T* node_size_y_tensor,
const T* bin_center_x_tensor, const T* bin_center_y_tensor,
const int num_nodes,
const int num_bins_x, const int num_bins_y,
const int num_impacted_bins_x, const int num_impacted_bins_y,
const T xl, const T yl, const T xh, const T yh,
const T bin_size_x, const T bin_size_y,
T* density_map_tensor
)
{
int thread_count = 512;
int block_count = (num_nodes*num_impacted_bins_x*num_impacted_bins_y - 1 + thread_count) /thread_count;
computeDensityMap<<<block_count, thread_count>>>(
x_tensor, y_tensor,
node_size_x_tensor, node_size_y_tensor,
bin_center_x_tensor, bin_center_y_tensor,
num_nodes,
num_bins_x, num_bins_y,
xl, yl, xh, yh,
bin_size_x, bin_size_y,
num_impacted_bins_x, num_impacted_bins_y,
density_map_tensor);
return 0;
}
#define REGISTER_KERNEL_LAUNCHER(T) \
int instantiateComputeDensityOverflowMapLauncher(\
const T* x_tensor, const T* y_tensor, \
const T* node_size_x_tensor, const T* node_size_y_tensor, \
const T* bin_center_x_tensor, const T* bin_center_y_tensor, \
const int num_nodes, \
const int num_bins_x, const int num_bins_y, \
const int num_impacted_bins_x, const int num_impacted_bins_y, \
const T xl, const T yl, const T xh, const T yh, \
const T bin_size_x, const T bin_size_y, \
T* density_map_tensor\
)\
{ \
return computeDensityOverflowMapCudaLauncher(\
x_tensor, y_tensor, \
node_size_x_tensor, node_size_y_tensor, \
bin_center_x_tensor, bin_center_y_tensor, \
num_nodes, \
num_bins_x, num_bins_y, \
num_impacted_bins_x, num_impacted_bins_y, \
xl, yl, xh, yh, \
bin_size_x, bin_size_y, \
density_map_tensor\
);\
}
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
template <typename T>
__global__ void computeGaussianFilterWeights(
const int num_bins_x, const int num_bins_y,
const T sigma,
T* gaussian_filter_tensor
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_bins_x*num_bins_y)
{
int x = i/num_bins_y;
int y = i-x*num_bins_y;
T sigma_square = sigma*sigma;
        T x2_y2 = (x-num_bins_x/2)*(x-num_bins_x/2) + (y-num_bins_y/2)*(y-num_bins_y/2);
//gaussian_filter_tensor[i] = 1.0/(2*M_PI*sigma_square) * exp(-x2_y2/(2*sigma_square));
gaussian_filter_tensor[i] = 2 * exp(-x2_y2/(2*sigma_square));
}
}
template <typename T>
int computeGaussianFilterLauncher(
const int num_bins_x, const int num_bins_y,
const T sigma,
T* gaussian_filter_tensor
)
{
int thread_count = 512;
int block_count = (num_bins_x*num_bins_y - 1 + thread_count) / thread_count;
computeGaussianFilterWeights<<<block_count, thread_count>>>(
num_bins_x, num_bins_y,
sigma,
gaussian_filter_tensor
);
return 0;
}
#define REGISTER_GAUSSIAN_FILTER_LAUNCHER(T) \
int instantiateComputeGaussianFilterLauncher(\
const int num_bins_x, const int num_bins_y, \
const T sigma, \
T* gaussian_filter_tensor\
)\
{ \
return computeGaussianFilterLauncher(\
num_bins_x, num_bins_y, \
sigma, \
gaussian_filter_tensor\
);\
}
REGISTER_GAUSSIAN_FILTER_LAUNCHER(float);
REGISTER_GAUSSIAN_FILTER_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
2320e3039fefae06f9fc49bfd73f103246277cbe.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright (c) 2019 by Contributors
* \file kernel/cuda/binary_reduce_min.cu
* \brief CUDA kernels for binary reduce min
*/
#include "binary_reduce_impl_hip.cuh"
#include "./backward_binary_reduce_impl.cuh"
namespace dgl {
namespace kernel {
#define REDUCER ReduceMin
#define XPU kDLGPU
#define IDX int32_t
EVAL(GEN_DTYPE, GEN_OP_TARGET, GEN_DEFINE)
EVAL(GEN_BACKWARD_MODE, GEN_DTYPE, GEN_OP_TARGET, GEN_BACKWARD_DEFINE)
} // namespace kernel
} // namespace dgl
| 2320e3039fefae06f9fc49bfd73f103246277cbe.cu | /*!
* Copyright (c) 2019 by Contributors
* \file kernel/cuda/binary_reduce_min.cu
* \brief CUDA kernels for binary reduce min
*/
#include "./binary_reduce_impl.cuh"
#include "./backward_binary_reduce_impl.cuh"
namespace dgl {
namespace kernel {
#define REDUCER ReduceMin
#define XPU kDLGPU
#define IDX int32_t
EVAL(GEN_DTYPE, GEN_OP_TARGET, GEN_DEFINE)
EVAL(GEN_BACKWARD_MODE, GEN_DTYPE, GEN_OP_TARGET, GEN_BACKWARD_DEFINE)
} // namespace kernel
} // namespace dgl
|